path: root/src/core
author     Craig Tiller <ctiller@google.com>  2016-03-22 08:49:28 -0700
committer  Craig Tiller <ctiller@google.com>  2016-03-22 08:49:28 -0700
commit     d76eb8af377b428eed85d399cd0b0be437d4b642 (patch)
tree       af2d5536827e4c3c9038fd80c741e488effd22ce /src/core
parent     1f5894de47d8f47ccf6e82a2c44da9fe3681e4b3 (diff)
parent     fb6e13b1b5ba135220c7be1edf3fb6f92e79872b (diff)
Merge github.com:grpc/grpc into split-me-baby-one-more-time
(not yet compiling)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/README.md | 3
-rw-r--r--  src/core/census/aggregation.h | 66
-rw-r--r--  src/core/census/context.c | 484
-rw-r--r--  src/core/census/grpc_context.c | 22
-rw-r--r--  src/core/census/grpc_filter.c | 184
-rw-r--r--  src/core/census/grpc_filter.h (renamed from src/core/channel/census_filter.h) | 8
-rw-r--r--  src/core/census/grpc_plugin.c | 72
-rw-r--r--  src/core/census/grpc_plugin.h (renamed from src/core/census/grpc_context.h) | 14
-rw-r--r--  src/core/census/initialize.c | 26
-rw-r--r--  src/core/census/mlog.c | 600
-rw-r--r--  src/core/census/mlog.h | 95
-rw-r--r--  src/core/census/operation.c (renamed from src/core/compression/algorithm.c) | 58
-rw-r--r--  src/core/census/placeholders.c | 109
-rw-r--r--  src/core/census/rpc_metric_id.h | 51
-rw-r--r--  src/core/census/tracing.c | 45
-rw-r--r--  src/core/channel/census_filter.c | 210
-rw-r--r--  src/core/channel/channel_args.c | 186
-rw-r--r--  src/core/channel/channel_args.h | 52
-rw-r--r--  src/core/channel/channel_stack.c | 97
-rw-r--r--  src/core/channel/channel_stack.h | 165
-rw-r--r--  src/core/channel/channel_stack_builder.c | 259
-rw-r--r--  src/core/channel/channel_stack_builder.h | 155
-rw-r--r--  src/core/channel/child_channel.c | 308
-rw-r--r--  src/core/channel/client_channel.c | 836
-rw-r--r--  src/core/channel/client_channel.h | 31
-rw-r--r--  src/core/channel/client_setup.c | 302
-rw-r--r--  src/core/channel/client_setup.h | 77
-rw-r--r--  src/core/channel/client_uchannel.c | 233
-rw-r--r--  src/core/channel/client_uchannel.h | 60
-rw-r--r--  src/core/channel/compress_filter.c | 297
-rw-r--r--  src/core/channel/compress_filter.h | 65
-rw-r--r--  src/core/channel/connected_channel.c | 166
-rw-r--r--  src/core/channel/connected_channel.h | 21
-rw-r--r--  src/core/channel/context.h | 8
-rw-r--r--  src/core/channel/http_client_filter.c | 278
-rw-r--r--  src/core/channel/http_client_filter.h | 8
-rw-r--r--  src/core/channel/http_server_filter.c | 275
-rw-r--r--  src/core/channel/http_server_filter.h | 8
-rw-r--r--  src/core/channel/noop_filter.c | 141
-rw-r--r--  src/core/channel/subchannel_call_holder.c | 259
-rw-r--r--  src/core/channel/subchannel_call_holder.h | 98
-rw-r--r--  src/core/client_config/README.md | 66
-rw-r--r--  src/core/client_config/client_config.c (renamed from src/core/iomgr/alarm_internal.h) | 56
-rw-r--r--  src/core/client_config/client_config.h | 53
-rw-r--r--  src/core/client_config/connector.c | 55
-rw-r--r--  src/core/client_config/connector.h | 92
-rw-r--r--  src/core/client_config/default_initial_connect_string.c | 39
-rw-r--r--  src/core/client_config/initial_connect_string.c | 53
-rw-r--r--  src/core/client_config/initial_connect_string.h | 50
-rw-r--r--  src/core/client_config/lb_policies/load_balancer_api.c | 163
-rw-r--r--  src/core/client_config/lb_policies/load_balancer_api.h | 85
-rw-r--r--  src/core/client_config/lb_policies/pick_first.c | 415
-rw-r--r--  src/core/client_config/lb_policies/pick_first.h | 43
-rw-r--r--  src/core/client_config/lb_policies/round_robin.c | 536
-rw-r--r--  src/core/client_config/lb_policies/round_robin.h | 46
-rw-r--r--  src/core/client_config/lb_policy.c | 134
-rw-r--r--  src/core/client_config/lb_policy.h | 144
-rw-r--r--  src/core/client_config/lb_policy_factory.c (renamed from src/core/channel/noop_filter.h) | 20
-rw-r--r--  src/core/client_config/lb_policy_factory.h | 73
-rw-r--r--  src/core/client_config/lb_policy_registry.c | 88
-rw-r--r--  src/core/client_config/lb_policy_registry.h (renamed from src/core/surface/byte_buffer_queue.h) | 40
-rw-r--r--  src/core/client_config/resolver.c | 82
-rw-r--r--  src/core/client_config/resolver.h | 94
-rw-r--r--  src/core/client_config/resolver_factory.c | 55
-rw-r--r--  src/core/client_config/resolver_factory.h | 82
-rw-r--r--  src/core/client_config/resolver_registry.c | 137
-rw-r--r--  src/core/client_config/resolver_registry.h | 65
-rw-r--r--  src/core/client_config/resolvers/dns_resolver.c | 298
-rw-r--r--  src/core/client_config/resolvers/dns_resolver.h | 42
-rw-r--r--  src/core/client_config/resolvers/sockaddr_resolver.c | 391
-rw-r--r--  src/core/client_config/resolvers/sockaddr_resolver.h | 50
-rw-r--r--  src/core/client_config/resolvers/zookeeper_resolver.c | 520
-rw-r--r--  src/core/client_config/resolvers/zookeeper_resolver.h | 42
-rw-r--r--  src/core/client_config/subchannel.c | 678
-rw-r--r--  src/core/client_config/subchannel.h | 174
-rw-r--r--  src/core/client_config/subchannel_factory.c | 49
-rw-r--r--  src/core/client_config/subchannel_factory.h (renamed from src/core/channel/child_channel.h) | 51
-rw-r--r--  src/core/client_config/subchannel_index.c | 262
-rw-r--r--  src/core/client_config/subchannel_index.h | 77
-rw-r--r--  src/core/client_config/uri_parser.c | 242
-rw-r--r--  src/core/client_config/uri_parser.h | 51
-rw-r--r--  src/core/compression/algorithm_metadata.h | 53
-rw-r--r--  src/core/compression/compression_algorithm.c | 203
-rw-r--r--  src/core/compression/message_compress.c | 54
-rw-r--r--  src/core/compression/message_compress.h | 12
-rw-r--r--  src/core/debug/trace.c | 16
-rw-r--r--  src/core/debug/trace.h | 8
-rw-r--r--  src/core/httpcli/format_request.c | 8
-rw-r--r--  src/core/httpcli/format_request.h | 8
-rw-r--r--  src/core/httpcli/httpcli.c | 288
-rw-r--r--  src/core/httpcli/httpcli.h | 57
-rw-r--r--  src/core/httpcli/httpcli_security_connector.c | 90
-rw-r--r--  src/core/httpcli/httpcli_security_connector.h | 43
-rw-r--r--  src/core/httpcli/parser.c | 33
-rw-r--r--  src/core/httpcli/parser.h | 18
-rw-r--r--  src/core/iomgr/closure.c | 98
-rw-r--r--  src/core/iomgr/closure.h | 98
-rw-r--r--  src/core/iomgr/endpoint.c | 38
-rw-r--r--  src/core/iomgr/endpoint.h | 88
-rw-r--r--  src/core/iomgr/endpoint_pair.h | 8
-rw-r--r--  src/core/iomgr/endpoint_pair_posix.c | 11
-rw-r--r--  src/core/iomgr/endpoint_pair_windows.c | 26
-rw-r--r--  src/core/iomgr/exec_ctx.c | 151
-rw-r--r--  src/core/iomgr/exec_ctx.h | 98
-rw-r--r--  src/core/iomgr/executor.c | 143
-rw-r--r--  src/core/iomgr/executor.h | 53
-rw-r--r--  src/core/iomgr/fd_posix.c | 342
-rw-r--r--  src/core/iomgr/fd_posix.h | 73
-rw-r--r--  src/core/iomgr/iocp_windows.c | 155
-rw-r--r--  src/core/iomgr/iocp_windows.h | 29
-rw-r--r--  src/core/iomgr/iomgr.c | 223
-rw-r--r--  src/core/iomgr/iomgr.h | 45
-rw-r--r--  src/core/iomgr/iomgr_internal.h | 19
-rw-r--r--  src/core/iomgr/iomgr_posix.c | 10
-rw-r--r--  src/core/iomgr/iomgr_posix.h | 11
-rw-r--r--  src/core/iomgr/iomgr_windows.c | 4
-rw-r--r--  src/core/iomgr/pollset.h | 65
-rw-r--r--  src/core/iomgr/pollset_kick_posix.c | 168
-rw-r--r--  src/core/iomgr/pollset_kick_posix.h | 93
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_epoll.c | 266
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_poll_posix.c | 182
-rw-r--r--  src/core/iomgr/pollset_posix.c | 520
-rw-r--r--  src/core/iomgr/pollset_posix.h | 106
-rw-r--r--  src/core/iomgr/pollset_set.h | 32
-rw-r--r--  src/core/iomgr/pollset_set_posix.c | 103
-rw-r--r--  src/core/iomgr/pollset_set_posix.h | 28
-rw-r--r--  src/core/iomgr/pollset_set_windows.c | 26
-rw-r--r--  src/core/iomgr/pollset_set_windows.h | 10
-rw-r--r--  src/core/iomgr/pollset_windows.c | 207
-rw-r--r--  src/core/iomgr/pollset_windows.h | 43
-rw-r--r--  src/core/iomgr/resolve_address.h | 19
-rw-r--r--  src/core/iomgr/resolve_address_posix.c | 52
-rw-r--r--  src/core/iomgr/resolve_address_windows.c | 44
-rw-r--r--  src/core/iomgr/sockaddr.h | 8
-rw-r--r--  src/core/iomgr/sockaddr_posix.h | 8
-rw-r--r--  src/core/iomgr/sockaddr_utils.c | 64
-rw-r--r--  src/core/iomgr/sockaddr_utils.h | 10
-rw-r--r--  src/core/iomgr/sockaddr_win32.h | 15
-rw-r--r--  src/core/iomgr/socket_utils_posix.h | 8
-rw-r--r--  src/core/iomgr/socket_windows.c | 62
-rw-r--r--  src/core/iomgr/socket_windows.h | 33
-rw-r--r--  src/core/iomgr/tcp_client.h | 17
-rw-r--r--  src/core/iomgr/tcp_client_posix.c | 140
-rw-r--r--  src/core/iomgr/tcp_client_windows.c | 92
-rw-r--r--  src/core/iomgr/tcp_posix.c | 627
-rw-r--r--  src/core/iomgr/tcp_posix.h | 23
-rw-r--r--  src/core/iomgr/tcp_server.h | 75
-rw-r--r--  src/core/iomgr/tcp_server_posix.c | 313
-rw-r--r--  src/core/iomgr/tcp_server_windows.c | 375
-rw-r--r--  src/core/iomgr/tcp_windows.c | 341
-rw-r--r--  src/core/iomgr/tcp_windows.h | 10
-rw-r--r--  src/core/iomgr/time_averaged_stats.c | 6
-rw-r--r--  src/core/iomgr/time_averaged_stats.h | 14
-rw-r--r--  src/core/iomgr/timer.c (renamed from src/core/iomgr/alarm.c) | 249
-rw-r--r--  src/core/iomgr/timer.h (renamed from src/core/iomgr/alarm.h) | 77
-rw-r--r--  src/core/iomgr/timer_heap.c (renamed from src/core/iomgr/alarm_heap.c) | 108
-rw-r--r--  src/core/iomgr/timer_heap.h (renamed from src/core/iomgr/alarm_heap.h) | 34
-rw-r--r--  src/core/iomgr/udp_server.c | 430
-rw-r--r--  src/core/iomgr/udp_server.h | 76
-rw-r--r--  src/core/iomgr/wakeup_fd_eventfd.c | 21
-rw-r--r--  src/core/iomgr/wakeup_fd_nospecial.c | 9
-rw-r--r--  src/core/iomgr/wakeup_fd_pipe.c | 20
-rw-r--r--  src/core/iomgr/wakeup_fd_pipe.h | 8
-rw-r--r--  src/core/iomgr/wakeup_fd_posix.c | 24
-rw-r--r--  src/core/iomgr/wakeup_fd_posix.h | 30
-rw-r--r--  src/core/iomgr/workqueue.h | 83
-rw-r--r--  src/core/iomgr/workqueue_posix.c | 144
-rw-r--r--  src/core/iomgr/workqueue_posix.h | 53
-rw-r--r--  src/core/iomgr/workqueue_windows.c (renamed from src/core/surface/server_create.c) | 12
-rw-r--r--  src/core/iomgr/workqueue_windows.h | 37
-rw-r--r--  src/core/json/json.h | 32
-rw-r--r--  src/core/json/json_common.h | 8
-rw-r--r--  src/core/json/json_reader.c | 88
-rw-r--r--  src/core/json/json_reader.h | 46
-rw-r--r--  src/core/json/json_string.c | 154
-rw-r--r--  src/core/json/json_writer.c | 57
-rw-r--r--  src/core/json/json_writer.h | 38
-rw-r--r--  src/core/profiling/basic_timers.c | 272
-rw-r--r--  src/core/profiling/stap_timers.c | 16
-rw-r--r--  src/core/profiling/timers.h | 117
-rw-r--r--  src/core/proto/grpc/lb/v0/load_balancer.pb.c | 119
-rw-r--r--  src/core/proto/grpc/lb/v0/load_balancer.pb.h | 182
-rw-r--r--  src/core/security/auth_filters.h | 8
-rw-r--r--  src/core/security/b64.c (renamed from src/core/security/base64.c) | 18
-rw-r--r--  src/core/security/b64.h (renamed from src/core/security/base64.h) | 8
-rw-r--r--  src/core/security/client_auth_filter.c | 307
-rw-r--r--  src/core/security/credentials.c | 1154
-rw-r--r--  src/core/security/credentials.h | 294
-rw-r--r--  src/core/security/credentials_metadata.c | 4
-rw-r--r--  src/core/security/credentials_posix.c | 2
-rw-r--r--  src/core/security/credentials_win32.c | 2
-rw-r--r--  src/core/security/google_default_credentials.c | 194
-rw-r--r--  src/core/security/handshake.c | 336
-rw-r--r--  src/core/security/handshake.h (renamed from src/core/security/secure_transport_setup.h) | 28
-rw-r--r--  src/core/security/json_token.c | 76
-rw-r--r--  src/core/security/json_token.h | 23
-rw-r--r--  src/core/security/jwt_verifier.c | 843
-rw-r--r--  src/core/security/jwt_verifier.h | 136
-rw-r--r--  src/core/security/secure_endpoint.c | 229
-rw-r--r--  src/core/security/secure_endpoint.h | 8
-rw-r--r--  src/core/security/secure_transport_setup.c | 286
-rw-r--r--  src/core/security/security_connector.c | 562
-rw-r--r--  src/core/security/security_connector.h | 173
-rw-r--r--  src/core/security/security_context.c | 188
-rw-r--r--  src/core/security/security_context.h | 39
-rw-r--r--  src/core/security/server_auth_filter.c | 237
-rw-r--r--  src/core/security/server_secure_chttp2.c | 149
-rw-r--r--  src/core/statistics/census_init.c | 2
-rw-r--r--  src/core/statistics/census_interface.h | 16
-rw-r--r--  src/core/statistics/census_log.c | 125
-rw-r--r--  src/core/statistics/census_log.h | 14
-rw-r--r--  src/core/statistics/census_rpc_stats.c | 81
-rw-r--r--  src/core/statistics/census_rpc_stats.h | 30
-rw-r--r--  src/core/statistics/census_tracing.c | 77
-rw-r--r--  src/core/statistics/census_tracing.h | 22
-rw-r--r--  src/core/statistics/hash_table.c | 80
-rw-r--r--  src/core/statistics/hash_table.h | 44
-rw-r--r--  src/core/statistics/window_stats.c | 97
-rw-r--r--  src/core/statistics/window_stats.h | 36
-rw-r--r--  src/core/support/alloc.c | 36
-rw-r--r--  src/core/support/avl.c | 288
-rw-r--r--  src/core/support/backoff.c (renamed from src/core/surface/byte_buffer_queue.c) | 80
-rw-r--r--  src/core/support/backoff.h | 65
-rw-r--r--  src/core/support/block_annotate.h (renamed from src/core/census/context.h) | 27
-rw-r--r--  src/core/support/cancellable.c | 156
-rw-r--r--  src/core/support/cmdline.c | 114
-rw-r--r--  src/core/support/cpu_iphone.c | 8
-rw-r--r--  src/core/support/cpu_linux.c | 2
-rw-r--r--  src/core/support/cpu_posix.c | 10
-rw-r--r--  src/core/support/cpu_windows.c | 1
-rw-r--r--  src/core/support/env.h | 8
-rw-r--r--  src/core/support/env_linux.c | 29
-rw-r--r--  src/core/support/env_win32.c | 22
-rw-r--r--  src/core/support/histogram.c | 25
-rw-r--r--  src/core/support/host_port.c | 12
-rw-r--r--  src/core/support/load_file.c (renamed from src/core/support/file.c) | 10
-rw-r--r--  src/core/support/load_file.h (renamed from src/core/support/file.h) | 16
-rw-r--r--  src/core/support/log.c | 3
-rw-r--r--  src/core/support/log_linux.c | 19
-rw-r--r--  src/core/support/log_posix.c | 12
-rw-r--r--  src/core/support/log_win32.c | 23
-rw-r--r--  src/core/support/murmur_hash.c | 22
-rw-r--r--  src/core/support/murmur_hash.h | 10
-rw-r--r--  src/core/support/slice.c | 36
-rw-r--r--  src/core/support/slice_buffer.c | 86
-rw-r--r--  src/core/support/stack_lockfree.c | 185
-rw-r--r--  src/core/support/stack_lockfree.h | 53
-rw-r--r--  src/core/support/string.c | 167
-rw-r--r--  src/core/support/string.h | 46
-rw-r--r--  src/core/support/string_win32.c | 25
-rw-r--r--  src/core/support/string_win32.h | 12
-rw-r--r--  src/core/support/subprocess_windows.c | 141
-rw-r--r--  src/core/support/sync.c | 30
-rw-r--r--  src/core/support/sync_posix.c | 44
-rw-r--r--  src/core/support/sync_win32.c | 23
-rw-r--r--  src/core/support/thd.c | 12
-rw-r--r--  src/core/support/thd_internal.h | 8
-rw-r--r--  src/core/support/thd_posix.c | 23
-rw-r--r--  src/core/support/thd_win32.c | 5
-rw-r--r--  src/core/support/time.c | 202
-rw-r--r--  src/core/support/time_posix.c | 97
-rw-r--r--  src/core/support/time_precise.c (renamed from src/core/profiling/timers_preciseclock.h) | 74
-rw-r--r--  src/core/support/time_precise.h | 42
-rw-r--r--  src/core/support/time_win32.c | 58
-rw-r--r--  src/core/support/tls_pthread.c | 4
-rw-r--r--  src/core/support/tmpfile.h | 55
-rw-r--r--  src/core/support/tmpfile_posix.c (renamed from src/core/support/file_posix.c) | 4
-rw-r--r--  src/core/support/tmpfile_win32.c (renamed from src/core/support/file_win32.c) | 4
-rw-r--r--  src/core/support/wrap_memcpy.c | 53
-rw-r--r--  src/core/surface/alarm.c | 84
-rw-r--r--  src/core/surface/api_trace.c (renamed from src/core/surface/surface_trace.c) | 4
-rw-r--r--  src/core/surface/api_trace.h | 65
-rw-r--r--  src/core/surface/byte_buffer.c | 21
-rw-r--r--  src/core/surface/byte_buffer_reader.c | 18
-rw-r--r--  src/core/surface/call.c | 2171
-rw-r--r--  src/core/surface/call.h | 126
-rw-r--r--  src/core/surface/call_details.c | 8
-rw-r--r--  src/core/surface/call_log_batch.c | 28
-rw-r--r--  src/core/surface/call_test_only.h | 64
-rw-r--r--  src/core/surface/channel.c | 317
-rw-r--r--  src/core/surface/channel.h | 43
-rw-r--r--  src/core/surface/channel_connectivity.c | 220
-rw-r--r--  src/core/surface/channel_create.c | 297
-rw-r--r--  src/core/surface/channel_init.c | 148
-rw-r--r--  src/core/surface/channel_init.h | 86
-rw-r--r--  src/core/surface/channel_ping.c | 79
-rw-r--r--  src/core/surface/channel_stack_type.c | 56
-rw-r--r--  src/core/surface/channel_stack_type.h | 61
-rw-r--r--  src/core/surface/client.c | 89
-rw-r--r--  src/core/surface/completion_queue.c | 532
-rw-r--r--  src/core/surface/completion_queue.h | 42
-rw-r--r--  src/core/surface/event_string.h | 8
-rw-r--r--  src/core/surface/init.c | 167
-rw-r--r--  src/core/surface/init.h | 9
-rw-r--r--  src/core/surface/init_secure.c | 49
-rw-r--r--  src/core/surface/init_unsecure.c | 7
-rw-r--r--  src/core/surface/lame_client.c | 144
-rw-r--r--  src/core/surface/lame_client.h (renamed from src/core/surface/client.h) | 10
-rw-r--r--  src/core/surface/metadata_array.c | 8
-rw-r--r--  src/core/surface/secure_channel_create.c | 375
-rw-r--r--  src/core/surface/server.c | 1322
-rw-r--r--  src/core/surface/server.h | 35
-rw-r--r--  src/core/surface/server_chttp2.c | 66
-rw-r--r--  src/core/surface/surface_trace.h | 13
-rw-r--r--  src/core/surface/validate_metadata.c | 73
-rw-r--r--  src/core/surface/version.c | 39
-rw-r--r--  src/core/transport/byte_stream.c | 78
-rw-r--r--  src/core/transport/byte_stream.h | 89
-rw-r--r--  src/core/transport/chttp2/alpn.c | 3
-rw-r--r--  src/core/transport/chttp2/alpn.h | 8
-rw-r--r--  src/core/transport/chttp2/bin_encoder.c | 156
-rw-r--r--  src/core/transport/chttp2/bin_encoder.h | 10
-rw-r--r--  src/core/transport/chttp2/frame.h | 8
-rw-r--r--  src/core/transport/chttp2/frame_data.c | 148
-rw-r--r--  src/core/transport/chttp2/frame_data.h | 48
-rw-r--r--  src/core/transport/chttp2/frame_goaway.c | 68
-rw-r--r--  src/core/transport/chttp2/frame_goaway.h | 24
-rw-r--r--  src/core/transport/chttp2/frame_ping.c | 26
-rw-r--r--  src/core/transport/chttp2/frame_ping.h | 22
-rw-r--r--  src/core/transport/chttp2/frame_rst_stream.c | 40
-rw-r--r--  src/core/transport/chttp2/frame_rst_stream.h | 20
-rw-r--r--  src/core/transport/chttp2/frame_settings.c | 101
-rw-r--r--  src/core/transport/chttp2/frame_settings.h | 37
-rw-r--r--  src/core/transport/chttp2/frame_window_update.c | 55
-rw-r--r--  src/core/transport/chttp2/frame_window_update.h | 24
-rw-r--r--  src/core/transport/chttp2/hpack_encoder.c | 568
-rw-r--r--  src/core/transport/chttp2/hpack_encoder.h (renamed from src/core/transport/chttp2/stream_encoder.h) | 66
-rw-r--r--  src/core/transport/chttp2/hpack_parser.c | 543
-rw-r--r--  src/core/transport/chttp2/hpack_parser.h | 47
-rw-r--r--  src/core/transport/chttp2/hpack_table.c | 331
-rw-r--r--  src/core/transport/chttp2/hpack_table.h | 45
-rw-r--r--  src/core/transport/chttp2/http2_errors.h | 8
-rw-r--r--  src/core/transport/chttp2/huffsyms.c | 320
-rw-r--r--  src/core/transport/chttp2/huffsyms.h | 8
-rw-r--r--  src/core/transport/chttp2/incoming_metadata.c | 133
-rw-r--r--  src/core/transport/chttp2/incoming_metadata.h | 34
-rw-r--r--  src/core/transport/chttp2/internal.h | 587
-rw-r--r--  src/core/transport/chttp2/parsing.c | 494
-rw-r--r--  src/core/transport/chttp2/status_conversion.h | 8
-rw-r--r--  src/core/transport/chttp2/stream_encoder.c | 631
-rw-r--r--  src/core/transport/chttp2/stream_lists.c | 208
-rw-r--r--  src/core/transport/chttp2/stream_map.c | 33
-rw-r--r--  src/core/transport/chttp2/stream_map.h | 21
-rw-r--r--  src/core/transport/chttp2/timeout_encoding.c | 50
-rw-r--r--  src/core/transport/chttp2/timeout_encoding.h | 8
-rw-r--r--  src/core/transport/chttp2/varint.c | 16
-rw-r--r--  src/core/transport/chttp2/varint.h | 28
-rw-r--r--  src/core/transport/chttp2/writing.c | 355
-rw-r--r--  src/core/transport/chttp2_transport.c | 1723
-rw-r--r--  src/core/transport/chttp2_transport.h | 21
-rw-r--r--  src/core/transport/connectivity_state.c | 164
-rw-r--r--  src/core/transport/connectivity_state.h | 85
-rw-r--r--  src/core/transport/metadata.c | 731
-rw-r--r--  src/core/transport/metadata.h | 85
-rw-r--r--  src/core/transport/metadata_batch.c (renamed from src/core/transport/stream_op.c) | 192
-rw-r--r--  src/core/transport/metadata_batch.h (renamed from src/core/transport/stream_op.h) | 107
-rw-r--r--  src/core/transport/static_metadata.c | 89
-rw-r--r--  src/core/transport/static_metadata.h | 408
-rw-r--r--  src/core/transport/transport.c | 167
-rw-r--r--  src/core/transport/transport.h | 259
-rw-r--r--  src/core/transport/transport_impl.h | 49
-rw-r--r--  src/core/transport/transport_op_string.c | 98
-rw-r--r--  src/core/tsi/fake_transport_security.c | 165
-rw-r--r--  src/core/tsi/fake_transport_security.h | 14
-rw-r--r--  src/core/tsi/ssl_transport_security.c | 586
-rw-r--r--  src/core/tsi/ssl_transport_security.h | 61
-rw-r--r--  src/core/tsi/ssl_types.h | 55
-rw-r--r--  src/core/tsi/test_creds/server1.pem | 28
-rw-r--r--  src/core/tsi/transport_security.c | 76
-rw-r--r--  src/core/tsi/transport_security.h | 82
-rw-r--r--  src/core/tsi/transport_security_interface.h | 64
371 files changed, 32537 insertions, 15066 deletions
diff --git a/src/core/README.md b/src/core/README.md
index 407dc4f701..0d8c0d5bd9 100644
--- a/src/core/README.md
+++ b/src/core/README.md
@@ -5,5 +5,4 @@ Python, PHP, NodeJS, Objective-C) are layered on top of this library.
#Status
-Alpha : Ready for early adopters
-
+Beta
diff --git a/src/core/census/aggregation.h b/src/core/census/aggregation.h
new file mode 100644
index 0000000000..e0ef9630c9
--- /dev/null
+++ b/src/core/census/aggregation.h
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stddef.h>
+
+#ifndef GRPC_CORE_CENSUS_AGGREGATION_H
+#define GRPC_CORE_CENSUS_AGGREGATION_H
+
+/** Structure used to describe an aggregation type. */
+struct census_aggregation_ops {
+ /* Create a new aggregation. The pointer returned can be used in future calls
+ to clone(), free(), record(), data() and reset(). */
+ void *(*create)(const void *create_arg);
+ /* Make a copy of an aggregation created by create() */
+ void *(*clone)(const void *aggregation);
+ /* Destroy an aggregation created by create() */
+ void (*free)(void *aggregation);
+ /* Record a new value against aggregation. */
+ void (*record)(void *aggregation, double value);
+ /* Return current aggregation data. The caller must cast this object into
+ the correct type for the aggregation result. The object returned can be
+ freed by using free_data(). */
+ void *(*data)(const void *aggregation);
+ /* free data returned by data() */
+ void (*free_data)(void *data);
+ /* Reset an aggregation to default (zero) values. */
+ void (*reset)(void *aggregation);
+ /* Merge 'from' aggregation into 'to'. Both aggregations must be compatible */
+ void (*merge)(void *to, const void *from);
+ /* Fill buffer with printable string version of aggregation contents. For
+ debugging only. Returns the number of bytes added to buffer (a value == n
+ implies the buffer was of insufficient size). */
+ size_t (*print)(const void *aggregation, char *buffer, size_t n);
+};
+
+#endif /* GRPC_CORE_CENSUS_AGGREGATION_H */
diff --git a/src/core/census/context.c b/src/core/census/context.c
index df238ec98c..89b8ee0b39 100644
--- a/src/core/census/context.c
+++ b/src/core/census/context.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,29 +31,479 @@
*
*/
-#include "src/core/census/context.h"
-
-#include <string.h>
#include <grpc/census.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+#include <grpc/support/useful.h>
+#include <stdbool.h>
+#include <string.h>
+#include "src/core/support/string.h"
+
+// Functions in this file support the public context API, including
+// encoding/decoding as part of context propagation across RPC's. The overall
+// requirements (in approximate priority order) for the
+// context representation:
+// 1. Efficient conversion to/from wire format
+// 2. Minimal bytes used on-wire
+// 3. Efficient context creation
+// 4. Efficient lookup of tag value for a key
+// 5. Efficient iteration over tags
+// 6. Minimal memory footprint
+//
+// Notes on tradeoffs/decisions:
+// * tag includes 1 byte length of key, as well as nil-terminating byte. These
+// are to aid in efficient parsing and the ability to directly return key
+// strings. This is more important than saving a single byte/tag on the wire.
+// * The wire encoding uses only single byte values. This eliminates the need
+// to handle endian-ness conversions. It also means there is a hard upper
+// limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
+// * Keep all tag information (keys/values/flags) in a single memory buffer,
+// that can be directly copied to the wire.
+
+// min and max valid chars in tag keys and values. All printable ASCII is OK.
+#define MIN_VALID_TAG_CHAR 32 // ' '
+#define MAX_VALID_TAG_CHAR 126 // '~'
+
+// Structure representing a set of tags. Essentially a count of number of tags
+// present, and pointer to a chunk of memory that contains the per-tag details.
+struct tag_set {
+ int ntags; // number of tags.
+ int ntags_alloc; // ntags + number of deleted tags (total number of tags
+ // in all of kvm). This will always be == ntags, except during the process
+ // of building a new tag set.
+ size_t kvm_size; // number of bytes allocated for key/value storage.
+ size_t kvm_used; // number of bytes of used key/value memory
+ char *kvm; // key/value memory. Consists of repeated entries of:
+ // Offset Size Description
+ // 0 1 Key length, including trailing 0. (K)
+ // 1 1 Value length, including trailing 0 (V)
+ // 2 1 Flags
+ // 3 K Key bytes
+ // 3 + K V Value bytes
+ //
+ // We refer to the first 3 entries as the 'tag header'. If extra values are
+ // introduced in the header, you will need to modify the TAG_HEADER_SIZE
+ // constant, the raw_tag structure (and everything that uses it) and the
+ // encode/decode functions appropriately.
+};
+
+// Number of bytes in tag header.
+#define TAG_HEADER_SIZE 3 // key length (1) + value length (1) + flags (1)
+// Offsets to tag header entries.
+#define KEY_LEN_OFFSET 0
+#define VALUE_LEN_OFFSET 1
+#define FLAG_OFFSET 2
+
+// raw_tag represents the raw-storage form of a tag in the kvm of a tag_set.
+struct raw_tag {
+ uint8_t key_len;
+ uint8_t value_len;
+ uint8_t flags;
+ char *key;
+ char *value;
+};
+
+// Use a reserved flag bit for indication of deleted tag.
+#define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
+#define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
-/* Placeholder implementation only. */
+// Primary representation of a context. Composed of 2 underlying tag_set
+// structs, one each for propagated and local (non-propagated) tags. This is
+// to efficiently support tag encoding/decoding.
+// TODO(aveitch): need to add tracing id's/structure.
+struct census_context {
+ struct tag_set tags[2];
+ census_context_status status;
+};
+
+// Indices into the tags member of census_context
+#define PROPAGATED_TAGS 0
+#define LOCAL_TAGS 1
+
+// Validate (check all characters are in range and size is less than limit) a
+// key or value string. Returns 0 if the string is invalid, or the length
+// (including terminator) if valid.
+static size_t validate_tag(const char *kv) {
+ size_t len = 1;
+ char ch;
+ while ((ch = *kv++) != 0) {
+ if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
+ return 0;
+ }
+ len++;
+ }
+ if (len > CENSUS_MAX_TAG_KV_LEN) {
+ return 0;
+ }
+ return len;
+}
+
+// Extract a raw tag given a pointer (raw) to the tag header. Allow for some
+// extra bytes in the tag header (see encode/decode functions for usage: this
+// allows for future expansion of the tag header).
+static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
+ tag->key_len = (uint8_t)(*header++);
+ tag->value_len = (uint8_t)(*header++);
+ tag->flags = (uint8_t)(*header++);
+ header += offset;
+ tag->key = header;
+ header += tag->key_len;
+ tag->value = header;
+ return header + tag->value_len;
+}
+
+// Make a copy (in 'to') of an existing tag_set.
+static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
+ memcpy(to, from, sizeof(struct tag_set));
+ to->kvm = gpr_malloc(to->kvm_size);
+ memcpy(to->kvm, from->kvm, from->kvm_used);
+}
-size_t census_context_serialize(const census_context *context, char *buffer,
- size_t buf_size) {
- /* TODO(aveitch): implement serialization */
+// Delete a tag from a tag_set, if it exists (returns true if it did).
+static bool tag_set_delete_tag(struct tag_set *tags, const char *key,
+ size_t key_len) {
+ char *kvp = tags->kvm;
+ for (int i = 0; i < tags->ntags_alloc; i++) {
+ uint8_t *flags = (uint8_t *)(kvp + FLAG_OFFSET);
+ struct raw_tag tag;
+ kvp = decode_tag(&tag, kvp, 0);
+ if (CENSUS_TAG_IS_DELETED(tag.flags)) continue;
+ if ((key_len == tag.key_len) && (memcmp(key, tag.key, key_len) == 0)) {
+ *flags |= CENSUS_TAG_DELETED;
+ tags->ntags--;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Delete a tag from a context, return true if it existed.
+static bool context_delete_tag(census_context *context, const census_tag *tag,
+ size_t key_len) {
+ return (
+ tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
+ tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
+}
+
+// Add a tag to a tag_set. Return true on success, false if the tag could
+// not be added because of constraints on tag set size. This function should
+// not be called if the tag may already exist (in a non-deleted state) in
+// the tag_set, as that would result in two tags with the same key.
+static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
+ size_t key_len, size_t value_len) {
+ if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
+ return false;
+ }
+ const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
+ if (tags->kvm_used + tag_size > tags->kvm_size) {
+ // allocate new memory if needed
+ tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
+ char *new_kvm = gpr_malloc(tags->kvm_size);
+ memcpy(new_kvm, tags->kvm, tags->kvm_used);
+ gpr_free(tags->kvm);
+ tags->kvm = new_kvm;
+ }
+ char *kvp = tags->kvm + tags->kvm_used;
+ *kvp++ = (char)key_len;
+ *kvp++ = (char)value_len;
+ // ensure reserved flags are not used.
+ *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
+ memcpy(kvp, tag->key, key_len);
+ kvp += key_len;
+ memcpy(kvp, tag->value, value_len);
+ tags->kvm_used += tag_size;
+ tags->ntags++;
+ tags->ntags_alloc++;
+ return true;
+}
+
+// Add/modify/delete a tag to/in a context. Caller must validate that tag key
+// etc. are valid.
+static void context_modify_tag(census_context *context, const census_tag *tag,
+ size_t key_len, size_t value_len) {
+ // First delete the tag if it is already present.
+ bool deleted = context_delete_tag(context, tag, key_len);
+ bool added = false;
+ if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
+ added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
+ value_len);
+ } else {
+ added =
+ tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
+ }
+
+ if (deleted) {
+ context->status.n_modified_tags++;
+ } else {
+ if (added) {
+ context->status.n_added_tags++;
+ } else {
+ context->status.n_ignored_tags++;
+ }
+ }
+}
+
+// Remove memory used for deleted tags from a tag set. Basic algorithm:
+// 1) Walk through tag set to find first deleted tag. Record where it is.
+// 2) Find the next not-deleted tag. Copy all of kvm from there to the end
+// "over" the deleted tags
+// 3) repeat #1 and #2 until we have seen all tags
+// 4) if we are still looking for a not-deleted tag, then all the end portion
+// of the kvm is deleted. Just reduce the used amount of memory by the
+// appropriate amount.
+static void tag_set_flatten(struct tag_set *tags) {
+ if (tags->ntags == tags->ntags_alloc) return;
+ bool found_deleted = false; // found a deleted tag.
+ char *kvp = tags->kvm;
+ char *dbase = NULL; // record location of deleted tag
+ for (int i = 0; i < tags->ntags_alloc; i++) {
+ struct raw_tag tag;
+ char *next_kvp = decode_tag(&tag, kvp, 0);
+ if (found_deleted) {
+ if (!CENSUS_TAG_IS_DELETED(tag.flags)) {
+ ptrdiff_t reduce = kvp - dbase; // #bytes in deleted tags
+ GPR_ASSERT(reduce > 0);
+ ptrdiff_t copy_size = tags->kvm + tags->kvm_used - kvp;
+ GPR_ASSERT(copy_size > 0);
+ memmove(dbase, kvp, (size_t)copy_size);
+ tags->kvm_used -= (size_t)reduce;
+ next_kvp -= reduce;
+ found_deleted = false;
+ }
+ } else {
+ if (CENSUS_TAG_IS_DELETED(tag.flags)) {
+ dbase = kvp;
+ found_deleted = true;
+ }
+ }
+ kvp = next_kvp;
+ }
+ if (found_deleted) {
+ GPR_ASSERT(dbase > tags->kvm);
+ tags->kvm_used = (size_t)(dbase - tags->kvm);
+ }
+ tags->ntags_alloc = tags->ntags;
+}
+
+census_context *census_context_create(const census_context *base,
+ const census_tag *tags, int ntags,
+ census_context_status const **status) {
+ census_context *context = gpr_malloc(sizeof(census_context));
+ // If we are given a base, copy it into our new tag set. Otherwise set it
+ // to zero/NULL everything.
+ if (base == NULL) {
+ memset(context, 0, sizeof(census_context));
+ } else {
+ tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
+ tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
+ memset(&context->status, 0, sizeof(context->status));
+ }
+ // Walk over the additional tags and, for those that aren't invalid, modify
+ // the context to add/replace/delete as required.
+ for (int i = 0; i < ntags; i++) {
+ const census_tag *tag = &tags[i];
+ size_t key_len = validate_tag(tag->key);
+ // ignore the tag if it is invalid or too short.
+ if (key_len <= 1) {
+ context->status.n_invalid_tags++;
+ } else {
+ if (tag->value != NULL) {
+ size_t value_len = validate_tag(tag->value);
+ if (value_len != 0) {
+ context_modify_tag(context, tag, key_len, value_len);
+ } else {
+ context->status.n_invalid_tags++;
+ }
+ } else {
+ if (context_delete_tag(context, tag, key_len)) {
+ context->status.n_deleted_tags++;
+ }
+ }
+ }
+ }
+ // Remove any deleted tags, update status if needed, and return.
+ tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
+ tag_set_flatten(&context->tags[LOCAL_TAGS]);
+ context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
+ context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
+ if (status) {
+ *status = &context->status;
+ }
+ return context;
+}
+
+const census_context_status *census_context_get_status(
+ const census_context *context) {
+ return &context->status;
+}
+
+void census_context_destroy(census_context *context) {
+ gpr_free(context->tags[PROPAGATED_TAGS].kvm);
+ gpr_free(context->tags[LOCAL_TAGS].kvm);
+ gpr_free(context);
+}
+
+void census_context_initialize_iterator(const census_context *context,
+ census_context_iterator *iterator) {
+ iterator->context = context;
+ iterator->index = 0;
+ if (context->tags[PROPAGATED_TAGS].ntags != 0) {
+ iterator->base = PROPAGATED_TAGS;
+ iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
+ } else if (context->tags[LOCAL_TAGS].ntags != 0) {
+ iterator->base = LOCAL_TAGS;
+ iterator->kvm = context->tags[LOCAL_TAGS].kvm;
+ } else {
+ iterator->base = -1;
+ }
+}
+
+int census_context_next_tag(census_context_iterator *iterator,
+ census_tag *tag) {
+ if (iterator->base < 0) {
+ return 0;
+ }
+ struct raw_tag raw;
+ iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
+ tag->key = raw.key;
+ tag->value = raw.value;
+ tag->flags = raw.flags;
+ if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
+ do {
+ if (iterator->base == LOCAL_TAGS) {
+ iterator->base = -1;
+ return 1;
+ }
+ } while (iterator->context->tags[++iterator->base].ntags == 0);
+ iterator->index = 0;
+ iterator->kvm = iterator->context->tags[iterator->base].kvm;
+ }
+ return 1;
+}
+
+// Find a tag in a tag_set by key. Return true if found, false otherwise.
+static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
+ size_t key_len, census_tag *tag) {
+ char *kvp = tags->kvm;
+ for (int i = 0; i < tags->ntags; i++) {
+ struct raw_tag raw;
+ kvp = decode_tag(&raw, kvp, 0);
+ if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
+ tag->key = raw.key;
+ tag->value = raw.value;
+ tag->flags = raw.flags;
+ return true;
+ }
+ }
+ return false;
+}
+
+int census_context_get_tag(const census_context *context, const char *key,
+ census_tag *tag) {
+ size_t key_len = strlen(key) + 1;
+ if (key_len == 1) {
+ return 0;
+ }
+ if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
+ tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
+ return 1;
+ }
return 0;
}
-int census_context_deserialize(const char *buffer, census_context **context) {
- int ret = 0;
- if (buffer != NULL) {
- /* TODO(aveitch): implement deserialization. */
- ret = 1;
+// Context encoding and decoding functions.
+//
+// Wire format for tag_set's on the wire:
+//
+// First, a tag set header:
+//
+// offset bytes description
+// 0 1 version number
+// 1 1 number of bytes in this header. This allows for future
+// expansion.
+// 2 1 number of bytes in each tag header.
+// 3 1 ntags value from tag set.
+//
+// This is followed by the key/value memory from struct tag_set.
+
+#define ENCODED_VERSION 0 // Version number
+#define ENCODED_HEADER_SIZE 4 // size of tag set header
+
+// Encode a tag set. Returns 0 if buffer is too small.
+static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
+ size_t buf_size) {
+ if (buf_size < ENCODED_HEADER_SIZE + tags->kvm_used) {
+ return 0;
}
- *context = gpr_malloc(sizeof(census_context));
- memset(*context, 0, sizeof(census_context));
- return ret;
+ buf_size -= ENCODED_HEADER_SIZE;
+ *buffer++ = (char)ENCODED_VERSION;
+ *buffer++ = (char)ENCODED_HEADER_SIZE;
+ *buffer++ = (char)TAG_HEADER_SIZE;
+ *buffer++ = (char)tags->ntags;
+ if (tags->ntags == 0) {
+ return ENCODED_HEADER_SIZE;
+ }
+ memcpy(buffer, tags->kvm, tags->kvm_used);
+ return ENCODED_HEADER_SIZE + tags->kvm_used;
}
-void census_context_destroy(census_context *context) { gpr_free(context); }
+size_t census_context_encode(const census_context *context, char *buffer,
+ size_t buf_size) {
+ return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
+}
+
+// Decode a tag set.
+static void tag_set_decode(struct tag_set *tags, const char *buffer,
+ size_t size) {
+ uint8_t version = (uint8_t)(*buffer++);
+ uint8_t header_size = (uint8_t)(*buffer++);
+ uint8_t tag_header_size = (uint8_t)(*buffer++);
+ tags->ntags = tags->ntags_alloc = (int)(*buffer++);
+ if (tags->ntags == 0) {
+ tags->ntags_alloc = 0;
+ tags->kvm_size = 0;
+ tags->kvm_used = 0;
+ tags->kvm = NULL;
+ return;
+ }
+ if (header_size != ENCODED_HEADER_SIZE) {
+ GPR_ASSERT(version != ENCODED_VERSION);
+ GPR_ASSERT(ENCODED_HEADER_SIZE < header_size);
+ buffer += (header_size - ENCODED_HEADER_SIZE);
+ }
+ tags->kvm_used = size - header_size;
+ tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
+ tags->kvm = gpr_malloc(tags->kvm_size);
+ if (tag_header_size != TAG_HEADER_SIZE) {
+ // something new in the tag information. I don't understand it, so
+ // don't copy it over.
+ GPR_ASSERT(version != ENCODED_VERSION);
+ GPR_ASSERT(tag_header_size > TAG_HEADER_SIZE);
+ char *kvp = tags->kvm;
+ for (int i = 0; i < tags->ntags; i++) {
+ memcpy(kvp, buffer, TAG_HEADER_SIZE);
+ kvp += header_size;
+ struct raw_tag raw;
+ buffer =
+ decode_tag(&raw, (char *)buffer, tag_header_size - TAG_HEADER_SIZE);
+ memcpy(kvp, raw.key, (size_t)raw.key_len + raw.value_len);
+ kvp += raw.key_len + raw.value_len;
+ }
+ } else {
+ memcpy(tags->kvm, buffer, tags->kvm_used);
+ }
+}
+
+census_context *census_context_decode(const char *buffer, size_t size) {
+ census_context *context = gpr_malloc(sizeof(census_context));
+ memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
+ if (buffer == NULL) {
+ memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
+ } else {
+ tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
+ }
+ memset(&context->status, 0, sizeof(context->status));
+ context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
+ return context;
+}
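For reference, a minimal usage sketch of the tag-based context API introduced above (not part of the diff itself). It assumes the census_tag struct initializes as {key, value, flags}, and the 1024-byte buffer and tag names are arbitrary illustrative choices:

#include <stddef.h>
#include <grpc/census.h>

static void census_context_round_trip_sketch(void) {
  /* One propagated tag (goes on the wire) and one local-only tag. */
  census_tag tags[2] = {
      {"method", "/pkg.Service/Method", CENSUS_TAG_PROPAGATE},
      {"debug_id", "abc123", 0}};
  const census_context_status *status;
  census_context *ctx = census_context_create(NULL, tags, 2, &status);

  /* Encode writes only the PROPAGATED_TAGS set, using the wire format
     described in the comments above (4-byte header + raw kvm bytes).
     A return of 0 means the buffer was too small. */
  char buf[1024];
  size_t encoded = census_context_encode(ctx, buf, sizeof(buf));
  if (encoded == 0) {
    census_context_destroy(ctx);
    return;
  }

  /* Simulate the receiving side: decode and look the tag back up. */
  census_context *remote = census_context_decode(buf, encoded);
  census_tag out;
  if (census_context_get_tag(remote, "method", &out)) {
    /* out.value is "/pkg.Service/Method"; out.flags carries the
       propagation bit. */
  }

  census_context_destroy(remote);
  census_context_destroy(ctx);
}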
diff --git a/src/core/census/grpc_context.c b/src/core/census/grpc_context.c
index cf2353199f..4b61382a2c 100644
--- a/src/core/census/grpc_context.c
+++ b/src/core/census/grpc_context.c
@@ -32,14 +32,22 @@
*/
#include <grpc/census.h>
-#include "src/core/census/grpc_context.h"
+#include <grpc/grpc.h>
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/call.h"
-void *grpc_census_context_create() {
- census_context *context;
- census_context_deserialize(NULL, &context);
- return (void *)context;
+void grpc_census_call_set_context(grpc_call *call, census_context *context) {
+ GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
+ (call, context));
+ if (census_enabled() == CENSUS_FEATURE_NONE) {
+ return;
+ }
+ if (context != NULL) {
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, NULL);
+ }
}
-void grpc_census_context_destroy(void *context) {
- census_context_destroy((census_context *)context);
+census_context *grpc_census_call_get_context(grpc_call *call) {
+ GRPC_API_TRACE("grpc_census_call_get_context(call=%p)", 1, (call));
+ return (census_context *)grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
}
diff --git a/src/core/census/grpc_filter.c b/src/core/census/grpc_filter.c
new file mode 100644
index 0000000000..c8aaf31e2d
--- /dev/null
+++ b/src/core/census/grpc_filter.c
@@ -0,0 +1,184 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/census/grpc_filter.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/census.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/time.h>
+
+#include "src/core/channel/channel_stack.h"
+#include "src/core/statistics/census_interface.h"
+#include "src/core/statistics/census_rpc_stats.h"
+#include "src/core/transport/static_metadata.h"
+
+typedef struct call_data {
+ census_op_id op_id;
+ census_context *ctxt;
+ gpr_timespec start_ts;
+ int error;
+
+ /* recv callback */
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_closure *on_done_recv;
+ grpc_closure finish_recv;
+} call_data;
+
+typedef struct channel_data { uint8_t unused; } channel_data;
+
+static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
+ call_data *calld,
+ channel_data *chand) {
+ grpc_linked_mdelem *m;
+ for (m = md->list.head; m != NULL; m = m->next) {
+ if (m->md->key == GRPC_MDSTR_PATH) {
+ gpr_log(GPR_DEBUG, "%s",
+ (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
+ /* Add method tag here */
+ }
+ }
+}
+
+static void client_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ if (op->send_initial_metadata) {
+ extract_and_annotate_method_tag(op->send_initial_metadata, calld, chand);
+ }
+}
+
+static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ client_mutate_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
+}
+
+static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
+ bool success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ if (success) {
+ extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
+ }
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
+}
+
+static void server_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+ if (op->recv_initial_metadata) {
+ /* substitute our callback for the op callback */
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->finish_recv;
+ }
+}
+
+static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ /* TODO(ctiller): this code fails. I don't know why. I expect it's
+ incomplete, and someone should look at it soon.
+
+ call_data *calld = elem->call_data;
+ GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
+ server_mutate_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
+}
+
+static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ call_data *d = elem->call_data;
+ GPR_ASSERT(d != NULL);
+ memset(d, 0, sizeof(*d));
+ d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+}
+
+static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *d = elem->call_data;
+ GPR_ASSERT(d != NULL);
+ /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
+}
+
+static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ call_data *d = elem->call_data;
+ GPR_ASSERT(d != NULL);
+ memset(d, 0, sizeof(*d));
+ d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+ /* TODO(hongyu): call census_tracing_start_op here. */
+ grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
+}
+
+static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *d = elem->call_data;
+ GPR_ASSERT(d != NULL);
+ /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
+}
+
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(chand != NULL);
+}
+
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(chand != NULL);
+}
+
+const grpc_channel_filter grpc_client_census_filter = {
+ client_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ client_init_call_elem, grpc_call_stack_ignore_set_pollset,
+ client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
+ destroy_channel_elem, grpc_call_next_get_peer, "census-client"};
+
+const grpc_channel_filter grpc_server_census_filter = {
+ server_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ server_init_call_elem, grpc_call_stack_ignore_set_pollset,
+ server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
+ destroy_channel_elem, grpc_call_next_get_peer, "census-server"};
diff --git a/src/core/channel/census_filter.h b/src/core/census/grpc_filter.h
index 4f9759f0db..4699e4d692 100644
--- a/src/core/channel/census_filter.h
+++ b/src/core/census/grpc_filter.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H
+#ifndef GRPC_CORE_CENSUS_GRPC_FILTER_H
+#define GRPC_CORE_CENSUS_GRPC_FILTER_H
#include "src/core/channel/channel_stack.h"
@@ -41,4 +41,4 @@
extern const grpc_channel_filter grpc_client_census_filter;
extern const grpc_channel_filter grpc_server_census_filter;
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H */
+#endif /* GRPC_CORE_CENSUS_GRPC_FILTER_H */
diff --git a/src/core/census/grpc_plugin.c b/src/core/census/grpc_plugin.c
new file mode 100644
index 0000000000..3be2a48eb8
--- /dev/null
+++ b/src/core/census/grpc_plugin.c
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/census/grpc_plugin.h"
+
+#include <limits.h>
+
+#include <grpc/census.h>
+
+#include "src/core/census/grpc_filter.h"
+#include "src/core/surface/channel_init.h"
+#include "src/core/channel/channel_stack_builder.h"
+
+static bool maybe_add_census_filter(grpc_channel_stack_builder *builder,
+ void *arg_must_be_null) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (grpc_channel_args_is_census_enabled(args)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, &grpc_client_census_filter, NULL, NULL);
+ }
+ return true;
+}
+
+void census_grpc_plugin_init(void) {
+ /* Only initialize census if no one else has and some features are
+ * available. */
+ if (census_enabled() == CENSUS_FEATURE_NONE &&
+ census_supported() != CENSUS_FEATURE_NONE) {
+ if (census_initialize(census_supported())) { /* enable all features. */
+ gpr_log(GPR_ERROR, "Could not initialize census.");
+ }
+ }
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
+ maybe_add_census_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_UCHANNEL, INT_MAX,
+ maybe_add_census_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_census_filter, NULL);
+}
+
+void census_grpc_plugin_destroy(void) { census_shutdown(); }
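A rough sketch of how these hooks might be wired in, assuming the public grpc_register_plugin() entry point in <grpc/grpc.h>; the registration site shown here is illustrative, not part of this change:

#include <grpc/grpc.h>

#include "src/core/census/grpc_plugin.h"

int main(void) {
  /* Plugins must be registered before grpc_init(); grpc_init() then runs
     census_grpc_plugin_init() and grpc_shutdown() runs the destroy hook. */
  grpc_register_plugin(census_grpc_plugin_init, census_grpc_plugin_destroy);
  grpc_init();
  /* ... create channels/servers; the census filters above get prepended to
     client/server stacks when census is enabled ... */
  grpc_shutdown();
  return 0;
}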
diff --git a/src/core/census/grpc_context.h b/src/core/census/grpc_plugin.h
index f610f6ce21..9321c2c30f 100644
--- a/src/core/census/grpc_context.h
+++ b/src/core/census/grpc_plugin.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,10 @@
*
*/
-/* GRPC <--> CENSUS context interface */
+#ifndef GRPC_CORE_CENSUS_GRPC_PLUGIN_H
+#define GRPC_CORE_CENSUS_GRPC_PLUGIN_H
-#ifndef CENSUS_GRPC_CONTEXT_H
-#define CENSUS_GRPC_CONTEXT_H
+void census_grpc_plugin_init(void);
+void census_grpc_plugin_destroy(void);
-void *grpc_census_context_create();
-void grpc_census_context_destroy(void *context);
-
-#endif /* CENSUS_GRPC_CONTEXT_H */
+#endif /* GRPC_CORE_CENSUS_GRPC_PLUGIN_H */
diff --git a/src/core/census/initialize.c b/src/core/census/initialize.c
index 057ac78ee7..ce7ec09b89 100644
--- a/src/core/census/initialize.c
+++ b/src/core/census/initialize.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,18 +33,22 @@
#include <grpc/census.h>
-static int census_fns_enabled = CENSUS_NONE;
+static int features_enabled = CENSUS_FEATURE_NONE;
-int census_initialize(int functions) {
- if (census_fns_enabled != CENSUS_NONE) {
+int census_initialize(int features) {
+ if (features_enabled != CENSUS_FEATURE_NONE) {
+ // Must have been a previous call to census_initialize; return error
return 1;
}
- if (functions != CENSUS_NONE) {
- return 1;
- } else {
- census_fns_enabled = functions;
- return 0;
- }
+ features_enabled = features;
+ return 0;
+}
+
+void census_shutdown(void) { features_enabled = CENSUS_FEATURE_NONE; }
+
+int census_supported(void) {
+ /* TODO(aveitch): improve this as we implement features... */
+ return CENSUS_FEATURE_NONE;
}
-void census_shutdown() { census_fns_enabled = CENSUS_NONE; }
+int census_enabled(void) { return features_enabled; }
diff --git a/src/core/census/mlog.c b/src/core/census/mlog.c
new file mode 100644
index 0000000000..a2cc46d3f2
--- /dev/null
+++ b/src/core/census/mlog.c
@@ -0,0 +1,600 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Implements an efficient in-memory log, optimized for multiple writers and
+// a single reader. Available log space is divided up in blocks of
+// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following
+// three data structures:
+// - Free blocks (free_block_list)
+// - Blocks with unread data (dirty_block_list)
+// - Blocks currently attached to cores (core_local_blocks[])
+//
+// census_log_start_write() moves a block from core_local_blocks[] to the end of
+// dirty_block_list when block:
+// - is out-of-space OR
+// - has an incomplete record (an incomplete record occurs when a thread calls
+// census_log_start_write() and is context-switched before calling
+// census_log_end_write()
+// So, blocks in dirty_block_list are ordered, from oldest to newest, by the
+// time when block is detached from the core.
+//
+// census_log_read_next() first iterates over dirty_block_list and then
+// core_local_blocks[]. It moves completely read blocks from dirty_block_list
+// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
+// completely read.
+//
+// If the log is configured to discard old records and free_block_list is empty,
+// census_log_start_write() iterates over dirty_block_list to allocate a
+// new block. It moves the oldest available block (no pending read/write) to
+// core_local_blocks[].
+//
+// The cl_core_local_block struct implements a map from core id to the block
+// associated with that core. This mapping is advisory. It is possible that the
+// block returned by this mapping is no longer associated with that core. This
+// mapping is updated, lazily, by census_log_start_write().
+//
+// Locking in block struct:
+//
+// Exclusive g_log.lock must be held before calling any functions operating on
+// block structs except census_log_start_write() and census_log_end_write().
+//
+// Writes to a block are serialized via writer_lock. census_log_start_write()
+// acquires this lock and census_log_end_write() releases it. On failure to
+// acquire the lock, writer allocates a new block for the current core and
+// updates core_local_block accordingly.
+//
+// Simultaneous read and write access is allowed. Readers can safely read up to
+// committed bytes (bytes_committed).
+//
+// reader_lock protects the block currently being read from getting recycled.
+// start_read() acquires reader_lock and end_read() releases the lock.
+//
+// Read/write access to a block is disabled via try_disable_access(). It returns
+// with both writer_lock and reader_lock held. These locks are subsequently
+// released by enable_access() to enable access to the block.
+//
+// A note on naming: most function/struct names are prefixed with cl_
+// (shorthand for census_log). Further, functions that manipulate structures
+// include the name of the structure, which will be passed as the first
+// argument. E.g. cl_block_initialize() will initialize a cl_block.
+
+#include "src/core/census/mlog.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+#include <stdbool.h>
+#include <string.h>
+
+// End of platform specific code
+
+typedef struct census_log_block_list_struct {
+ struct census_log_block_list_struct* next;
+ struct census_log_block_list_struct* prev;
+ struct census_log_block* block;
+} cl_block_list_struct;
+
+typedef struct census_log_block {
+ // Pointer to underlying buffer.
+ char* buffer;
+ gpr_atm writer_lock;
+ gpr_atm reader_lock;
+ // Keeps completely written bytes. Declared atomic because accessed
+ // simultaneously by reader and writer.
+ gpr_atm bytes_committed;
+ // Bytes already read.
+ size_t bytes_read;
+ // Links for list.
+ cl_block_list_struct link;
+// We want this structure to be cacheline aligned. We assume the following
+// sizes for the various parts on 32/64bit systems:
+// type 32b size 64b size
+// char* 4 8
+// 3x gpr_atm 12 24
+// size_t 4 8
+// cl_block_list_struct 12 24
+// TOTAL 32 64
+//
+// Depending on the size of our cacheline and the architecture, we
+// selectively add char buffering to this structure. The size is checked
+// via assert in census_log_initialize().
+#if defined(GPR_ARCH_64)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_BLOCK_PAD_SIZE > 0
+ char padding[CL_BLOCK_PAD_SIZE];
+#endif
+} cl_block;
+
+// A list of cl_blocks, doubly-linked through cl_block::link.
+typedef struct census_log_block_list {
+ int32_t count; // Number of items in list.
+ cl_block_list_struct ht; // head/tail of linked list.
+} cl_block_list;
+
+// Cacheline-aligned block pointers to avoid false sharing. The block pointer
+// must be initialized via set_block() before calling other functions.
+typedef struct census_log_core_local_block {
+ gpr_atm block;
+// Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8
+#if defined(GPR_ARCH_64)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
+ char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
+#endif
+} cl_core_local_block;
+
+struct census_log {
+ int discard_old_records;
+ // Number of cores (aka hardware-contexts)
+ unsigned num_cores;
+ // Number of CENSUS_LOG_MAX_RECORD_SIZE-byte blocks in the log.
+ uint32_t num_blocks;
+ cl_block* blocks; // Block metadata.
+ cl_core_local_block* core_local_blocks; // Keeps core to block mappings.
+ gpr_mu lock;
+ int initialized; // has log been initialized?
+ // Keeps the state of the reader iterator. A value of 0 indicates that
+ // iterator has reached the end. census_log_init_reader() resets the value
+ // to num_cores to restart iteration.
+ uint32_t read_iterator_state;
+ // Points to the block being read. If non-NULL, the block is locked for
+ // reading (block_being_read->reader_lock is held).
+ cl_block* block_being_read;
+ char* buffer;
+ cl_block_list free_block_list;
+ cl_block_list dirty_block_list;
+ gpr_atm out_of_space_count;
+};
+
+// Single internal log.
+static struct census_log g_log;
+
+// Functions that operate on an atomic memory location used as a lock.
+
+// Returns non-zero if lock is acquired.
+static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
+
+static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
+
+// Functions that operate on cl_core_local_block's.
+
+static void cl_core_local_block_set_block(cl_core_local_block* clb,
+ cl_block* block) {
+ gpr_atm_rel_store(&clb->block, (gpr_atm)block);
+}
+
+static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
+ return (cl_block*)gpr_atm_acq_load(&clb->block);
+}
+
+// Functions that operate on cl_block_list_struct's.
+
+static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
+ cl_block* block) {
+ bls->next = bls->prev = bls;
+ bls->block = block;
+}
+
+// Functions that operate on cl_block_list's.
+
+static void cl_block_list_initialize(cl_block_list* list) {
+ list->count = 0;
+ cl_block_list_struct_initialize(&list->ht, NULL);
+}
+
+// Returns the head of the list, or NULL if the list is empty.
+static cl_block* cl_block_list_head(cl_block_list* list) {
+ return list->ht.next->block;
+}
+
+// Insert element *e after *pos.
+static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
+ cl_block_list_struct* e) {
+ list->count++;
+ e->next = pos->next;
+ e->prev = pos;
+ e->next->prev = e;
+ e->prev->next = e;
+}
+
+// Insert block at the head of the list
+static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
+ cl_block_list_insert(list, &list->ht, &block->link);
+}
+
+// Insert block at the tail of the list.
+static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
+ cl_block_list_insert(list, list->ht.prev, &block->link);
+}
+
+// Removes block *b. Requires *b be in the list.
+static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
+ list->count--;
+ b->link.next->prev = b->link.prev;
+ b->link.prev->next = b->link.next;
+}
+
+// Functions that operate on cl_block's
+
+static void cl_block_initialize(cl_block* block, char* buffer) {
+ block->buffer = buffer;
+ gpr_atm_rel_store(&block->writer_lock, 0);
+ gpr_atm_rel_store(&block->reader_lock, 0);
+ gpr_atm_rel_store(&block->bytes_committed, 0);
+ block->bytes_read = 0;
+ cl_block_list_struct_initialize(&block->link, block);
+}
+
+// Guards against exposing a partially written buffer to the reader.
+static void cl_block_set_bytes_committed(cl_block* block,
+ size_t bytes_committed) {
+ gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
+}
+
+static size_t cl_block_get_bytes_committed(cl_block* block) {
+ return (size_t)gpr_atm_acq_load(&block->bytes_committed);
+}
+
+// Tries to disable future read/write access to this block. Succeeds if:
+// - no in-progress write AND
+// - no in-progress read AND
+// - 'discard_data' set to true OR no unread data
+// On success, clears the block state and returns with writer_lock and
+// reader_lock held. These locks are released by a subsequent
+// cl_block_enable_access() call.
+static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
+ if (!cl_try_lock(&block->writer_lock)) {
+ return false;
+ }
+ if (!cl_try_lock(&block->reader_lock)) {
+ cl_unlock(&block->writer_lock);
+ return false;
+ }
+ if (!discard_data &&
+ (block->bytes_read != cl_block_get_bytes_committed(block))) {
+ cl_unlock(&block->reader_lock);
+ cl_unlock(&block->writer_lock);
+ return false;
+ }
+ cl_block_set_bytes_committed(block, 0);
+ block->bytes_read = 0;
+ return true;
+}
+
+static void cl_block_enable_access(cl_block* block) {
+ cl_unlock(&block->reader_lock);
+ cl_unlock(&block->writer_lock);
+}
+
+// Returns with writer_lock held.
+static void* cl_block_start_write(cl_block* block, size_t size) {
+ if (!cl_try_lock(&block->writer_lock)) {
+ return NULL;
+ }
+ size_t bytes_committed = cl_block_get_bytes_committed(block);
+ if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
+ cl_unlock(&block->writer_lock);
+ return NULL;
+ }
+ return block->buffer + bytes_committed;
+}
+
+// Releases writer_lock and increments committed bytes by 'bytes_written'.
+// 'bytes_written' must be <= the 'size' specified in the corresponding
+// cl_block_start_write() call. This function is thread-safe.
+static void cl_block_end_write(cl_block* block, size_t bytes_written) {
+ cl_block_set_bytes_committed(
+ block, cl_block_get_bytes_committed(block) + bytes_written);
+ cl_unlock(&block->writer_lock);
+}
+
+// Returns a pointer to the first unread byte in the buffer. The number of
+// bytes available is returned in 'bytes_available'. Acquires the reader lock,
+// which is released by a subsequent cl_block_end_read() call. Returns NULL if:
+// - read in progress
+// - no data available
+static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
+ if (!cl_try_lock(&block->reader_lock)) {
+ return NULL;
+ }
+ // bytes_committed may change from under us. Use bytes_available to update
+ // bytes_read below.
+ size_t bytes_committed = cl_block_get_bytes_committed(block);
+ GPR_ASSERT(bytes_committed >= block->bytes_read);
+ *bytes_available = bytes_committed - block->bytes_read;
+ if (*bytes_available == 0) {
+ cl_unlock(&block->reader_lock);
+ return NULL;
+ }
+ void* record = block->buffer + block->bytes_read;
+ block->bytes_read += *bytes_available;
+ return record;
+}
+
+static void cl_block_end_read(cl_block* block) {
+ cl_unlock(&block->reader_lock);
+}
+
+// Internal functions operating on g_log
+
+// Allocates a new free block (or recycles an available dirty block if log is
+// configured to discard old records). Returns NULL if out-of-space.
+static cl_block* cl_allocate_block(void) {
+ cl_block* block = cl_block_list_head(&g_log.free_block_list);
+ if (block != NULL) {
+ cl_block_list_remove(&g_log.free_block_list, block);
+ return block;
+ }
+ if (!g_log.discard_old_records) {
+ // No free block and log is configured to keep old records.
+ return NULL;
+ }
+ // Recycle dirty block. Start from the oldest.
+ for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
+ block = block->link.next->block) {
+ if (cl_block_try_disable_access(block, 1 /* discard data */)) {
+ cl_block_list_remove(&g_log.dirty_block_list, block);
+ return block;
+ }
+ }
+ return NULL;
+}
+
+// Allocates a new block and updates core id => block mapping. 'old_block'
+// points to the block that the caller thinks is attached to
+// 'core_id'. 'old_block' may be NULL. Returns true if:
+// - allocated a new block OR
+// - 'core_id' => 'old_block' mapping changed (another thread allocated a
+// block before lock was acquired).
+static bool cl_allocate_core_local_block(uint32_t core_id,
+ cl_block* old_block) {
+ // Now that we have the lock, check if core-local mapping has changed.
+ cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
+ cl_block* block = cl_core_local_block_get_block(core_local_block);
+ if ((block != NULL) && (block != old_block)) {
+ return true;
+ }
+ if (block != NULL) {
+ cl_core_local_block_set_block(core_local_block, NULL);
+ cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
+ }
+ block = cl_allocate_block();
+ if (block == NULL) {
+ return false;
+ }
+ cl_core_local_block_set_block(core_local_block, block);
+ cl_block_enable_access(block);
+ return true;
+}
+
+static cl_block* cl_get_block(void* record) {
+ uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
+ uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
+ return &g_log.blocks[index];
+}
+
+// Gets the next block to read and tries to free 'prev' block (if not NULL).
+// Returns NULL if reached the end.
+static cl_block* cl_next_block_to_read(cl_block* prev) {
+ cl_block* block = NULL;
+ if (g_log.read_iterator_state == g_log.num_cores) {
+ // We are traversing dirty list; find the next dirty block.
+ if (prev != NULL) {
+ // Try to free the previous block if there is no unread data. This block
+ // may have unread data if a previously incomplete record completed
+ // between read_next() calls.
+ block = prev->link.next->block;
+ if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
+ cl_block_list_remove(&g_log.dirty_block_list, prev);
+ cl_block_list_insert_at_head(&g_log.free_block_list, prev);
+ }
+ } else {
+ block = cl_block_list_head(&g_log.dirty_block_list);
+ }
+ if (block != NULL) {
+ return block;
+ }
+ // We are done with the dirty list; moving on to core-local blocks.
+ }
+ while (g_log.read_iterator_state > 0) {
+ g_log.read_iterator_state--;
+ block = cl_core_local_block_get_block(
+ &g_log.core_local_blocks[g_log.read_iterator_state]);
+ if (block != NULL) {
+ return block;
+ }
+ }
+ return NULL;
+}
+
+#define CL_LOG_2_MB 20 // 2^20 = 1MB
+
+// External functions: primary census_log interface
+void census_log_initialize(size_t size_in_mb, int discard_old_records) {
+ // Check cacheline alignment.
+ GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
+ GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
+ GPR_ASSERT(!g_log.initialized);
+ g_log.discard_old_records = discard_old_records;
+ g_log.num_cores = gpr_cpu_num_cores();
+ // Ensure that we will not get any overflow in calculating num_blocks.
+ GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
+ GPR_ASSERT(size_in_mb < 1000);
+ // Ensure at least 2x as many blocks as there are cores.
+ g_log.num_blocks =
+ (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
+ CENSUS_LOG_2_MAX_RECORD_SIZE);
+ gpr_mu_init(&g_log.lock);
+ g_log.read_iterator_state = 0;
+ g_log.block_being_read = NULL;
+ g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
+ g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
+ memset(g_log.core_local_blocks, 0,
+ g_log.num_cores * sizeof(cl_core_local_block));
+ g_log.blocks = (cl_block*)gpr_malloc_aligned(
+ g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
+ memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
+ g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ cl_block_list_initialize(&g_log.free_block_list);
+ cl_block_list_initialize(&g_log.dirty_block_list);
+ for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
+ cl_block* block = g_log.blocks + i;
+ cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
+ cl_block_try_disable_access(block, 1 /* discard data */);
+ cl_block_list_insert_at_tail(&g_log.free_block_list, block);
+ }
+ gpr_atm_rel_store(&g_log.out_of_space_count, 0);
+ g_log.initialized = 1;
+}
+
+void census_log_shutdown(void) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_destroy(&g_log.lock);
+ gpr_free_aligned(g_log.core_local_blocks);
+ g_log.core_local_blocks = NULL;
+ gpr_free_aligned(g_log.blocks);
+ g_log.blocks = NULL;
+ gpr_free(g_log.buffer);
+ g_log.buffer = NULL;
+ g_log.initialized = 0;
+}
+
+void* census_log_start_write(size_t size) {
+ GPR_ASSERT(size > 0);
+ GPR_ASSERT(g_log.initialized);
+ if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
+ return NULL;
+ }
+ // Bounds the number of times block allocation is attempted below.
+ uint32_t attempts_remaining = g_log.num_blocks;
+ uint32_t core_id = gpr_cpu_current_cpu();
+ do {
+ void* record = NULL;
+ cl_block* block =
+ cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
+ if (block && (record = cl_block_start_write(block, size))) {
+ return record;
+ }
+ // Need to allocate a new block. We are here if:
+ // - No block associated with the core OR
+ // - Write in-progress on the block OR
+ // - block is out of space
+ gpr_mu_lock(&g_log.lock);
+ bool allocated = cl_allocate_core_local_block(core_id, block);
+ gpr_mu_unlock(&g_log.lock);
+ if (!allocated) {
+ gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+ return NULL;
+ }
+ } while (attempts_remaining--);
+ // Give up.
+ gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+ return NULL;
+}
+
+void census_log_end_write(void* record, size_t bytes_written) {
+ GPR_ASSERT(g_log.initialized);
+ cl_block_end_write(cl_get_block(record), bytes_written);
+}
+
+void census_log_init_reader(void) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_lock(&g_log.lock);
+ // If a block is locked for reading, unlock it.
+ if (g_log.block_being_read != NULL) {
+ cl_block_end_read(g_log.block_being_read);
+ g_log.block_being_read = NULL;
+ }
+ g_log.read_iterator_state = g_log.num_cores;
+ gpr_mu_unlock(&g_log.lock);
+}
+
+const void* census_log_read_next(size_t* bytes_available) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_lock(&g_log.lock);
+ if (g_log.block_being_read != NULL) {
+ cl_block_end_read(g_log.block_being_read);
+ }
+ do {
+ g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
+ if (g_log.block_being_read != NULL) {
+ void* record =
+ cl_block_start_read(g_log.block_being_read, bytes_available);
+ if (record != NULL) {
+ gpr_mu_unlock(&g_log.lock);
+ return record;
+ }
+ }
+ } while (g_log.block_being_read != NULL);
+ gpr_mu_unlock(&g_log.lock);
+ return NULL;
+}
+
+size_t census_log_remaining_space(void) {
+ GPR_ASSERT(g_log.initialized);
+ size_t space = 0;
+ gpr_mu_lock(&g_log.lock);
+ if (g_log.discard_old_records) {
+ // Remaining space is not meaningful; just return the entire log space.
+ space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
+ } else {
+ GPR_ASSERT(g_log.free_block_list.count >= 0);
+ space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
+ }
+ gpr_mu_unlock(&g_log.lock);
+ return space;
+}
+
+int64_t census_log_out_of_space_count(void) {
+ GPR_ASSERT(g_log.initialized);
+ return gpr_atm_acq_load(&g_log.out_of_space_count);
+}
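As an illustration of the writer-side lifecycle described in the block comment at the top of mlog.c, a hypothetical caller (not part of the patch) would use only the census_log_* functions added in this file:

#include "src/core/census/mlog.h"

#include <string.h>

/* Hypothetical helper: writes one event; returns 1 on success, 0 if the log
   is out of space or the event is too large for a single record. */
static int example_log_event(const char* event, size_t len) {
  if (len > CENSUS_LOG_MAX_RECORD_SIZE) return 0;
  void* record = census_log_start_write(len);
  if (record == NULL) return 0; /* counted in census_log_out_of_space_count() */
  memcpy(record, event, len);
  census_log_end_write(record, len); /* commits exactly 'len' bytes */
  return 1;
}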
diff --git a/src/core/census/mlog.h b/src/core/census/mlog.h
new file mode 100644
index 0000000000..bc6eaeaf28
--- /dev/null
+++ b/src/core/census/mlog.h
@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* A very fast in-memory log, optimized for multiple writers. */
+
+#ifndef GRPC_CORE_CENSUS_MLOG_H
+#define GRPC_CORE_CENSUS_MLOG_H
+
+#include <grpc/support/port_platform.h>
+#include <stddef.h>
+
+/* Maximum record size, in bytes. */
+#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
+#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
+
+/* Initialize the statistics logging subsystem with the given log size. A log
+ size of 0 will result in the smallest possible log for the platform
+ (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
+ discard_old_records is non-zero, then new records will displace older ones
+ when the log is full. This function must be called before any other
+ census_log functions.
+*/
+void census_log_initialize(size_t size_in_mb, int discard_old_records);
+
+/* Shut down the logging subsystem. The caller must ensure that:
+ - there are no in-progress or future calls to any census_log functions
+ - there are no incomplete records
+*/
+void census_log_shutdown(void);
+
+/* Allocates and returns a record of 'size' bytes and marks it in use. A
+ subsequent census_log_end_write() marks the record complete. The
+ 'bytes_written' census_log_end_write() argument must be <=
+ 'size'. Returns NULL if out-of-space AND:
+ - log is configured to keep old records OR
+ - all blocks are pinned by incomplete records.
+*/
+void* census_log_start_write(size_t size);
+
+void census_log_end_write(void* record, size_t bytes_written);
+
+void census_log_init_reader(void);
+
+/* census_log_read_next() iterates over blocks with data and for each block
+ returns a pointer to the first unread byte. The number of bytes that can be
+ read is returned in 'bytes_available'. The reader is expected to read all
+ available data; reading the data consumes it, i.e. it cannot be read again.
+ census_log_read_next() returns NULL once the end is reached, i.e. the last
+ block has been read. census_log_init_reader() starts the iteration, or
+ aborts the current iteration.
+*/
+const void* census_log_read_next(size_t* bytes_available);
+
+/* Returns estimated remaining space across all blocks, in bytes. If log is
+ configured to discard old records, returns total log space. Otherwise,
+ returns space available in empty blocks (partially filled blocks are
+ treated as full).
+*/
+size_t census_log_remaining_space(void);
+
+/* Returns the number of times census_log_start_write() failed due to
+ out-of-space. */
+int64_t census_log_out_of_space_count(void);
+
+#endif /* GRPC_CORE_CENSUS_MLOG_H */
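The matching reader side, again a hypothetical sketch of the census_log_init_reader()/census_log_read_next() contract documented above; the consume callback is an assumption:

#include "src/core/census/mlog.h"

#include <stddef.h>

/* Drains all currently readable records, handing each one to 'consume'. */
static void example_drain_log(void (*consume)(const void* data, size_t len)) {
  size_t bytes_available = 0;
  const void* data;
  census_log_init_reader(); /* (re)start the iteration */
  while ((data = census_log_read_next(&bytes_available)) != NULL) {
    consume(data, bytes_available); /* reading consumes the data */
  }
}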
diff --git a/src/core/compression/algorithm.c b/src/core/census/operation.c
index 4db48df6cb..5c58704372 100644
--- a/src/core/compression/algorithm.c
+++ b/src/core/census/operation.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright 2015, Google Inc.
* All rights reserved.
*
@@ -31,37 +30,34 @@
*
*/
-#include <stdlib.h>
-#include <grpc/compression.h>
+#include <grpc/census.h>
+
+/* TODO(aveitch): These are all placeholder implementations. */
+
+census_timestamp census_start_rpc_op_timestamp(void) {
+ census_timestamp ct;
+ /* TODO(aveitch): assumes gpr_timespec implementation of census_timestamp. */
+ ct.ts = gpr_now(GPR_CLOCK_MONOTONIC);
+ return ct;
+}
-const char *grpc_compression_algorithm_name(
- grpc_compression_algorithm algorithm) {
- switch (algorithm) {
- case GRPC_COMPRESS_NONE:
- return "none";
- case GRPC_COMPRESS_DEFLATE:
- return "deflate";
- case GRPC_COMPRESS_GZIP:
- return "gzip";
- case GRPC_COMPRESS_ALGORITHMS_COUNT:
- return "error";
- }
- return "error";
+census_context *census_start_client_rpc_op(
+ const census_context *context, int64_t rpc_name_id,
+ const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
+ const census_timestamp *start_time) {
+ return NULL;
}
-/* TODO(dgq): Add the ability to specify parameters to the individual
- * compression algorithms */
-grpc_compression_algorithm grpc_compression_algorithm_for_level(
- grpc_compression_level level) {
- switch (level) {
- case GRPC_COMPRESS_LEVEL_NONE:
- return GRPC_COMPRESS_NONE;
- case GRPC_COMPRESS_LEVEL_LOW:
- case GRPC_COMPRESS_LEVEL_MED:
- case GRPC_COMPRESS_LEVEL_HIGH:
- return GRPC_COMPRESS_DEFLATE;
- default:
- /* we shouldn't be making it here */
- abort();
- }
+census_context *census_start_server_rpc_op(
+ const char *buffer, int64_t rpc_name_id,
+ const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
+ census_timestamp *start_time) {
+ return NULL;
}
+
+census_context *census_start_op(census_context *context, const char *family,
+ const char *name, int trace_mask) {
+ return NULL;
+}
+
+void census_end_op(census_context *context, int status) {}
diff --git a/src/core/census/placeholders.c b/src/core/census/placeholders.c
new file mode 100644
index 0000000000..fe23d13971
--- /dev/null
+++ b/src/core/census/placeholders.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/census.h>
+#include <grpc/support/log.h>
+#include <stdlib.h> /* abort() is used by the placeholder implementations */
+
+/* Placeholders for the pending APIs */
+
+int census_get_trace_record(census_trace_record *trace_record) {
+ (void)trace_record;
+ abort();
+}
+
+void census_record_values(census_context *context, census_value *values,
+ size_t nvalues) {
+ (void)context;
+ (void)values;
+ (void)nvalues;
+ abort();
+}
+
+void census_set_rpc_client_peer(census_context *context, const char *peer) {
+ (void)context;
+ (void)peer;
+ abort();
+}
+
+void census_trace_scan_end(void) { abort(); }
+
+int census_trace_scan_start(int consume) {
+ (void)consume;
+ abort();
+}
+
+const census_aggregation *census_view_aggregrations(const census_view *view) {
+ (void)view;
+ abort();
+}
+
+census_view *census_view_create(uint32_t metric_id, const census_context *tags,
+ const census_aggregation *aggregations,
+ size_t naggregations) {
+ (void)metric_id;
+ (void)tags;
+ (void)aggregations;
+ (void)naggregations;
+ abort();
+}
+
+const census_context *census_view_tags(const census_view *view) {
+ (void)view;
+ abort();
+}
+
+void census_view_delete(census_view *view) {
+ (void)view;
+ abort();
+}
+
+const census_view_data *census_view_get_data(const census_view *view) {
+ (void)view;
+ abort();
+}
+
+size_t census_view_metric(const census_view *view) {
+ (void)view;
+ abort();
+}
+
+size_t census_view_naggregations(const census_view *view) {
+ (void)view;
+ abort();
+}
+
+void census_view_reset(census_view *view) {
+ (void)view;
+ abort();
+}
diff --git a/src/core/census/rpc_metric_id.h b/src/core/census/rpc_metric_id.h
new file mode 100644
index 0000000000..f8d8dad0bf
--- /dev/null
+++ b/src/core/census/rpc_metric_id.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CENSUS_RPC_METRIC_ID_H
+#define GRPC_CORE_CENSUS_RPC_METRIC_ID_H
+
+/* Metric ID's used for RPC measurements. */
+/* Count of client requests sent. */
+#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0)
+/* Count of server requests sent. */
+#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1)
+/* Client error counts. */
+#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2)
+/* Server error counts. */
+#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3)
+/* Client side request latency. */
+#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4)
+/* Server side request latency. */
+#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5)
+
+#endif /* GRPC_CORE_CENSUS_RPC_METRIC_ID_H */
diff --git a/src/core/census/tracing.c b/src/core/census/tracing.c
new file mode 100644
index 0000000000..3b5d6dab2b
--- /dev/null
+++ b/src/core/census/tracing.c
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/census.h>
+
+/* TODO(aveitch): These are all placeholder implementations. */
+
+int census_trace_mask(const census_context *context) {
+ return CENSUS_TRACE_MASK_NONE;
+}
+
+void census_set_trace_mask(int trace_mask) {}
+
+void census_trace_print(census_context *context, uint32_t type,
+ const char *buffer, size_t n) {}
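A hypothetical call site for the tracing placeholders above, purely for illustration; the annotation type value 0 is an assumption, and the mask constant comes from grpc/census.h:

#include <grpc/census.h>

#include <string.h>

/* Emits a trace annotation only if tracing is active for this context. */
static void example_trace(census_context* ctx) {
  if (census_trace_mask(ctx) != CENSUS_TRACE_MASK_NONE) {
    const char* msg = "request enqueued";
    census_trace_print(ctx, 0 /* hypothetical annotation type */, msg,
                       strlen(msg));
  }
}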
diff --git a/src/core/channel/census_filter.c b/src/core/channel/census_filter.c
deleted file mode 100644
index 7e393a01a6..0000000000
--- a/src/core/channel/census_filter.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/channel/census_filter.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include "src/core/channel/channel_stack.h"
-#include "src/core/channel/noop_filter.h"
-#include "src/core/statistics/census_interface.h"
-#include "src/core/statistics/census_rpc_stats.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/time.h>
-
-typedef struct call_data {
- census_op_id op_id;
- census_rpc_stats stats;
- gpr_timespec start_ts;
-
- /* recv callback */
- grpc_stream_op_buffer* recv_ops;
- void (*on_done_recv)(void* user_data, int success);
- void* recv_user_data;
-} call_data;
-
-typedef struct channel_data {
- grpc_mdstr* path_str; /* pointer to meta data str with key == ":path" */
-} channel_data;
-
-static void init_rpc_stats(census_rpc_stats* stats) {
- memset(stats, 0, sizeof(census_rpc_stats));
- stats->cnt = 1;
-}
-
-static void extract_and_annotate_method_tag(grpc_stream_op_buffer* sopb,
- call_data* calld,
- channel_data* chand) {
- grpc_linked_mdelem* m;
- size_t i;
- for (i = 0; i < sopb->nops; i++) {
- grpc_stream_op* op = &sopb->ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
- if (m->md->key == chand->path_str) {
- gpr_log(GPR_DEBUG, "%s",
- (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
- census_add_method_tag(calld->op_id, (const char*)GPR_SLICE_START_PTR(
- m->md->value->slice));
- }
- }
- }
-}
-
-static void client_mutate_op(grpc_call_element* elem, grpc_transport_op* op) {
- call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
- if (op->send_ops) {
- extract_and_annotate_method_tag(op->send_ops, calld, chand);
- }
-}
-
-static void client_start_transport_op(grpc_call_element* elem,
- grpc_transport_op* op) {
- call_data* calld = elem->call_data;
- GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
- client_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
-}
-
-static void server_on_done_recv(void* ptr, int success) {
- grpc_call_element* elem = ptr;
- call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
- if (success) {
- extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
- }
- calld->on_done_recv(calld->recv_user_data, success);
-}
-
-static void server_mutate_op(grpc_call_element* elem, grpc_transport_op* op) {
- call_data* calld = elem->call_data;
- if (op->recv_ops) {
- /* substitute our callback for the op callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- calld->recv_user_data = op->recv_user_data;
- op->on_done_recv = server_on_done_recv;
- op->recv_user_data = elem;
- }
-}
-
-static void server_start_transport_op(grpc_call_element* elem,
- grpc_transport_op* op) {
- call_data* calld = elem->call_data;
- GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
- server_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
-}
-
-static void channel_op(grpc_channel_element* elem,
- grpc_channel_element* from_elem, grpc_channel_op* op) {
- switch (op->type) {
- case GRPC_TRANSPORT_CLOSED:
- /* TODO(hongyu): Annotate trace information for all calls of the channel
- */
- break;
- default:
- break;
- }
- grpc_channel_next_op(elem, op);
-}
-
-static void client_init_call_elem(grpc_call_element* elem,
- const void* server_transport_data,
- grpc_transport_op* initial_op) {
- call_data* d = elem->call_data;
- GPR_ASSERT(d != NULL);
- init_rpc_stats(&d->stats);
- d->start_ts = gpr_now();
- d->op_id = census_tracing_start_op();
- if (initial_op) client_mutate_op(elem, initial_op);
-}
-
-static void client_destroy_call_elem(grpc_call_element* elem) {
- call_data* d = elem->call_data;
- GPR_ASSERT(d != NULL);
- census_record_rpc_client_stats(d->op_id, &d->stats);
- census_tracing_end_op(d->op_id);
-}
-
-static void server_init_call_elem(grpc_call_element* elem,
- const void* server_transport_data,
- grpc_transport_op* initial_op) {
- call_data* d = elem->call_data;
- GPR_ASSERT(d != NULL);
- init_rpc_stats(&d->stats);
- d->start_ts = gpr_now();
- d->op_id = census_tracing_start_op();
- if (initial_op) server_mutate_op(elem, initial_op);
-}
-
-static void server_destroy_call_elem(grpc_call_element* elem) {
- call_data* d = elem->call_data;
- GPR_ASSERT(d != NULL);
- d->stats.elapsed_time_ms =
- gpr_timespec_to_micros(gpr_time_sub(gpr_now(), d->start_ts));
- census_record_rpc_server_stats(d->op_id, &d->stats);
- census_tracing_end_op(d->op_id);
-}
-
-static void init_channel_elem(grpc_channel_element* elem,
- const grpc_channel_args* args, grpc_mdctx* mdctx,
- int is_first, int is_last) {
- channel_data* chand = elem->channel_data;
- GPR_ASSERT(chand != NULL);
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
- chand->path_str = grpc_mdstr_from_string(mdctx, ":path");
-}
-
-static void destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = elem->channel_data;
- GPR_ASSERT(chand != NULL);
- if (chand->path_str != NULL) {
- grpc_mdstr_unref(chand->path_str);
- }
-}
-
-const grpc_channel_filter grpc_client_census_filter = {
- client_start_transport_op, channel_op, sizeof(call_data),
- client_init_call_elem, client_destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, "census-client"};
-
-const grpc_channel_filter grpc_server_census_filter = {
- server_start_transport_op, channel_op, sizeof(call_data),
- server_init_call_elem, server_destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, "census-server"};
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index 166d559a45..bae7a90a01 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,8 +35,11 @@
#include "src/core/channel/channel_args.h"
#include "src/core/support/string.h"
+#include <grpc/census.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
#include <string.h>
@@ -53,16 +56,16 @@ static grpc_arg copy_arg(const grpc_arg *src) {
break;
case GRPC_ARG_POINTER:
dst.value.pointer = src->value.pointer;
- dst.value.pointer.p = src->value.pointer.copy
- ? src->value.pointer.copy(src->value.pointer.p)
- : src->value.pointer.p;
+ dst.value.pointer.p =
+ src->value.pointer.vtable->copy(src->value.pointer.p);
break;
}
return dst;
}
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
- const grpc_arg *to_add) {
+ const grpc_arg *to_add,
+ size_t num_to_add) {
grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
size_t i;
size_t src_num_args = (src == NULL) ? 0 : src->num_args;
@@ -71,17 +74,76 @@ grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
dst->args = NULL;
return dst;
}
- dst->num_args = src_num_args + ((to_add == NULL) ? 0 : 1);
+ dst->num_args = src_num_args + num_to_add;
dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
for (i = 0; i < src_num_args; i++) {
dst->args[i] = copy_arg(&src->args[i]);
}
- if (to_add != NULL) dst->args[src_num_args] = copy_arg(to_add);
+ for (i = 0; i < num_to_add; i++) {
+ dst->args[i + src_num_args] = copy_arg(&to_add[i]);
+ }
return dst;
}
grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
- return grpc_channel_args_copy_and_add(src, NULL);
+ return grpc_channel_args_copy_and_add(src, NULL, 0);
+}
+
+grpc_channel_args *grpc_channel_args_merge(const grpc_channel_args *a,
+ const grpc_channel_args *b) {
+ return grpc_channel_args_copy_and_add(a, b->args, b->num_args);
+}
+
+static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
+ int c = GPR_ICMP(a->type, b->type);
+ if (c != 0) return c;
+ c = strcmp(a->key, b->key);
+ if (c != 0) return c;
+ switch (a->type) {
+ case GRPC_ARG_STRING:
+ return strcmp(a->value.string, b->value.string);
+ case GRPC_ARG_INTEGER:
+ return GPR_ICMP(a->value.integer, b->value.integer);
+ case GRPC_ARG_POINTER:
+ c = GPR_ICMP(a->value.pointer.p, b->value.pointer.p);
+ if (c != 0) {
+ c = GPR_ICMP(a->value.pointer.vtable, b->value.pointer.vtable);
+ if (c == 0) {
+ c = a->value.pointer.vtable->cmp(a->value.pointer.p,
+ b->value.pointer.p);
+ }
+ }
+ return c;
+ }
+ GPR_UNREACHABLE_CODE(return 0);
+}
+
+/* stabilizing comparison function: since channel_args ordering matters for
+ * keys with the same name, we need to preserve that ordering */
+static int cmp_key_stable(const void *ap, const void *bp) {
+ const grpc_arg *const *a = ap;
+ const grpc_arg *const *b = bp;
+ int c = strcmp((*a)->key, (*b)->key);
+ if (c == 0) c = GPR_ICMP(*a, *b);
+ return c;
+}
+
+grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
+ grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ for (size_t i = 0; i < a->num_args; i++) {
+ args[i] = &a->args[i];
+ }
+ qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
+
+ grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ b->num_args = a->num_args;
+ b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ for (size_t i = 0; i < a->num_args; i++) {
+ b->args[i] = copy_arg(args[i]);
+ }
+
+ gpr_free(args);
+ return b;
}
void grpc_channel_args_destroy(grpc_channel_args *a) {
@@ -94,9 +156,7 @@ void grpc_channel_args_destroy(grpc_channel_args *a) {
case GRPC_ARG_INTEGER:
break;
case GRPC_ARG_POINTER:
- if (a->args[i].value.pointer.destroy) {
- a->args[i].value.pointer.destroy(a->args[i].value.pointer.p);
- }
+ a->args[i].value.pointer.vtable->destroy(a->args[i].value.pointer.p);
break;
}
gpr_free(a->args[i].key);
@@ -106,36 +166,106 @@ void grpc_channel_args_destroy(grpc_channel_args *a) {
}
int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) {
- unsigned i;
+ size_t i;
if (a == NULL) return 0;
for (i = 0; i < a->num_args; i++) {
if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) {
- return a->args[i].value.integer != 0;
+ return a->args[i].value.integer != 0 && census_enabled();
}
}
- return 0;
+ return census_enabled();
}
-grpc_compression_level grpc_channel_args_get_compression_level(
+grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
- if (a) {
- for (i = 0; a && i < a->num_args; ++i) {
- if (a->args[i].type == GRPC_ARG_INTEGER &&
- !strcmp(GRPC_COMPRESSION_LEVEL_ARG, a->args[i].key)) {
- return a->args[i].value.integer;
- break;
- }
+ if (a == NULL) return 0;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_COMPRESSION_ALGORITHM_ARG, a->args[i].key)) {
+ return (grpc_compression_algorithm)a->args[i].value.integer;
+ break;
}
}
- return GRPC_COMPRESS_LEVEL_NONE;
+ return GRPC_COMPRESS_NONE;
}
-void grpc_channel_args_set_compression_level(
- grpc_channel_args **a, grpc_compression_level level) {
+grpc_channel_args *grpc_channel_args_set_compression_algorithm(
+ grpc_channel_args *a, grpc_compression_algorithm algorithm) {
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_LEVEL_ARG;
- tmp.value.integer = level;
- *a = grpc_channel_args_copy_and_add(*a, &tmp);
+ tmp.key = GRPC_COMPRESSION_ALGORITHM_ARG;
+ tmp.value.integer = algorithm;
+ return grpc_channel_args_copy_and_add(a, &tmp, 1);
+}
+
+/** Returns 1 if the argument for the compression algorithm's enabled-states
+ * bitset was found in \a a, returning a pointer to the arg's value in
+ * \a states_arg. Otherwise, returns 0. */
+static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
+ int **states_arg) {
+ if (a != NULL) {
+ size_t i;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_COMPRESSION_ALGORITHM_STATE_ARG, a->args[i].key)) {
+ *states_arg = &a->args[i].value.integer;
+ return 1; /* GPR_TRUE */
+ }
+ }
+ }
+ return 0; /* GPR_FALSE */
+}
+
+grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
+ grpc_channel_args **a, grpc_compression_algorithm algorithm, int state) {
+ int *states_arg;
+ grpc_channel_args *result = *a;
+ const int states_arg_found =
+ find_compression_algorithm_states_bitset(*a, &states_arg);
+
+ if (states_arg_found) {
+ if (state != 0) {
+ GPR_BITSET((unsigned *)states_arg, algorithm);
+ } else {
+ GPR_BITCLEAR((unsigned *)states_arg, algorithm);
+ }
+ } else {
+ /* create a new arg */
+ grpc_arg tmp;
+ tmp.type = GRPC_ARG_INTEGER;
+ tmp.key = GRPC_COMPRESSION_ALGORITHM_STATE_ARG;
+ /* all enabled by default */
+ tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ if (state != 0) {
+ GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
+ } else {
+ GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
+ }
+ result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
+ grpc_channel_args_destroy(*a);
+ *a = result;
+ }
+ return result;
+}
+
+int grpc_channel_args_compression_algorithm_get_states(
+ const grpc_channel_args *a) {
+ int *states_arg;
+ if (find_compression_algorithm_states_bitset(a, &states_arg)) {
+ return *states_arg;
+ } else {
+ return (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; /* All algs. enabled */
+ }
+}
+
+int grpc_channel_args_compare(const grpc_channel_args *a,
+ const grpc_channel_args *b) {
+ int c = GPR_ICMP(a->num_args, b->num_args);
+ if (c != 0) return c;
+ for (size_t i = 0; i < a->num_args; i++) {
+ c = cmp_arg(&a->args[i], &b->args[i]);
+ if (c != 0) return c;
+ }
+ return 0;
}
diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h
index bf747b26e6..e19440f76f 100644
--- a/src/core/channel/channel_args.h
+++ b/src/core/channel/channel_args.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H
+#ifndef GRPC_CORE_CHANNEL_CHANNEL_ARGS_H
+#define GRPC_CORE_CHANNEL_CHANNEL_ARGS_H
#include <grpc/compression.h>
#include <grpc/grpc.h>
@@ -40,10 +40,18 @@
/* Copy some arguments */
grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src);
+/* Copy some arguments, stably sorting keys */
+grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a);
+
/** Copy some arguments and add the to_add parameter in the end.
If to_add is NULL, it is equivalent to call grpc_channel_args_copy. */
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
- const grpc_arg *to_add);
+ const grpc_arg *to_add,
+ size_t num_to_add);
+
+/** Copy args from a then args from b into a new channel args */
+grpc_channel_args *grpc_channel_args_merge(const grpc_channel_args *a,
+ const grpc_channel_args *b);
/** Destroy arguments created by grpc_channel_args_copy */
void grpc_channel_args_destroy(grpc_channel_args *a);
@@ -52,13 +60,35 @@ void grpc_channel_args_destroy(grpc_channel_args *a);
* is specified in channel args, otherwise returns 0. */
int grpc_channel_args_is_census_enabled(const grpc_channel_args *a);
-/** Returns the compression level set in \a a. */
-grpc_compression_level grpc_channel_args_get_compression_level(
+/** Returns the compression algorithm set in \a a. */
+grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
+ const grpc_channel_args *a);
+
+/** Returns a channel args instance with \a algorithm set as the compression
+ * algorithm for the channel. If \a a is non-NULL, its args are copied.
+ * N.B. GRPC_COMPRESS_NONE disables compression for the channel. */
+grpc_channel_args *grpc_channel_args_set_compression_algorithm(
+ grpc_channel_args *a, grpc_compression_algorithm algorithm);
+
+/** Sets the support for the given compression algorithm. By default, all
+ * compression algorithms are enabled. It's an error to disable an algorithm set
+ * by grpc_channel_args_set_compression_algorithm.
+ *
+ * Returns an instance with the updated algorithm states. The \a a pointer is
+ * modified to point to the returned instance (which may be different from the
+ * input value of \a a). */
+grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
+ grpc_channel_args **a, grpc_compression_algorithm algorithm, int enabled);
+
+/** Returns the bitset representing the support state (true for enabled, false
+ * for disabled) for compression algorithms.
+ *
+ * The i-th bit of the returned bitset corresponds to the i-th entry in the
+ * grpc_compression_algorithm enum. */
+int grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a);
-/** Sets the compression level in \a a to \a level. Setting it to
- * GRPC_COMPRESS_LEVEL_NONE disables compression for the channel. */
-void grpc_channel_args_set_compression_level(
- grpc_channel_args **a, grpc_compression_level level);
+int grpc_channel_args_compare(const grpc_channel_args *a,
+ const grpc_channel_args *b);
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */
+#endif /* GRPC_CORE_CHANNEL_CHANNEL_ARGS_H */
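To show how the compression-related helpers declared above compose, a hypothetical caller (not part of the patch) might build channel args that select DEFLATE and disable gzip; ownership follows the documented contract, with the set_state helper updating the pointer it is given to the returned instance:

#include "src/core/channel/channel_args.h"

/* Hypothetical helper: copy 'base', select DEFLATE, then disable gzip. */
static grpc_channel_args* example_compression_args(grpc_channel_args* base) {
  grpc_channel_args* args =
      grpc_channel_args_set_compression_algorithm(base, GRPC_COMPRESS_DEFLATE);
  grpc_channel_args_compression_algorithm_set_state(&args, GRPC_COMPRESS_GZIP,
                                                    0 /* disabled */);
  /* Bit i of the states bitset corresponds to grpc_compression_algorithm i. */
  int states = grpc_channel_args_compression_algorithm_get_states(args);
  (void)states;
  return args; /* caller owns; free with grpc_channel_args_destroy() */
}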
diff --git a/src/core/channel/channel_stack.c b/src/core/channel/channel_stack.c
index 9eec8163f5..3e61688364 100644
--- a/src/core/channel/channel_stack.c
+++ b/src/core/channel/channel_stack.c
@@ -57,7 +57,7 @@ int grpc_trace_channel = 0;
/* Given a size, round up to the next multiple of sizeof(void*) */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
- (((x) + GPR_MAX_ALIGNMENT - 1) & ~(GPR_MAX_ALIGNMENT - 1))
+ (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count) {
@@ -101,18 +101,23 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
-void grpc_channel_stack_init(const grpc_channel_filter **filters,
- size_t filter_count, const grpc_channel_args *args,
- grpc_mdctx *metadata_context,
- grpc_channel_stack *stack) {
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ const grpc_channel_filter **filters,
+ size_t filter_count,
+ const grpc_channel_args *channel_args,
+ const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
grpc_channel_element *elems;
+ grpc_channel_element_args args;
char *user_data;
size_t i;
stack->count = filter_count;
+ GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
+ name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data =
((char *)elems) +
@@ -120,78 +125,121 @@ void grpc_channel_stack_init(const grpc_channel_filter **filters,
/* init per-filter data */
for (i = 0; i < filter_count; i++) {
+ args.channel_stack = stack;
+ args.channel_args = channel_args;
+ args.is_first = i == 0;
+ args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
- elems[i].filter->init_channel_elem(&elems[i], args, metadata_context,
- i == 0, i == (filter_count - 1));
+ elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
GPR_ASSERT(user_data > (char *)stack);
- GPR_ASSERT((gpr_uintptr)(user_data - (char *)stack) ==
+ GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
grpc_channel_stack_size(filters, filter_count));
stack->call_stack_size = call_size;
}
-void grpc_channel_stack_destroy(grpc_channel_stack *stack) {
+void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]);
+ channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]);
}
}
-void grpc_call_stack_init(grpc_channel_stack *channel_stack,
+void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ grpc_call_context_element *context,
const void *transport_server_data,
- grpc_transport_op *initial_op,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
+ grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_stack->count = count;
+ GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
+ destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
+ args.call_stack = call_stack;
+ args.server_transport_data = transport_server_data;
+ args.context = context;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- call_elems[i].filter->init_call_elem(&call_elems[i], transport_server_data,
- initial_op);
+ call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
-void grpc_call_stack_destroy(grpc_call_stack *stack) {
+void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_pollset *pollset) {
+ size_t count = call_stack->count;
+ grpc_call_element *call_elems;
+ char *user_data;
+ size_t i;
+
+ call_elems = CALL_ELEMS_FROM_STACK(call_stack);
+ user_data = ((char *)call_elems) +
+ ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
+
+ /* set the pollset on each call element */
+ for (i = 0; i < count; i++) {
+ call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
+ user_data +=
+ ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
+ }
+}
+
+void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_pollset *pollset) {}
+
+void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- elems[i].filter->destroy_call_elem(&elems[i]);
+ elems[i].filter->destroy_call_elem(exec_ctx, &elems[i]);
}
}
-void grpc_call_next_op(grpc_call_element *elem, grpc_transport_op *op) {
+void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ grpc_call_element *next_elem = elem + 1;
+ next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op);
+}
+
+char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
grpc_call_element *next_elem = elem + 1;
- next_elem->filter->start_transport_op(next_elem, op);
+ return next_elem->filter->get_peer(exec_ctx, next_elem);
}
-void grpc_channel_next_op(grpc_channel_element *elem, grpc_channel_op *op) {
- grpc_channel_element *next_elem = elem + op->dir;
- next_elem->filter->channel_op(next_elem, elem, op);
+void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_transport_op *op) {
+ grpc_channel_element *next_elem = elem + 1;
+ next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
}
grpc_channel_stack *grpc_channel_stack_from_top_element(
@@ -205,9 +253,10 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
sizeof(grpc_call_stack)));
}
-void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
- grpc_transport_op op;
+void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *cur_elem) {
+ grpc_transport_stream_op op;
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
- grpc_call_next_op(cur_elem, &op);
+ grpc_call_next_op(exec_ctx, cur_elem, &op);
}
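(Illustrative sketch, not part of this patch: the reshaped grpc_call_stack_init above takes an exec_ctx, refcount parameters and the call context up front, and a pollset must be attached via grpc_call_stack_set_pollset before the first op is started. A minimal caller could look like the following; my_call, destroy_my_call and MY_CALL_STACK are hypothetical names, the (exec_ctx, arg, success) shape of grpc_iomgr_cb_func is assumed from the closure callbacks elsewhere in this patch, the context array is passed as NULL and alignment padding is ignored for brevity.)

typedef struct my_call {
  int some_per_call_state; /* hypothetical per-call bookkeeping */
  /* the call stack is laid out immediately after this struct */
} my_call;

#define MY_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))

static void destroy_my_call(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  /* runs once the call stack refcount drops to zero */
  gpr_free(arg);
}

static my_call *create_my_call(grpc_exec_ctx *exec_ctx,
                               grpc_channel_stack *channel_stack,
                               grpc_pollset *pollset) {
  /* one allocation: wrapper object followed by the call stack */
  my_call *call = gpr_malloc(sizeof(*call) + channel_stack->call_stack_size);
  call->some_per_call_state = 0;
  grpc_call_stack_init(exec_ctx, channel_stack, 1 /* initial_refs */,
                       destroy_my_call, call, NULL /* context (simplified) */,
                       NULL /* client side: no server transport data */,
                       MY_CALL_STACK(call));
  /* a pollset must be attached before the first op is started */
  grpc_call_stack_set_pollset(exec_ctx, MY_CALL_STACK(call), pollset);
  return call;
}

/* later, when the wrapper is done with the call:
     GRPC_CALL_STACK_UNREF(exec_ctx, MY_CALL_STACK(call), "my_call");
   which invokes destroy_my_call once the last reference is released */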
diff --git a/src/core/channel/channel_stack.h b/src/core/channel/channel_stack.h
index de0e4e4518..52362f0b20 100644
--- a/src/core/channel/channel_stack.h
+++ b/src/core/channel/channel_stack.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H
+#ifndef GRPC_CORE_CHANNEL_CHANNEL_STACK_H
+#define GRPC_CORE_CHANNEL_CHANNEL_STACK_H
/* A channel filter defines how operations on a channel are implemented.
Channel filters are chained together to create full channels, and if those
@@ -51,44 +51,21 @@
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;
-/* The direction of the call.
- The values of the enums (1, -1) matter here - they are used to increment
- or decrement a pointer to find the next element to call */
-typedef enum { GRPC_CALL_DOWN = 1, GRPC_CALL_UP = -1 } grpc_call_dir;
-
-typedef enum {
- /* send a goaway message to remote channels indicating that we are going
- to disconnect in the future */
- GRPC_CHANNEL_GOAWAY,
- /* disconnect any underlying transports */
- GRPC_CHANNEL_DISCONNECT,
- /* transport received a new call */
- GRPC_ACCEPT_CALL,
- /* an underlying transport was closed */
- GRPC_TRANSPORT_CLOSED,
- /* an underlying transport is about to be closed */
- GRPC_TRANSPORT_GOAWAY
-} grpc_channel_op_type;
-
-/* A single filterable operation to be performed on a channel */
+typedef struct grpc_channel_stack grpc_channel_stack;
+typedef struct grpc_call_stack grpc_call_stack;
+
+typedef struct {
+ grpc_channel_stack *channel_stack;
+ const grpc_channel_args *channel_args;
+ int is_first;
+ int is_last;
+} grpc_channel_element_args;
+
typedef struct {
- /* The type of operation we're performing */
- grpc_channel_op_type type;
- /* The directionality of this call - is it bubbling up the stack, or down? */
- grpc_call_dir dir;
-
- /* Argument data, matching up with grpc_channel_op_type names */
- union {
- struct {
- grpc_transport *transport;
- const void *transport_server_data;
- } accept_call;
- struct {
- grpc_status_code status;
- gpr_slice message;
- } goaway;
- } data;
-} grpc_channel_op;
+ grpc_call_stack *call_stack;
+ const void *server_transport_data;
+ grpc_call_context_element *context;
+} grpc_call_element_args;
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
@@ -103,12 +80,14 @@ typedef struct {
typedef struct {
/* Called to eg. send/receive data on a call.
See grpc_call_next_op on how to call the next element in the stack */
- void (*start_transport_op)(grpc_call_element *elem, grpc_transport_op *op);
+ void (*start_transport_stream_op)(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op);
/* Called to handle channel level operations - e.g. new calls, or transport
closure.
See grpc_channel_next_op on how to call the next element in the stack */
- void (*channel_op)(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op);
+ void (*start_transport_op)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_transport_op *op);
/* sizeof(per call data) */
size_t sizeof_call_data;
@@ -119,13 +98,14 @@ typedef struct {
server_transport_data is an opaque pointer. If it is NULL, this call is
on a client; if it is non-NULL, then it points to memory owned by the
transport and is on the server. Most filters want to ignore this
- argument.*/
- void (*init_call_elem)(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op);
+ argument. */
+ void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args);
+ void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset);
/* Destroy per call data.
The filter does not need to do any chaining */
- void (*destroy_call_elem)(grpc_call_element *elem);
+ void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* sizeof(per channel data) */
size_t sizeof_channel_data;
@@ -135,13 +115,15 @@ typedef struct {
     is_first, is_last designate this element's position in the stack, and are
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
- void (*init_channel_elem)(grpc_channel_element *elem,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last);
+ void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_channel_element_args *args);
/* Destroy per channel data.
The filter does not need to do any chaining */
- void (*destroy_channel_elem)(grpc_channel_element *elem);
+ void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem);
+
+ /* Implement grpc_call_get_peer() */
+ char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* The name of this filter */
const char *name;
@@ -165,16 +147,24 @@ struct grpc_call_element {
/* A channel stack tracks a set of related filters for one channel, and
guarantees they live within a single malloc() allocation */
-typedef struct {
+struct grpc_channel_stack {
+ grpc_stream_refcount refcount;
size_t count;
/* Memory required for a call stack (computed at channel stack
initialization) */
size_t call_stack_size;
-} grpc_channel_stack;
+};
/* A call stack tracks a set of related filters for one call, and guarantees
they live within a single malloc() allocation */
-typedef struct { size_t count; } grpc_call_stack;
+struct grpc_call_stack {
+  /* shared refcount for this call stack.
+ MUST be the first element: the underlying code calls destroy
+ with the address of the refcount, but higher layers prefer to think
+ about the address of the call stack itself. */
+ grpc_stream_refcount refcount;
+ size_t count;
+};
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
@@ -189,28 +179,66 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
-void grpc_channel_stack_init(const grpc_channel_filter **filters,
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ const grpc_channel_filter **filters,
size_t filter_count, const grpc_channel_args *args,
- grpc_mdctx *metadata_context,
- grpc_channel_stack *stack);
+ const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
-void grpc_channel_stack_destroy(grpc_channel_stack *stack);
+void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *stack);
/* Initialize a call stack given a channel stack. transport_server_data is
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
-void grpc_call_stack_init(grpc_channel_stack *channel_stack,
+void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ grpc_call_context_element *context,
const void *transport_server_data,
- grpc_transport_op *initial_op,
grpc_call_stack *call_stack);
+/* Set a pollset for a call stack: must occur before the first op is started */
+void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_pollset *pollset);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define GRPC_CALL_STACK_REF(call_stack, reason) \
+ grpc_stream_ref(&(call_stack)->refcount, reason)
+#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
+#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
+ grpc_stream_ref(&(channel_stack)->refcount, reason)
+#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason)
+#else
+#define GRPC_CALL_STACK_REF(call_stack, reason) \
+ grpc_stream_ref(&(call_stack)->refcount)
+#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
+#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
+ grpc_stream_ref(&(channel_stack)->refcount)
+#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(channel_stack)->refcount)
+#endif
+
/* Destroy a call stack */
-void grpc_call_stack_destroy(grpc_call_stack *stack);
+void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack);
+/* Ignore set pollset - used by filters to implement the set_pollset method
+ if they don't care about pollsets at all. Does nothing. */
+void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_pollset *pollset);
/* Call the next operation in a call stack */
-void grpc_call_next_op(grpc_call_element *elem, grpc_transport_op *op);
+void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op *op);
/* Call the next operation in a channel stack */
-void grpc_channel_next_op(grpc_channel_element *elem, grpc_channel_op *op);
+void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_transport_op *op);
+/* Pass through a request to get_peer to the next child element */
+char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Given the top element of a channel stack, get the channel stack itself */
grpc_channel_stack *grpc_channel_stack_from_top_element(
@@ -219,13 +247,14 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem, grpc_transport_op *op);
+ grpc_call_element *elem, grpc_transport_stream_op *op);
-void grpc_call_element_send_cancel(grpc_call_element *cur_elem);
+void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *cur_elem);
extern int grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
if (grpc_trace_channel) grpc_call_log_op(sev, elem, op)
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H */
+#endif /* GRPC_CORE_CHANNEL_CHANNEL_STACK_H */
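(Illustrative sketch, not part of this patch: with the vtable reshaped as above, a do-nothing pass-through filter reduces to forwarding each hook to the helpers declared in this header. The passthru_* names are hypothetical; field order follows the struct definition above, and get_peer and set_pollset simply delegate to grpc_call_next_get_peer and grpc_call_stack_ignore_set_pollset.)

static void passthru_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                               grpc_call_element *elem,
                                               grpc_transport_stream_op *op) {
  grpc_call_next_op(exec_ctx, elem, op); /* forward call ops unchanged */
}

static void passthru_start_transport_op(grpc_exec_ctx *exec_ctx,
                                        grpc_channel_element *elem,
                                        grpc_transport_op *op) {
  grpc_channel_next_op(exec_ctx, elem, op); /* forward channel ops unchanged */
}

static void passthru_init_call_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *elem,
                                    grpc_call_element_args *args) {}

static void passthru_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                       grpc_call_element *elem) {}

static void passthru_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                       grpc_channel_element *elem,
                                       grpc_channel_element_args *args) {}

static void passthru_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                          grpc_channel_element *elem) {}

static const grpc_channel_filter passthru_filter = {
    passthru_start_transport_stream_op,
    passthru_start_transport_op,
    0, /* sizeof_call_data */
    passthru_init_call_elem,
    grpc_call_stack_ignore_set_pollset, /* no pollset interest */
    passthru_destroy_call_elem,
    0, /* sizeof_channel_data */
    passthru_init_channel_elem,
    passthru_destroy_channel_elem,
    grpc_call_next_get_peer, /* delegate get_peer to the next element */
    "passthru-example"};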
diff --git a/src/core/channel/channel_stack_builder.c b/src/core/channel/channel_stack_builder.c
new file mode 100644
index 0000000000..80e2e393f9
--- /dev/null
+++ b/src/core/channel/channel_stack_builder.c
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/channel/channel_stack_builder.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+int grpc_trace_channel_stack_builder = 0;
+
+typedef struct filter_node {
+ struct filter_node *next;
+ struct filter_node *prev;
+ const grpc_channel_filter *filter;
+ grpc_post_filter_create_init_func init;
+ void *init_arg;
+} filter_node;
+
+struct grpc_channel_stack_builder {
+ // sentinel nodes for filters that have been added
+ filter_node begin;
+ filter_node end;
+ // various set/get-able parameters
+ const grpc_channel_args *args;
+ grpc_transport *transport;
+ const char *name;
+};
+
+struct grpc_channel_stack_builder_iterator {
+ grpc_channel_stack_builder *builder;
+ filter_node *node;
+};
+
+grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
+ grpc_channel_stack_builder *b = gpr_malloc(sizeof(*b));
+ memset(b, 0, sizeof(*b));
+
+ b->begin.filter = NULL;
+ b->end.filter = NULL;
+ b->begin.next = &b->end;
+ b->begin.prev = &b->end;
+ b->end.next = &b->begin;
+ b->end.prev = &b->begin;
+
+ return b;
+}
+
+static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
+ grpc_channel_stack_builder *builder, filter_node *node) {
+ grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
+ it->builder = builder;
+ it->node = node;
+ return it;
+}
+
+void grpc_channel_stack_builder_iterator_destroy(
+ grpc_channel_stack_builder_iterator *it) {
+ gpr_free(it);
+}
+
+grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_create_iterator_at_first(
+ grpc_channel_stack_builder *builder) {
+ return create_iterator_at_filter_node(builder, &builder->begin);
+}
+
+grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_create_iterator_at_last(
+ grpc_channel_stack_builder *builder) {
+ return create_iterator_at_filter_node(builder, &builder->end);
+}
+
+bool grpc_channel_stack_builder_move_next(
+ grpc_channel_stack_builder_iterator *iterator) {
+ if (iterator->node == &iterator->builder->end) return false;
+ iterator->node = iterator->node->next;
+ return true;
+}
+
+bool grpc_channel_stack_builder_move_prev(
+ grpc_channel_stack_builder_iterator *iterator) {
+ if (iterator->node == &iterator->builder->begin) return false;
+ iterator->node = iterator->node->prev;
+ return true;
+}
+
+void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder,
+ const char *name) {
+ GPR_ASSERT(builder->name == NULL);
+ builder->name = name;
+}
+
+void grpc_channel_stack_builder_set_channel_arguments(
+ grpc_channel_stack_builder *builder, const grpc_channel_args *args) {
+ GPR_ASSERT(builder->args == NULL);
+ builder->args = args;
+}
+
+void grpc_channel_stack_builder_set_transport(
+ grpc_channel_stack_builder *builder, grpc_transport *transport) {
+ GPR_ASSERT(builder->transport == NULL);
+ builder->transport = transport;
+}
+
+grpc_transport *grpc_channel_stack_builder_get_transport(
+ grpc_channel_stack_builder *builder) {
+ return builder->transport;
+}
+
+const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder *builder) {
+ return builder->args;
+}
+
+bool grpc_channel_stack_builder_append_filter(
+ grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_create_iterator_at_last(builder);
+ bool ok = grpc_channel_stack_builder_add_filter_before(
+ it, filter, post_init_func, user_data);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return ok;
+}
+
+bool grpc_channel_stack_builder_prepend_filter(
+ grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_create_iterator_at_first(builder);
+ bool ok = grpc_channel_stack_builder_add_filter_after(
+ it, filter, post_init_func, user_data);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return ok;
+}
+
+static void add_after(filter_node *before, const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func,
+ void *user_data) {
+ filter_node *new = gpr_malloc(sizeof(*new));
+ new->next = before->next;
+ new->prev = before;
+ new->next->prev = new->prev->next = new;
+ new->filter = filter;
+ new->init = post_init_func;
+ new->init_arg = user_data;
+}
+
+bool grpc_channel_stack_builder_add_filter_before(
+ grpc_channel_stack_builder_iterator *iterator,
+ const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ if (iterator->node == &iterator->builder->begin) return false;
+ add_after(iterator->node->prev, filter, post_init_func, user_data);
+ return true;
+}
+
+bool grpc_channel_stack_builder_add_filter_after(
+ grpc_channel_stack_builder_iterator *iterator,
+ const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ if (iterator->node == &iterator->builder->end) return false;
+ add_after(iterator->node, filter, post_init_func, user_data);
+ return true;
+}
+
+void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder) {
+ filter_node *p = builder->begin.next;
+ while (p != &builder->end) {
+ filter_node *next = p->next;
+ gpr_free(p);
+ p = next;
+ }
+ gpr_free(builder);
+}
+
+void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack_builder *builder,
+ size_t prefix_bytes, int initial_refs,
+ grpc_iomgr_cb_func destroy,
+ void *destroy_arg) {
+ // count the number of filters
+ size_t num_filters = 0;
+ for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
+    gpr_log(GPR_DEBUG, "%d: %s", (int)num_filters, p->filter->name);
+ num_filters++;
+ }
+
+ // create an array of filters
+ const grpc_channel_filter **filters =
+ gpr_malloc(sizeof(*filters) * num_filters);
+ size_t i = 0;
+ for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
+ filters[i++] = p->filter;
+ }
+
+ // calculate the size of the channel stack
+ size_t channel_stack_size = grpc_channel_stack_size(filters, num_filters);
+
+ // allocate memory, with prefix_bytes followed by channel_stack_size
+ char *result = gpr_malloc(prefix_bytes + channel_stack_size);
+ // fetch a pointer to the channel stack
+ grpc_channel_stack *channel_stack =
+ (grpc_channel_stack *)(result + prefix_bytes);
+ // and initialize it
+ grpc_channel_stack_init(exec_ctx, initial_refs, destroy,
+ destroy_arg == NULL ? result : destroy_arg, filters,
+ num_filters, builder->args, builder->name,
+ channel_stack);
+
+ // run post-initialization functions
+ i = 0;
+ for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
+ if (p->init != NULL) {
+ p->init(channel_stack, grpc_channel_stack_element(channel_stack, i),
+ p->init_arg);
+ }
+ i++;
+ }
+
+ grpc_channel_stack_builder_destroy(builder);
+ gpr_free((grpc_channel_filter **)filters);
+
+ return result;
+}
diff --git a/src/core/channel/channel_stack_builder.h b/src/core/channel/channel_stack_builder.h
new file mode 100644
index 0000000000..15f395e8b8
--- /dev/null
+++ b/src/core/channel/channel_stack_builder.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CHANNEL_CHANNEL_STACK_BUILDER_H
+#define GRPC_CORE_CHANNEL_CHANNEL_STACK_BUILDER_H
+
+#include <stdbool.h>
+
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/channel_stack.h"
+
+/// grpc_channel_stack_builder offers a programmatic interface to select
+/// and order channel filters
+typedef struct grpc_channel_stack_builder grpc_channel_stack_builder;
+typedef struct grpc_channel_stack_builder_iterator
+ grpc_channel_stack_builder_iterator;
+
+/// Create a new channel stack builder
+grpc_channel_stack_builder *grpc_channel_stack_builder_create(void);
+
+/// Assign a name to the channel stack: \a name must be statically allocated
+void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder,
+ const char *name);
+
+/// Attach \a transport to the builder (does not take ownership)
+void grpc_channel_stack_builder_set_transport(
+ grpc_channel_stack_builder *builder, grpc_transport *transport);
+
+/// Fetch attached transport
+grpc_transport *grpc_channel_stack_builder_get_transport(
+ grpc_channel_stack_builder *builder);
+
+/// Set channel arguments: \a args must continue to exist until after
+/// grpc_channel_stack_builder_finish returns
+void grpc_channel_stack_builder_set_channel_arguments(
+ grpc_channel_stack_builder *builder, const grpc_channel_args *args);
+
+/// Return a borrowed pointer to the channel arguments
+const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder *builder);
+
+/// Begin iterating over already defined filters in the builder at the beginning
+grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_create_iterator_at_first(
+ grpc_channel_stack_builder *builder);
+
+/// Begin iterating over already defined filters in the builder at the end
+grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_create_iterator_at_last(
+ grpc_channel_stack_builder *builder);
+
+/// Is an iterator at the first element?
+bool grpc_channel_stack_builder_iterator_is_first(
+ grpc_channel_stack_builder_iterator *iterator);
+
+/// Is an iterator at the end?
+bool grpc_channel_stack_builder_iterator_is_end(
+ grpc_channel_stack_builder_iterator *iterator);
+
+/// Move an iterator to the next item
+bool grpc_channel_stack_builder_move_next(
+ grpc_channel_stack_builder_iterator *iterator);
+
+/// Move an iterator to the previous item
+bool grpc_channel_stack_builder_move_prev(
+ grpc_channel_stack_builder_iterator *iterator);
+
+typedef void (*grpc_post_filter_create_init_func)(
+ grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg);
+
+/// Add \a filter to the stack, after \a iterator.
+/// Call \a post_init_func(..., \a user_data) once the channel stack is
+/// created.
+bool grpc_channel_stack_builder_add_filter_after(
+ grpc_channel_stack_builder_iterator *iterator,
+ const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func,
+ void *user_data) GRPC_MUST_USE_RESULT;
+
+/// Add \a filter to the stack, before \a iterator.
+/// Call \a post_init_func(..., \a user_data) once the channel stack is
+/// created.
+bool grpc_channel_stack_builder_add_filter_before(
+ grpc_channel_stack_builder_iterator *iterator,
+ const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func,
+ void *user_data) GRPC_MUST_USE_RESULT;
+
+/// Add \a filter to the beginning of the filter list.
+/// Call \a post_init_func(..., \a user_data) once the channel stack is
+/// created.
+bool grpc_channel_stack_builder_prepend_filter(
+ grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func,
+ void *user_data) GRPC_MUST_USE_RESULT;
+
+/// Add \a filter to the end of the filter list.
+/// Call \a post_init_func(..., \a user_data) once the channel stack is
+/// created.
+bool grpc_channel_stack_builder_append_filter(
+ grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_post_filter_create_init_func post_init_func,
+ void *user_data) GRPC_MUST_USE_RESULT;
+
+/// Terminate iteration and destroy \a iterator
+void grpc_channel_stack_builder_iterator_destroy(
+ grpc_channel_stack_builder_iterator *iterator);
+
+/// Destroy the builder, return the freshly minted channel stack
+/// Allocates \a prefix_bytes bytes before the channel stack
+/// Returns the base pointer of the allocated block
+/// \a initial_refs, \a destroy, \a destroy_arg are as per
+/// grpc_channel_stack_init
+void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack_builder *builder,
+ size_t prefix_bytes, int initial_refs,
+ grpc_iomgr_cb_func destroy,
+ void *destroy_arg);
+
+/// Destroy the builder without creating a channel stack
+void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder);
+
+extern int grpc_trace_channel_stack_builder;
+
+#endif /* GRPC_CORE_CHANNEL_CHANNEL_STACK_BUILDER_H */
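(Illustrative sketch, not part of this patch: a typical consumer creates a builder, sets its name and channel arguments, appends filters, and calls grpc_channel_stack_builder_finish, which lays out prefix_bytes of caller data followed by the channel stack and, when destroy_arg is NULL, passes the base pointer of the allocation to the destroy callback. my_channel, destroy_my_channel, build_my_channel and some_filter are hypothetical names.)

typedef struct my_channel {
  int some_channel_state; /* hypothetical bookkeeping kept in the prefix */
} my_channel;

extern const grpc_channel_filter some_filter; /* placeholder for any filter */

static void destroy_my_channel(grpc_exec_ctx *exec_ctx, void *arg,
                               bool success) {
  gpr_free(arg); /* arg is the base pointer, since destroy_arg was NULL */
}

static my_channel *build_my_channel(grpc_exec_ctx *exec_ctx,
                                    const grpc_channel_args *args) {
  grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
  grpc_channel_stack_builder_set_name(builder, "example-channel");
  grpc_channel_stack_builder_set_channel_arguments(builder, args);
  if (!grpc_channel_stack_builder_append_filter(builder, &some_filter, NULL,
                                                NULL)) {
    grpc_channel_stack_builder_destroy(builder);
    return NULL;
  }
  /* finish() consumes the builder and returns the base of one allocation
     laid out as: [my_channel][channel stack] */
  my_channel *channel = grpc_channel_stack_builder_finish(
      exec_ctx, builder, sizeof(my_channel), 1 /* initial_refs */,
      destroy_my_channel, NULL /* destroy_arg: default to base pointer */);
  channel->some_channel_state = 0;
  return channel;
}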
diff --git a/src/core/channel/child_channel.c b/src/core/channel/child_channel.c
deleted file mode 100644
index 6690265d75..0000000000
--- a/src/core/channel/child_channel.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/channel/child_channel.h"
-#include "src/core/iomgr/iomgr.h"
-#include <grpc/support/alloc.h>
-
-/* Link back filter: passes up calls to the client channel, pushes down calls
- down */
-
-static void maybe_destroy_channel(grpc_child_channel *channel);
-
-typedef struct {
- gpr_mu mu;
- gpr_cv cv;
- grpc_channel_element *back;
- /* # of active calls on the channel */
- gpr_uint32 active_calls;
- /* has grpc_child_channel_destroy been called? */
- gpr_uint8 destroyed;
- /* has the transport reported itself disconnected? */
- gpr_uint8 disconnected;
- /* are we calling 'back' - our parent channel */
- gpr_uint8 calling_back;
- /* have we or our parent sent goaway yet? - dup suppression */
- gpr_uint8 sent_goaway;
- /* are we currently sending farewell (in this file: goaway + disconnect) */
- gpr_uint8 sending_farewell;
- /* have we sent farewell (goaway + disconnect) */
- gpr_uint8 sent_farewell;
-
- grpc_iomgr_closure finally_destroy_channel_closure;
- grpc_iomgr_closure send_farewells_closure;
-} lb_channel_data;
-
-typedef struct { grpc_child_channel *channel; } lb_call_data;
-
-static void lb_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
- grpc_call_next_op(elem, op);
-}
-
-/* Currently we assume all channel operations should just be pushed up. */
-static void lb_channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem,
- grpc_channel_op *op) {
- lb_channel_data *chand = elem->channel_data;
- grpc_channel_element *back;
- int calling_back = 0;
-
- switch (op->dir) {
- case GRPC_CALL_UP:
- gpr_mu_lock(&chand->mu);
- back = chand->back;
- if (back) {
- chand->calling_back++;
- calling_back = 1;
- }
- gpr_mu_unlock(&chand->mu);
- if (back) {
- back->filter->channel_op(chand->back, elem, op);
- } else if (op->type == GRPC_TRANSPORT_GOAWAY) {
- gpr_slice_unref(op->data.goaway.message);
- }
- break;
- case GRPC_CALL_DOWN:
- grpc_channel_next_op(elem, op);
- break;
- }
-
- gpr_mu_lock(&chand->mu);
- switch (op->type) {
- case GRPC_TRANSPORT_CLOSED:
- chand->disconnected = 1;
- maybe_destroy_channel(grpc_channel_stack_from_top_element(elem));
- break;
- case GRPC_CHANNEL_GOAWAY:
- chand->sent_goaway = 1;
- break;
- case GRPC_CHANNEL_DISCONNECT:
- case GRPC_TRANSPORT_GOAWAY:
- case GRPC_ACCEPT_CALL:
- break;
- }
-
- if (calling_back) {
- chand->calling_back--;
- gpr_cv_signal(&chand->cv);
- maybe_destroy_channel(grpc_channel_stack_from_top_element(elem));
- }
- gpr_mu_unlock(&chand->mu);
-}
-
-/* Constructor for call_data */
-static void lb_init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {}
-
-/* Destructor for call_data */
-static void lb_destroy_call_elem(grpc_call_element *elem) {}
-
-/* Constructor for channel_data */
-static void lb_init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
- lb_channel_data *chand = elem->channel_data;
- GPR_ASSERT(is_first);
- GPR_ASSERT(!is_last);
- gpr_mu_init(&chand->mu);
- gpr_cv_init(&chand->cv);
- chand->back = NULL;
- chand->destroyed = 0;
- chand->disconnected = 0;
- chand->active_calls = 0;
- chand->sent_goaway = 0;
- chand->calling_back = 0;
- chand->sending_farewell = 0;
- chand->sent_farewell = 0;
-}
-
-/* Destructor for channel_data */
-static void lb_destroy_channel_elem(grpc_channel_element *elem) {
- lb_channel_data *chand = elem->channel_data;
- gpr_mu_destroy(&chand->mu);
- gpr_cv_destroy(&chand->cv);
-}
-
-const grpc_channel_filter grpc_child_channel_top_filter = {
- lb_start_transport_op, lb_channel_op,
- sizeof(lb_call_data), lb_init_call_elem, lb_destroy_call_elem,
- sizeof(lb_channel_data), lb_init_channel_elem, lb_destroy_channel_elem,
- "child-channel",
-};
-
-/* grpc_child_channel proper */
-
-#define LINK_BACK_ELEM_FROM_CHANNEL(channel) \
- grpc_channel_stack_element((channel), 0)
-
-#define LINK_BACK_ELEM_FROM_CALL(call) grpc_call_stack_element((call), 0)
-
-static void finally_destroy_channel(void *c, int success) {
- /* ignore success or not... this is a destruction callback and will only
- happen once - the only purpose here is to release resources */
- grpc_child_channel *channel = c;
- lb_channel_data *chand = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
- /* wait for the initiator to leave the mutex */
- gpr_mu_lock(&chand->mu);
- gpr_mu_unlock(&chand->mu);
- grpc_channel_stack_destroy(channel);
- gpr_free(channel);
-}
-
-static void send_farewells(void *c, int success) {
- grpc_child_channel *channel = c;
- grpc_channel_element *lbelem = LINK_BACK_ELEM_FROM_CHANNEL(channel);
- lb_channel_data *chand = lbelem->channel_data;
- int send_goaway;
- grpc_channel_op op;
-
- gpr_mu_lock(&chand->mu);
- send_goaway = !chand->sent_goaway;
- chand->sent_goaway = 1;
- gpr_mu_unlock(&chand->mu);
-
- if (send_goaway) {
- op.type = GRPC_CHANNEL_GOAWAY;
- op.dir = GRPC_CALL_DOWN;
- op.data.goaway.status = GRPC_STATUS_OK;
- op.data.goaway.message = gpr_slice_from_copied_string("Client disconnect");
- grpc_channel_next_op(lbelem, &op);
- }
-
- op.type = GRPC_CHANNEL_DISCONNECT;
- op.dir = GRPC_CALL_DOWN;
- grpc_channel_next_op(lbelem, &op);
-
- gpr_mu_lock(&chand->mu);
- chand->sending_farewell = 0;
- chand->sent_farewell = 1;
- maybe_destroy_channel(channel);
- gpr_mu_unlock(&chand->mu);
-}
-
-static void maybe_destroy_channel(grpc_child_channel *channel) {
- lb_channel_data *chand = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
- if (chand->destroyed && chand->disconnected && chand->active_calls == 0 &&
- !chand->sending_farewell && !chand->calling_back) {
- chand->finally_destroy_channel_closure.cb = finally_destroy_channel;
- chand->finally_destroy_channel_closure.cb_arg = channel;
- grpc_iomgr_add_callback(&chand->finally_destroy_channel_closure);
- } else if (chand->destroyed && !chand->disconnected &&
- chand->active_calls == 0 && !chand->sending_farewell &&
- !chand->sent_farewell) {
- chand->sending_farewell = 1;
- chand->send_farewells_closure.cb = send_farewells;
- chand->send_farewells_closure.cb_arg = channel;
- grpc_iomgr_add_callback(&chand->send_farewells_closure);
- }
-}
-
-grpc_child_channel *grpc_child_channel_create(
- grpc_channel_element *parent, const grpc_channel_filter **filters,
- size_t filter_count, const grpc_channel_args *args,
- grpc_mdctx *metadata_context) {
- grpc_channel_stack *stk =
- gpr_malloc(grpc_channel_stack_size(filters, filter_count));
- lb_channel_data *lb;
-
- grpc_channel_stack_init(filters, filter_count, args, metadata_context, stk);
-
- lb = LINK_BACK_ELEM_FROM_CHANNEL(stk)->channel_data;
- gpr_mu_lock(&lb->mu);
- lb->back = parent;
- gpr_mu_unlock(&lb->mu);
-
- return stk;
-}
-
-void grpc_child_channel_destroy(grpc_child_channel *channel,
- int wait_for_callbacks) {
- grpc_channel_element *lbelem = LINK_BACK_ELEM_FROM_CHANNEL(channel);
- lb_channel_data *chand = lbelem->channel_data;
-
- gpr_mu_lock(&chand->mu);
- while (wait_for_callbacks && chand->calling_back) {
- gpr_cv_wait(&chand->cv, &chand->mu, gpr_inf_future);
- }
-
- chand->back = NULL;
- chand->destroyed = 1;
- maybe_destroy_channel(channel);
- gpr_mu_unlock(&chand->mu);
-}
-
-void grpc_child_channel_handle_op(grpc_child_channel *channel,
- grpc_channel_op *op) {
- grpc_channel_next_op(LINK_BACK_ELEM_FROM_CHANNEL(channel), op);
-}
-
-grpc_child_call *grpc_child_channel_create_call(grpc_child_channel *channel,
- grpc_call_element *parent,
- grpc_transport_op *initial_op) {
- grpc_call_stack *stk = gpr_malloc((channel)->call_stack_size);
- grpc_call_element *lbelem;
- lb_call_data *lbcalld;
- lb_channel_data *lbchand;
-
- grpc_call_stack_init(channel, NULL, initial_op, stk);
- lbelem = LINK_BACK_ELEM_FROM_CALL(stk);
- lbchand = lbelem->channel_data;
- lbcalld = lbelem->call_data;
- lbcalld->channel = channel;
-
- gpr_mu_lock(&lbchand->mu);
- lbchand->active_calls++;
- gpr_mu_unlock(&lbchand->mu);
-
- return stk;
-}
-
-void grpc_child_call_destroy(grpc_child_call *call) {
- grpc_call_element *lbelem = LINK_BACK_ELEM_FROM_CALL(call);
- lb_call_data *calld = lbelem->call_data;
- lb_channel_data *chand = lbelem->channel_data;
- grpc_child_channel *channel = calld->channel;
- grpc_call_stack_destroy(call);
- gpr_free(call);
- gpr_mu_lock(&chand->mu);
- chand->active_calls--;
- maybe_destroy_channel(channel);
- gpr_mu_unlock(&chand->mu);
-}
-
-grpc_call_element *grpc_child_call_get_top_element(grpc_child_call *call) {
- return LINK_BACK_ELEM_FROM_CALL(call);
-}
diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c
index 711e105464..f021a8ae32 100644
--- a/src/core/channel/client_channel.c
+++ b/src/core/channel/client_channel.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,511 +34,485 @@
#include "src/core/channel/client_channel.h"
#include <stdio.h>
+#include <string.h>
-#include "src/core/channel/channel_args.h"
-#include "src/core/channel/child_channel.h"
-#include "src/core/channel/connected_channel.h"
-#include "src/core/iomgr/iomgr.h"
-#include "src/core/iomgr/pollset_set.h"
-#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-/* Client channel implementation */
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/connected_channel.h"
+#include "src/core/channel/subchannel_call_holder.h"
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/surface/channel.h"
+#include "src/core/transport/connectivity_state.h"
-typedef struct call_data call_data;
+/* Client channel implementation */
-typedef struct {
- /* protects children, child_count, child_capacity, active_child,
- transport_setup_initiated
- does not protect channel stacks held by children
- transport_setup is assumed to be set once during construction */
- gpr_mu mu;
-
- /* the sending child (may be null) */
- grpc_child_channel *active_child;
- grpc_mdctx *mdctx;
-
- /* calls waiting for a channel to be ready */
- call_data **waiting_children;
- size_t waiting_child_count;
- size_t waiting_child_capacity;
-
- /* transport setup for this channel */
- grpc_transport_setup *transport_setup;
- int transport_setup_initiated;
-
- grpc_channel_args *args;
+typedef grpc_subchannel_call_holder call_data;
+
+typedef struct client_channel_channel_data {
+ /** resolver for this channel */
+ grpc_resolver *resolver;
+ /** have we started resolving this channel */
+ int started_resolving;
+
+ /** mutex protecting client configuration, including all
+ variables below in this data structure */
+ gpr_mu mu_config;
+ /** currently active load balancer - guarded by mu_config */
+ grpc_lb_policy *lb_policy;
+ /** incoming configuration - set by resolver.next
+ guarded by mu_config */
+ grpc_client_config *incoming_configuration;
+ /** a list of closures that are all waiting for config to come in */
+ grpc_closure_list waiting_for_config_closures;
+ /** resolver callback */
+ grpc_closure on_config_changed;
+ /** connectivity state being tracked */
+ grpc_connectivity_state_tracker state_tracker;
+ /** when an lb_policy arrives, should we try to exit idle */
+ int exit_idle_when_lb_policy_arrives;
+ /** owning stack */
+ grpc_channel_stack *owning_stack;
+ /** interested parties (owned) */
+ grpc_pollset_set *interested_parties;
} channel_data;
-typedef enum {
- CALL_CREATED,
- CALL_WAITING,
- CALL_ACTIVE,
- CALL_CANCELLED
-} call_state;
+/** We create one watcher for each new lb_policy that is returned from a
+    resolver, to watch for state changes from the lb_policy. When a state
+    change is seen, we update the channel, and create a new watcher. */
+typedef struct {
+ channel_data *chand;
+ grpc_closure on_changed;
+ grpc_connectivity_state state;
+ grpc_lb_policy *lb_policy;
+} lb_policy_connectivity_watcher;
-struct call_data {
- /* owning element */
+typedef struct {
+ grpc_closure closure;
grpc_call_element *elem;
+} waiting_call;
- call_state state;
- gpr_timespec deadline;
- union {
- struct {
- /* our child call stack */
- grpc_child_call *child_call;
- } active;
- grpc_transport_op waiting_op;
- struct {
- grpc_linked_mdelem status;
- grpc_linked_mdelem details;
- } cancelled;
- } s;
-};
-
-static int prepare_activate(grpc_call_element *elem,
- grpc_child_channel *on_child) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (calld->state == CALL_CANCELLED) return 0;
+static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
+}
- /* no more access to calld->s.waiting allowed */
- GPR_ASSERT(calld->state == CALL_WAITING);
+static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
+}
- if (calld->s.waiting_op.bind_pollset) {
- grpc_transport_setup_del_interested_party(chand->transport_setup,
- calld->s.waiting_op.bind_pollset);
+static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state);
+
+static void on_lb_policy_state_changed_locked(
+ grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
+ grpc_connectivity_state publish_state = w->state;
+ /* check if the notification is for a stale policy */
+ if (w->lb_policy != w->chand->lb_policy) return;
+
+ if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
+ w->chand->resolver != NULL) {
+ publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
+ GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
+ w->chand->lb_policy = NULL;
+ }
+ grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, publish_state,
+ "lb_changed");
+ if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
+ watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
}
+}
- calld->state = CALL_ACTIVE;
+static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ lb_policy_connectivity_watcher *w = arg;
- /* create a child call */
- /* TODO(ctiller): pass the waiting op down here */
- calld->s.active.child_call =
- grpc_child_channel_create_call(on_child, elem, NULL);
+ gpr_mu_lock(&w->chand->mu_config);
+ on_lb_policy_state_changed_locked(exec_ctx, w);
+ gpr_mu_unlock(&w->chand->mu_config);
- return 1;
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
+ gpr_free(w);
}
-static void complete_activate(grpc_call_element *elem, grpc_transport_op *op) {
- call_data *calld = elem->call_data;
- grpc_call_element *child_elem =
- grpc_child_call_get_top_element(calld->s.active.child_call);
+static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state) {
+ lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
+
+ w->chand = chand;
+ grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
+ w->state = current_state;
+ w->lb_policy = lb_policy;
+ grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
+ &w->on_changed);
+}
- GPR_ASSERT(calld->state == CALL_ACTIVE);
+static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ channel_data *chand = arg;
+ grpc_lb_policy *lb_policy = NULL;
+ grpc_lb_policy *old_lb_policy;
+ grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ int exit_idle = 0;
+
+ if (chand->incoming_configuration != NULL) {
+ lb_policy = grpc_client_config_get_lb_policy(chand->incoming_configuration);
+ if (lb_policy != NULL) {
+ GRPC_LB_POLICY_REF(lb_policy, "channel");
+ GRPC_LB_POLICY_REF(lb_policy, "config_change");
+ state = grpc_lb_policy_check_connectivity(exec_ctx, lb_policy);
+ }
- /* continue the start call down the stack, this nees to happen after metadata
- are flushed*/
- child_elem->filter->start_transport_op(child_elem, op);
-}
+ grpc_client_config_unref(exec_ctx, chand->incoming_configuration);
+ }
-static void remove_waiting_child(channel_data *chand, call_data *calld) {
- size_t new_count;
- size_t i;
- for (i = 0, new_count = 0; i < chand->waiting_child_count; i++) {
- if (chand->waiting_children[i] == calld) {
- grpc_transport_setup_del_interested_party(
- chand->transport_setup, calld->s.waiting_op.bind_pollset);
- continue;
+ chand->incoming_configuration = NULL;
+
+ if (lb_policy != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
+ chand->interested_parties);
+ }
+
+ gpr_mu_lock(&chand->mu_config);
+ old_lb_policy = chand->lb_policy;
+ chand->lb_policy = lb_policy;
+ if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
+ NULL);
+ }
+ if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
+ GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
+ exit_idle = 1;
+ chand->exit_idle_when_lb_policy_arrives = 0;
+ }
+
+ if (iomgr_success && chand->resolver) {
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state,
+ "new_lb+resolver");
+ if (lb_policy != NULL) {
+ watch_lb_policy(exec_ctx, chand, lb_policy, state);
+ }
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next(exec_ctx, chand->resolver,
+ &chand->incoming_configuration,
+ &chand->on_config_changed);
+ gpr_mu_unlock(&chand->mu_config);
+ } else {
+ if (chand->resolver != NULL) {
+ grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+ chand->resolver = NULL;
}
- chand->waiting_children[new_count++] = chand->waiting_children[i];
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "resolver_gone");
+ gpr_mu_unlock(&chand->mu_config);
}
- GPR_ASSERT(new_count == chand->waiting_child_count - 1 ||
- new_count == chand->waiting_child_count);
- chand->waiting_child_count = new_count;
-}
-static void handle_op_after_cancellation(grpc_call_element *elem,
- grpc_transport_op *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (op->send_ops) {
- grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
+ if (exit_idle) {
+ grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
}
- if (op->recv_ops) {
- char status[GPR_LTOA_MIN_BUFSIZE];
- grpc_metadata_batch mdb;
- gpr_ltoa(GRPC_STATUS_CANCELLED, status);
- calld->s.cancelled.status.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
- calld->s.cancelled.details.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
- calld->s.cancelled.status.prev = calld->s.cancelled.details.next = NULL;
- calld->s.cancelled.status.next = &calld->s.cancelled.details;
- calld->s.cancelled.details.prev = &calld->s.cancelled.status;
- mdb.list.head = &calld->s.cancelled.status;
- mdb.list.tail = &calld->s.cancelled.details;
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future;
- grpc_sopb_add_metadata(op->recv_ops, mdb);
- *op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
+
+ if (old_lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(
+ exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
- if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
+
+ if (lb_policy != NULL) {
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
}
+
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
-static void cc_start_transport_op(grpc_call_element *elem,
+static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_transport_op *op) {
- call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- grpc_call_element *child_elem;
- grpc_transport_op waiting_op;
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- gpr_mu_lock(&chand->mu);
- switch (calld->state) {
- case CALL_ACTIVE:
- child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
- gpr_mu_unlock(&chand->mu);
- child_elem->filter->start_transport_op(child_elem, op);
- break;
- case CALL_CREATED:
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&chand->mu);
- handle_op_after_cancellation(elem, op);
- } else {
- calld->state = CALL_WAITING;
- calld->s.waiting_op.bind_pollset = NULL;
- if (chand->active_child) {
- /* channel is connected - use the connected stack */
- if (prepare_activate(elem, chand->active_child)) {
- gpr_mu_unlock(&chand->mu);
- /* activate the request (pass it down) outside the lock */
- complete_activate(elem, op);
- } else {
- gpr_mu_unlock(&chand->mu);
- }
- } else {
- /* check to see if we should initiate a connection (if we're not
- already),
- but don't do so until outside the lock to avoid re-entrancy
- problems if
- the callback is immediate */
- int initiate_transport_setup = 0;
- if (!chand->transport_setup_initiated) {
- chand->transport_setup_initiated = 1;
- initiate_transport_setup = 1;
- }
- /* add this call to the waiting set to be resumed once we have a child
- channel stack, growing the waiting set if needed */
- if (chand->waiting_child_count == chand->waiting_child_capacity) {
- chand->waiting_child_capacity =
- GPR_MAX(chand->waiting_child_capacity * 2, 8);
- chand->waiting_children = gpr_realloc(
- chand->waiting_children,
- chand->waiting_child_capacity * sizeof(call_data *));
- }
- calld->s.waiting_op = *op;
- chand->waiting_children[chand->waiting_child_count++] = calld;
- grpc_transport_setup_add_interested_party(chand->transport_setup,
- op->bind_pollset);
- gpr_mu_unlock(&chand->mu);
-
- /* finally initiate transport setup if needed */
- if (initiate_transport_setup) {
- grpc_transport_setup_initiate(chand->transport_setup);
- }
- }
- }
- break;
- case CALL_WAITING:
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- waiting_op = calld->s.waiting_op;
- remove_waiting_child(chand, calld);
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&chand->mu);
- handle_op_after_cancellation(elem, &waiting_op);
- handle_op_after_cancellation(elem, op);
- } else {
- GPR_ASSERT((calld->s.waiting_op.send_ops == NULL) !=
- (op->send_ops == NULL));
- GPR_ASSERT((calld->s.waiting_op.recv_ops == NULL) !=
- (op->recv_ops == NULL));
- if (op->send_ops) {
- calld->s.waiting_op.send_ops = op->send_ops;
- calld->s.waiting_op.is_last_send = op->is_last_send;
- calld->s.waiting_op.on_done_send = op->on_done_send;
- }
- if (op->recv_ops) {
- calld->s.waiting_op.recv_ops = op->recv_ops;
- calld->s.waiting_op.recv_state = op->recv_state;
- calld->s.waiting_op.on_done_recv = op->on_done_recv;
- }
- gpr_mu_unlock(&chand->mu);
- if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
- }
- }
- break;
- case CALL_CANCELLED:
- gpr_mu_unlock(&chand->mu);
- handle_op_after_cancellation(elem, op);
- break;
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
+
+ GPR_ASSERT(op->set_accept_stream == false);
+ if (op->bind_pollset != NULL) {
+ grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
+ op->bind_pollset);
}
+
+ gpr_mu_lock(&chand->mu_config);
+ if (op->on_connectivity_state_change != NULL) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &chand->state_tracker, op->connectivity_state,
+ op->on_connectivity_state_change);
+ op->on_connectivity_state_change = NULL;
+ op->connectivity_state = NULL;
+ }
+
+ if (op->send_ping != NULL) {
+ if (chand->lb_policy == NULL) {
+ grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, false, NULL);
+ } else {
+ grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
+ op->bind_pollset = NULL;
+ }
+ op->send_ping = NULL;
+ }
+
+ if (op->disconnect && chand->resolver != NULL) {
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "disconnect");
+ grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+ chand->resolver = NULL;
+ if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ chand->lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+ chand->lb_policy = NULL;
+ }
+ }
+ gpr_mu_unlock(&chand->mu_config);
}
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
+typedef struct {
+ grpc_metadata_batch *initial_metadata;
+ grpc_connected_subchannel **connected_subchannel;
+ grpc_closure *on_ready;
+ grpc_call_element *elem;
+ grpc_closure closure;
+} continue_picking_args;
+
+static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready);
+
+static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ continue_picking_args *cpa = arg;
+ if (!success) {
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
+ } else if (cpa->connected_subchannel == NULL) {
+ /* cancelled, do nothing */
+ } else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->connected_subchannel, cpa->on_ready)) {
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, true, NULL);
+ }
+ gpr_free(cpa);
+}
+
+static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready) {
+ grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
- grpc_child_channel *child_channel;
- grpc_channel_op rop;
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
+ call_data *calld = elem->call_data;
+ continue_picking_args *cpa;
+ grpc_closure *closure;
- switch (op->type) {
- case GRPC_CHANNEL_GOAWAY:
- /* sending goaway: clear out the active child on the way through */
- gpr_mu_lock(&chand->mu);
- child_channel = chand->active_child;
- chand->active_child = NULL;
- gpr_mu_unlock(&chand->mu);
- if (child_channel) {
- grpc_child_channel_handle_op(child_channel, op);
- grpc_child_channel_destroy(child_channel, 1);
- } else {
- gpr_slice_unref(op->data.goaway.message);
- }
- break;
- case GRPC_CHANNEL_DISCONNECT:
- /* sending disconnect: clear out the active child on the way through */
- gpr_mu_lock(&chand->mu);
- child_channel = chand->active_child;
- chand->active_child = NULL;
- gpr_mu_unlock(&chand->mu);
- if (child_channel) {
- grpc_child_channel_destroy(child_channel, 1);
- }
- /* fake a transport closed to satisfy the refcounting in client */
- rop.type = GRPC_TRANSPORT_CLOSED;
- rop.dir = GRPC_CALL_UP;
- grpc_channel_next_op(elem, &rop);
- break;
- case GRPC_TRANSPORT_GOAWAY:
- /* receiving goaway: if it's from our active child, drop the active child;
- in all cases consume the event here */
- gpr_mu_lock(&chand->mu);
- child_channel = grpc_channel_stack_from_top_element(from_elem);
- if (child_channel == chand->active_child) {
- chand->active_child = NULL;
- } else {
- child_channel = NULL;
- }
- gpr_mu_unlock(&chand->mu);
- if (child_channel) {
- grpc_child_channel_destroy(child_channel, 0);
- }
- gpr_slice_unref(op->data.goaway.message);
- break;
- case GRPC_TRANSPORT_CLOSED:
- /* receiving disconnect: if it's from our active child, drop the active
- child; in all cases consume the event here */
- gpr_mu_lock(&chand->mu);
- child_channel = grpc_channel_stack_from_top_element(from_elem);
- if (child_channel == chand->active_child) {
- chand->active_child = NULL;
- } else {
- child_channel = NULL;
- }
- gpr_mu_unlock(&chand->mu);
- if (child_channel) {
- grpc_child_channel_destroy(child_channel, 0);
- }
- break;
- default:
- switch (op->dir) {
- case GRPC_CALL_UP:
- grpc_channel_next_op(elem, op);
- break;
- case GRPC_CALL_DOWN:
- gpr_log(GPR_ERROR, "unhandled channel op: %d", op->type);
- abort();
- break;
+ GPR_ASSERT(connected_subchannel);
+
+ gpr_mu_lock(&chand->mu_config);
+ if (initial_metadata == NULL) {
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
+ connected_subchannel);
+ }
+ for (closure = chand->waiting_for_config_closures.head; closure != NULL;
+ closure = grpc_closure_next(closure)) {
+ cpa = closure->cb_arg;
+ if (cpa->connected_subchannel == connected_subchannel) {
+ cpa->connected_subchannel = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
}
- break;
+ }
+ gpr_mu_unlock(&chand->mu_config);
+ return 1;
}
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy *lb_policy = chand->lb_policy;
+ int r;
+ GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
+ gpr_mu_unlock(&chand->mu_config);
+ r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollset,
+ initial_metadata, connected_subchannel, on_ready);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
+ return r;
+ }
+ if (chand->resolver != NULL && !chand->started_resolving) {
+ chand->started_resolving = 1;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next(exec_ctx, chand->resolver,
+ &chand->incoming_configuration,
+ &chand->on_config_changed);
+ }
+ cpa = gpr_malloc(sizeof(*cpa));
+ cpa->initial_metadata = initial_metadata;
+ cpa->connected_subchannel = connected_subchannel;
+ cpa->on_ready = on_ready;
+ cpa->elem = elem;
+ grpc_closure_init(&cpa->closure, continue_picking, cpa);
+ grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
+ gpr_mu_unlock(&chand->mu_config);
+ return 0;
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
- call_data *calld = elem->call_data;
-
- /* TODO(ctiller): is there something useful we can do here? */
- GPR_ASSERT(initial_op == NULL);
-
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
- GPR_ASSERT(server_transport_data == NULL);
- calld->elem = elem;
- calld->state = CALL_CREATED;
- calld->deadline = gpr_inf_future;
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
+ args->call_stack);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
-
- /* if the call got activated, we need to destroy the child stack also, and
- remove it from the in-flight requests tracked by the child_entry we
- picked */
- gpr_mu_lock(&chand->mu);
- switch (calld->state) {
- case CALL_ACTIVE:
- gpr_mu_unlock(&chand->mu);
- grpc_child_call_destroy(calld->s.active.child_call);
- break;
- case CALL_WAITING:
- remove_waiting_child(chand, calld);
- gpr_mu_unlock(&chand->mu);
- break;
- default:
- gpr_mu_unlock(&chand->mu);
- break;
- }
- GPR_ASSERT(calld->state != CALL_WAITING);
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
- GPR_ASSERT(!is_first);
- GPR_ASSERT(is_last);
+ memset(chand, 0, sizeof(*chand));
+
+ GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
- gpr_mu_init(&chand->mu);
- chand->active_child = NULL;
- chand->waiting_children = NULL;
- chand->waiting_child_count = 0;
- chand->waiting_child_capacity = 0;
- chand->transport_setup = NULL;
- chand->transport_setup_initiated = 0;
- chand->args = grpc_channel_args_copy(args);
- chand->mdctx = metadata_context;
+ gpr_mu_init(&chand->mu_config);
+ grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
+ chand->owning_stack = args->channel_stack;
+
+ grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
+ "client_channel");
+ chand->interested_parties = grpc_pollset_set_create();
}
/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
- grpc_transport_setup_cancel(chand->transport_setup);
-
- if (chand->active_child) {
- grpc_child_channel_destroy(chand->active_child, 1);
- chand->active_child = NULL;
+ if (chand->resolver != NULL) {
+ grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
+ if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ chand->lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+ }
+ grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
+ grpc_pollset_set_destroy(chand->interested_parties);
+ gpr_mu_destroy(&chand->mu_config);
+}
- grpc_channel_args_destroy(chand->args);
-
- gpr_mu_destroy(&chand->mu);
- GPR_ASSERT(chand->waiting_child_count == 0);
- gpr_free(chand->waiting_children);
+static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_channel_filter = {
- cc_start_transport_op, channel_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, "client-channel",
+ cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
+ init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
-grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
- grpc_channel_stack *channel_stack, grpc_transport *transport,
- grpc_channel_filter const **channel_filters, size_t num_channel_filters,
- grpc_mdctx *mdctx) {
- /* we just got a new transport: lets create a child channel stack for it */
+void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
+ grpc_resolver *resolver) {
+  /* post construction initialization: set the resolver */
grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
channel_data *chand = elem->channel_data;
- size_t num_child_filters = 2 + num_channel_filters;
- grpc_channel_filter const **child_filters;
- grpc_transport_setup_result result;
- grpc_child_channel *old_active = NULL;
- call_data **waiting_children;
- size_t waiting_child_count;
- size_t i;
- grpc_transport_op *call_ops;
-
- /* build the child filter stack */
- child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters);
- /* we always need a link back filter to get back to the connected channel */
- child_filters[0] = &grpc_child_channel_top_filter;
- for (i = 0; i < num_channel_filters; i++) {
- child_filters[i + 1] = channel_filters[i];
- }
- /* and we always need a connected channel to talk to the transport */
- child_filters[num_child_filters - 1] = &grpc_connected_channel_filter;
-
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
-
- /* BEGIN LOCKING CHANNEL */
- gpr_mu_lock(&chand->mu);
- chand->transport_setup_initiated = 0;
-
- if (chand->active_child) {
- old_active = chand->active_child;
- }
- chand->active_child = grpc_child_channel_create(
- elem, child_filters, num_child_filters, chand->args, mdctx);
- result =
- grpc_connected_channel_bind_transport(chand->active_child, transport);
-
- /* capture the waiting children - we'll activate them outside the lock
- to avoid re-entrancy problems */
- waiting_children = chand->waiting_children;
- waiting_child_count = chand->waiting_child_count;
- /* bumping up inflight_requests here avoids taking a lock per rpc below */
-
- chand->waiting_children = NULL;
- chand->waiting_child_count = 0;
- chand->waiting_child_capacity = 0;
-
- call_ops = gpr_malloc(sizeof(*call_ops) * waiting_child_count);
-
- for (i = 0; i < waiting_child_count; i++) {
- call_ops[i] = waiting_children[i]->s.waiting_op;
- if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
- waiting_children[i] = NULL;
- grpc_transport_op_finish_with_failure(&call_ops[i]);
- }
+ gpr_mu_lock(&chand->mu_config);
+ GPR_ASSERT(!chand->resolver);
+ chand->resolver = resolver;
+ GRPC_RESOLVER_REF(resolver, "channel");
+ if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
+ chand->exit_idle_when_lb_policy_arrives) {
+ chand->started_resolving = 1;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
+ &chand->on_config_changed);
}
+ gpr_mu_unlock(&chand->mu_config);
+}
- /* END LOCKING CHANNEL */
- gpr_mu_unlock(&chand->mu);
-
- /* activate any pending operations - this is safe to do as we guarantee one
- and only one write operation per request at the surface api - if we lose
- that guarantee we need to do some curly locking here */
- for (i = 0; i < waiting_child_count; i++) {
- if (waiting_children[i]) {
- complete_activate(waiting_children[i]->elem, &call_ops[i]);
+grpc_connectivity_state grpc_client_channel_check_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
+ channel_data *chand = elem->channel_data;
+ grpc_connectivity_state out;
+ gpr_mu_lock(&chand->mu_config);
+ out = grpc_connectivity_state_check(&chand->state_tracker);
+ if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
+ } else {
+ chand->exit_idle_when_lb_policy_arrives = 1;
+ if (!chand->started_resolving && chand->resolver != NULL) {
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ chand->started_resolving = 1;
+ grpc_resolver_next(exec_ctx, chand->resolver,
+ &chand->incoming_configuration,
+ &chand->on_config_changed);
+ }
}
}
- gpr_free(waiting_children);
- gpr_free(call_ops);
- gpr_free(child_filters);
-
- if (old_active) {
- grpc_child_channel_destroy(old_active, 1);
- }
+ gpr_mu_unlock(&chand->mu_config);
+ return out;
+}
- return result;
+typedef struct {
+ channel_data *chand;
+ grpc_pollset *pollset;
+ grpc_closure *on_complete;
+ grpc_closure my_closure;
+} external_connectivity_watcher;
+
+static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ external_connectivity_watcher *w = arg;
+ grpc_closure *follow_up = w->on_complete;
+ grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties,
+ w->pollset);
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+ "external_connectivity_watcher");
+ gpr_free(w);
+ follow_up->cb(exec_ctx, follow_up->cb_arg, iomgr_success);
}
-void grpc_client_channel_set_transport_setup(grpc_channel_stack *channel_stack,
- grpc_transport_setup *setup) {
- /* post construction initialization: set the transport setup pointer */
- grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
+void grpc_client_channel_watch_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
+ grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
- GPR_ASSERT(!chand->transport_setup);
- chand->transport_setup = setup;
+ external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ w->chand = chand;
+ w->pollset = pollset;
+ w->on_complete = on_complete;
+ grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
+ grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
+ GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
+ "external_connectivity_watcher");
+ gpr_mu_lock(&chand->mu_config);
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &chand->state_tracker, state, &w->my_closure);
+ gpr_mu_unlock(&chand->mu_config);
}
diff --git a/src/core/channel/client_channel.h b/src/core/channel/client_channel.h
index 7a67a9f21f..422f7f8374 100644
--- a/src/core/channel/client_channel.h
+++ b/src/core/channel/client_channel.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,11 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H
+#ifndef GRPC_CORE_CHANNEL_CLIENT_CHANNEL_H
+#define GRPC_CORE_CHANNEL_CLIENT_CHANNEL_H
#include "src/core/channel/channel_stack.h"
+#include "src/core/client_config/resolver.h"
/* A client channel is a channel that begins disconnected, and can connect
to some endpoint on demand. If that endpoint disconnects, it will be
@@ -48,15 +49,15 @@ extern const grpc_channel_filter grpc_client_channel_filter;
/* post-construction initializer to let the client channel know which
transport setup it should cancel upon destruction, or initiate when it needs
a connection */
-void grpc_client_channel_set_transport_setup(grpc_channel_stack *channel_stack,
- grpc_transport_setup *setup);
-
-/* grpc_transport_setup_callback for binding new transports into a client
- channel - user_data should be the channel stack containing the client
- channel */
-grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
- grpc_channel_stack *channel_stack, grpc_transport *transport,
- grpc_channel_filter const **channel_filters, size_t num_channel_filters,
- grpc_mdctx *mdctx);
-
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */
+void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
+ grpc_resolver *resolver);
+
+grpc_connectivity_state grpc_client_channel_check_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
+
+void grpc_client_channel_watch_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
+ grpc_connectivity_state *state, grpc_closure *on_complete);
+
+#endif /* GRPC_CORE_CHANNEL_CLIENT_CHANNEL_H */
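
A minimal usage sketch of the connectivity-state machinery these new hooks back, written against the public grpc.h surface of this era rather than the filter API; the channel, completion queue and tag are assumed to have been created elsewhere.

/* Sketch only: wait until the channel leaves its current connectivity state.
   grpc_channel_check_connectivity_state / grpc_channel_watch_connectivity_state
   are the public entry points that ultimately reach the element-level
   functions declared above. */
grpc_connectivity_state state =
    grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
grpc_channel_watch_connectivity_state(channel, state,
                                      gpr_inf_future(GPR_CLOCK_REALTIME), cq,
                                      (void *)0x1 /* tag: placeholder */);
grpc_event ev = grpc_completion_queue_next(
    cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
/* ev.success != 0 indicates the observed state changed away from 'state'. */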
diff --git a/src/core/channel/client_setup.c b/src/core/channel/client_setup.c
deleted file mode 100644
index 5be8fa66e9..0000000000
--- a/src/core/channel/client_setup.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/channel/client_setup.h"
-#include "src/core/channel/channel_args.h"
-#include "src/core/channel/channel_stack.h"
-#include "src/core/iomgr/alarm.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-
-struct grpc_client_setup {
- grpc_transport_setup base; /* must be first */
- void (*initiate)(void *user_data, grpc_client_setup_request *request);
- void (*done)(void *user_data);
- void *user_data;
- grpc_channel_args *args;
- grpc_mdctx *mdctx;
- grpc_alarm backoff_alarm;
- gpr_timespec current_backoff_interval;
- int in_alarm;
- int in_cb;
- int cancelled;
-
- gpr_mu mu;
- gpr_cv cv;
- grpc_client_setup_request *active_request;
- int refs;
- /** The set of pollsets that are currently interested in this
- connection being established */
- grpc_pollset_set interested_parties;
-};
-
-struct grpc_client_setup_request {
- /* pointer back to the setup object */
- grpc_client_setup *setup;
- gpr_timespec deadline;
-};
-
-gpr_timespec grpc_client_setup_request_deadline(grpc_client_setup_request *r) {
- return r->deadline;
-}
-
-grpc_pollset_set *grpc_client_setup_get_interested_parties(
- grpc_client_setup_request *r) {
- return &r->setup->interested_parties;
-}
-
-static void destroy_setup(grpc_client_setup *s) {
- gpr_mu_destroy(&s->mu);
- gpr_cv_destroy(&s->cv);
- s->done(s->user_data);
- grpc_channel_args_destroy(s->args);
- grpc_pollset_set_destroy(&s->interested_parties);
- gpr_free(s);
-}
-
-static void destroy_request(grpc_client_setup_request *r) { gpr_free(r); }
-
-/* initiate handshaking */
-static void setup_initiate(grpc_transport_setup *sp) {
- grpc_client_setup *s = (grpc_client_setup *)sp;
- grpc_client_setup_request *r = gpr_malloc(sizeof(grpc_client_setup_request));
- int in_alarm = 0;
-
- r->setup = s;
- r->deadline = gpr_time_add(gpr_now(), gpr_time_from_seconds(60));
-
- gpr_mu_lock(&s->mu);
- GPR_ASSERT(s->refs > 0);
- /* there might be more than one request outstanding if the caller calls
- initiate in some kind of rapid-fire way: we try to connect each time,
- and keep track of the latest request (which is the only one that gets
- to finish) */
- if (!s->in_alarm) {
- s->active_request = r;
- s->refs++;
- } else {
- /* TODO(klempner): Maybe do something more clever here */
- in_alarm = 1;
- }
- gpr_mu_unlock(&s->mu);
-
- if (!in_alarm) {
- s->initiate(s->user_data, r);
- } else {
- destroy_request(r);
- }
-}
-
-/** implementation of add_interested_party for setup vtable */
-static void setup_add_interested_party(grpc_transport_setup *sp,
- grpc_pollset *pollset) {
- grpc_client_setup *s = (grpc_client_setup *)sp;
-
- gpr_mu_lock(&s->mu);
- grpc_pollset_set_add_pollset(&s->interested_parties, pollset);
- gpr_mu_unlock(&s->mu);
-}
-
-/** implementation of del_interested_party for setup vtable */
-static void setup_del_interested_party(grpc_transport_setup *sp,
- grpc_pollset *pollset) {
- grpc_client_setup *s = (grpc_client_setup *)sp;
-
- gpr_mu_lock(&s->mu);
- grpc_pollset_set_del_pollset(&s->interested_parties, pollset);
- gpr_mu_unlock(&s->mu);
-}
-
-/* cancel handshaking: cancel all requests, and shutdown (the caller promises
- not to initiate again) */
-static void setup_cancel(grpc_transport_setup *sp) {
- grpc_client_setup *s = (grpc_client_setup *)sp;
- int cancel_alarm = 0;
-
- gpr_mu_lock(&s->mu);
- s->cancelled = 1;
- while (s->in_cb) {
- gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
- }
-
- GPR_ASSERT(s->refs > 0);
- /* effectively cancels the current request (if any) */
- s->active_request = NULL;
- if (s->in_alarm) {
- cancel_alarm = 1;
- }
- if (--s->refs == 0) {
- gpr_mu_unlock(&s->mu);
- destroy_setup(s);
- } else {
- gpr_mu_unlock(&s->mu);
- }
- if (cancel_alarm) {
- grpc_alarm_cancel(&s->backoff_alarm);
- }
-}
-
-int grpc_client_setup_cb_begin(grpc_client_setup_request *r,
- const char *reason) {
- gpr_mu_lock(&r->setup->mu);
- if (r->setup->cancelled) {
- gpr_mu_unlock(&r->setup->mu);
- return 0;
- }
- r->setup->in_cb++;
- gpr_mu_unlock(&r->setup->mu);
- return 1;
-}
-
-void grpc_client_setup_cb_end(grpc_client_setup_request *r,
- const char *reason) {
- gpr_mu_lock(&r->setup->mu);
- r->setup->in_cb--;
- if (r->setup->cancelled) gpr_cv_signal(&r->setup->cv);
- gpr_mu_unlock(&r->setup->mu);
-}
-
-/* vtable for transport setup */
-static const grpc_transport_setup_vtable setup_vtable = {
- setup_initiate, setup_add_interested_party, setup_del_interested_party,
- setup_cancel};
-
-void grpc_client_setup_create_and_attach(
- grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
- grpc_mdctx *mdctx,
- void (*initiate)(void *user_data, grpc_client_setup_request *request),
- void (*done)(void *user_data), void *user_data) {
- grpc_client_setup *s = gpr_malloc(sizeof(grpc_client_setup));
-
- s->base.vtable = &setup_vtable;
- gpr_mu_init(&s->mu);
- gpr_cv_init(&s->cv);
- s->refs = 1;
- s->mdctx = mdctx;
- s->initiate = initiate;
- s->done = done;
- s->user_data = user_data;
- s->active_request = NULL;
- s->args = grpc_channel_args_copy(args);
- s->current_backoff_interval = gpr_time_from_micros(1000000);
- s->in_alarm = 0;
- s->in_cb = 0;
- s->cancelled = 0;
- grpc_pollset_set_init(&s->interested_parties);
-
- grpc_client_channel_set_transport_setup(newly_minted_channel, &s->base);
-}
-
-int grpc_client_setup_request_should_continue(grpc_client_setup_request *r,
- const char *reason) {
- int result;
- if (gpr_time_cmp(gpr_now(), r->deadline) > 0) {
- result = 0;
- } else {
- gpr_mu_lock(&r->setup->mu);
- result = r->setup->active_request == r;
- gpr_mu_unlock(&r->setup->mu);
- }
- return result;
-}
-
-static void backoff_alarm_done(void *arg /* grpc_client_setup_request */,
- int success) {
- grpc_client_setup_request *r = arg;
- grpc_client_setup *s = r->setup;
- /* Handle status cancelled? */
- gpr_mu_lock(&s->mu);
- s->in_alarm = 0;
- if (s->active_request != NULL || !success) {
- if (0 == --s->refs) {
- gpr_mu_unlock(&s->mu);
- destroy_setup(s);
- destroy_request(r);
- return;
- } else {
- gpr_mu_unlock(&s->mu);
- destroy_request(r);
- return;
- }
- }
- s->active_request = r;
- gpr_mu_unlock(&s->mu);
- s->initiate(s->user_data, r);
-}
-
-void grpc_client_setup_request_finish(grpc_client_setup_request *r,
- int was_successful) {
- int retry = !was_successful;
- grpc_client_setup *s = r->setup;
-
- gpr_mu_lock(&s->mu);
- if (s->active_request == r) {
- s->active_request = NULL;
- } else {
- retry = 0;
- }
-
- if (!retry && 0 == --s->refs) {
- gpr_mu_unlock(&s->mu);
- destroy_setup(s);
- destroy_request(r);
- } else if (retry) {
- /* TODO(klempner): Replace these values with further consideration. 2x is
- probably too aggressive of a backoff. */
- gpr_timespec max_backoff = gpr_time_from_minutes(2);
- gpr_timespec now = gpr_now();
- gpr_timespec deadline = gpr_time_add(s->current_backoff_interval, now);
- GPR_ASSERT(!s->in_alarm);
- s->in_alarm = 1;
- grpc_alarm_init(&s->backoff_alarm, deadline, backoff_alarm_done, r, now);
- s->current_backoff_interval =
- gpr_time_add(s->current_backoff_interval, s->current_backoff_interval);
- if (gpr_time_cmp(s->current_backoff_interval, max_backoff) > 0) {
- s->current_backoff_interval = max_backoff;
- }
- gpr_mu_unlock(&s->mu);
- } else {
- gpr_mu_unlock(&s->mu);
- destroy_request(r);
- }
-}
-
-const grpc_channel_args *grpc_client_setup_get_channel_args(
- grpc_client_setup_request *r) {
- return r->setup->args;
-}
-
-grpc_mdctx *grpc_client_setup_get_mdctx(grpc_client_setup_request *r) {
- return r->setup->mdctx;
-}
diff --git a/src/core/channel/client_setup.h b/src/core/channel/client_setup.h
deleted file mode 100644
index 7d40338840..0000000000
--- a/src/core/channel/client_setup.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H
-
-#include "src/core/channel/client_channel.h"
-#include "src/core/transport/metadata.h"
-#include <grpc/support/time.h>
-
-/* Convenience API's to simplify transport setup */
-
-typedef struct grpc_client_setup grpc_client_setup;
-typedef struct grpc_client_setup_request grpc_client_setup_request;
-
-void grpc_client_setup_create_and_attach(
- grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
- grpc_mdctx *mdctx,
- void (*initiate)(void *user_data, grpc_client_setup_request *request),
- void (*done)(void *user_data), void *user_data);
-
-/* Check that r is the active request: needs to be performed at each callback.
- If this races, we'll have two connection attempts running at once and the
- old one will get cleaned up in due course, which is fine. */
-int grpc_client_setup_request_should_continue(grpc_client_setup_request *r,
- const char *reason);
-void grpc_client_setup_request_finish(grpc_client_setup_request *r,
- int was_successful);
-const grpc_channel_args *grpc_client_setup_get_channel_args(
- grpc_client_setup_request *r);
-
-/* Call before calling back into the setup listener, and call only if
- this function returns 1. If it returns 1, also promise to call
- grpc_client_setup_cb_end */
-int grpc_client_setup_cb_begin(grpc_client_setup_request *r,
- const char *reason);
-void grpc_client_setup_cb_end(grpc_client_setup_request *r, const char *reason);
-
-/* Get the deadline for a request passed in to initiate. Implementations should
- make a best effort to honor this deadline. */
-gpr_timespec grpc_client_setup_request_deadline(grpc_client_setup_request *r);
-grpc_pollset_set *grpc_client_setup_get_interested_parties(
- grpc_client_setup_request *r);
-
-grpc_mdctx *grpc_client_setup_get_mdctx(grpc_client_setup_request *r);
-
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H */
diff --git a/src/core/channel/client_uchannel.c b/src/core/channel/client_uchannel.c
new file mode 100644
index 0000000000..d32327206e
--- /dev/null
+++ b/src/core/channel/client_uchannel.c
@@ -0,0 +1,233 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/channel/client_uchannel.h"
+
+#include <string.h>
+
+#include "src/core/census/grpc_filter.h"
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/client_channel.h"
+#include "src/core/channel/compress_filter.h"
+#include "src/core/channel/subchannel_call_holder.h"
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/support/string.h"
+#include "src/core/surface/channel.h"
+#include "src/core/transport/connectivity_state.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+
+/** Microchannel (uchannel) implementation: a lightweight channel, without any
+ * load-balancing mechanisms, meant for communication from within the core. */
+
+typedef struct client_uchannel_channel_data {
+ /** master channel - the grpc_channel instance that ultimately owns
+ this channel_data via its channel stack.
+ We occasionally use this to bump the refcount on the master channel
+ to keep ourselves alive through an asynchronous operation. */
+ grpc_channel_stack *owning_stack;
+
+ /** connectivity state being tracked */
+ grpc_connectivity_state_tracker state_tracker;
+
+ /** the subchannel wrapped by the microchannel */
+ grpc_connected_subchannel *connected_subchannel;
+
+ /** the callback used to stay subscribed to subchannel connectivity
+ * notifications */
+ grpc_closure connectivity_cb;
+
+ /** the current connectivity state of the wrapped subchannel */
+ grpc_connectivity_state subchannel_connectivity;
+
+ gpr_mu mu_state;
+} channel_data;
+
+typedef grpc_subchannel_call_holder call_data;
+
+static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ channel_data *chand = arg;
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
+ chand->subchannel_connectivity,
+ "uchannel_monitor_subchannel");
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, chand->connected_subchannel, NULL,
+ &chand->subchannel_connectivity, &chand->connectivity_cb);
+}
+
+static char *cuc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
+}
+
+static void cuc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
+}
+
+static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_transport_op *op) {
+ channel_data *chand = elem->channel_data;
+
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
+
+ GPR_ASSERT(op->set_accept_stream == false);
+ GPR_ASSERT(op->bind_pollset == NULL);
+
+ if (op->on_connectivity_state_change != NULL) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &chand->state_tracker, op->connectivity_state,
+ op->on_connectivity_state_change);
+ op->on_connectivity_state_change = NULL;
+ op->connectivity_state = NULL;
+ }
+
+ if (op->disconnect) {
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "disconnect");
+ }
+}
+
+static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready) {
+ channel_data *chand = arg;
+ GPR_ASSERT(initial_metadata != NULL);
+ *connected_subchannel = chand->connected_subchannel;
+ return 1;
+}
+
+/* Constructor for call_data */
+static void cuc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ grpc_subchannel_call_holder_init(elem->call_data, cuc_pick_subchannel,
+ elem->channel_data, args->call_stack);
+}
+
+/* Destructor for call_data */
+static void cuc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
+}
+
+/* Constructor for channel_data */
+static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ memset(chand, 0, sizeof(*chand));
+ grpc_closure_init(&chand->connectivity_cb, monitor_subchannel, chand);
+ GPR_ASSERT(args->is_last);
+ GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
+ chand->owning_stack = args->channel_stack;
+ grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
+ "client_uchannel");
+ gpr_mu_init(&chand->mu_state);
+}
+
+/* Destructor for channel_data */
+static void cuc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
+ /* cancel subscription */
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, chand->connected_subchannel, NULL, NULL,
+ &chand->connectivity_cb);
+ grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
+ gpr_mu_destroy(&chand->mu_state);
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, chand->connected_subchannel,
+ "uchannel");
+}
+
+static void cuc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
+}
+
+const grpc_channel_filter grpc_client_uchannel_filter = {
+ cuc_start_transport_stream_op, cuc_start_transport_op, sizeof(call_data),
+ cuc_init_call_elem, cuc_set_pollset, cuc_destroy_call_elem,
+ sizeof(channel_data), cuc_init_channel_elem, cuc_destroy_channel_elem,
+ cuc_get_peer, "client-uchannel",
+};
+
+grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
+ channel_data *chand = elem->channel_data;
+ grpc_connectivity_state out;
+ gpr_mu_lock(&chand->mu_state);
+ out = grpc_connectivity_state_check(&chand->state_tracker);
+ gpr_mu_unlock(&chand->mu_state);
+ return out;
+}
+
+void grpc_client_uchannel_watch_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
+ grpc_connectivity_state *state, grpc_closure *on_complete) {
+ channel_data *chand = elem->channel_data;
+ gpr_mu_lock(&chand->mu_state);
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &chand->state_tracker, state, on_complete);
+ gpr_mu_unlock(&chand->mu_state);
+}
+
+grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
+ grpc_channel_args *args) {
+ grpc_channel *channel = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ channel =
+ grpc_channel_create(&exec_ctx, NULL, args, GRPC_CLIENT_UCHANNEL, NULL);
+
+ return channel;
+}
+
+void grpc_client_uchannel_set_connected_subchannel(
+ grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel) {
+ grpc_channel_element *elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(uchannel));
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
+ gpr_mu_lock(&chand->mu_state);
+ chand->connected_subchannel = connected_subchannel;
+ GRPC_CONNECTED_SUBCHANNEL_REF(connected_subchannel, "uchannel");
+ gpr_mu_unlock(&chand->mu_state);
+}
diff --git a/src/core/channel/client_uchannel.h b/src/core/channel/client_uchannel.h
new file mode 100644
index 0000000000..8bb288e7d4
--- /dev/null
+++ b/src/core/channel/client_uchannel.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CHANNEL_CLIENT_UCHANNEL_H
+#define GRPC_CORE_CHANNEL_CLIENT_UCHANNEL_H
+
+#include "src/core/channel/channel_stack.h"
+#include "src/core/client_config/resolver.h"
+
+#define GRPC_MICROCHANNEL_SUBCHANNEL_ARG "grpc.microchannel_subchannel_key"
+
+/* A client microchannel (aka uchannel) is a channel wrapping a subchannel, for
+ * the purposes of lightweight RPC communications from within the core. */
+
+extern const grpc_channel_filter grpc_client_uchannel_filter;
+
+grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
+
+void grpc_client_uchannel_watch_connectivity_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
+ grpc_connectivity_state *state, grpc_closure *on_complete);
+
+grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
+ grpc_channel_args *args);
+
+void grpc_client_uchannel_set_connected_subchannel(
+ grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel);
+
+#endif /* GRPC_CORE_CHANNEL_CLIENT_UCHANNEL_H */
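
A sketch of how these microchannel entry points compose, based only on the declarations above; 'subchannel', 'connected' and 'args' are placeholders standing in for values produced by the client_config machinery, and this is not a complete, buildable unit.

/* Sketch: wrap an already-connected subchannel in a lightweight uchannel. */
grpc_channel *uchannel = grpc_client_uchannel_create(subchannel, args);
grpc_client_uchannel_set_connected_subchannel(uchannel, connected);
/* Calls created on 'uchannel' now go straight to 'connected', bypassing the
   resolver and load-balancing policy used by the full client channel. */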
diff --git a/src/core/channel/compress_filter.c b/src/core/channel/compress_filter.c
new file mode 100644
index 0000000000..3e7ca08fd2
--- /dev/null
+++ b/src/core/channel/compress_filter.c
@@ -0,0 +1,297 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <grpc/compression.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/compress_filter.h"
+#include "src/core/compression/algorithm_metadata.h"
+#include "src/core/compression/message_compress.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/transport/static_metadata.h"
+
+typedef struct call_data {
+ gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_linked_mdelem compression_algorithm_storage;
+ grpc_linked_mdelem accept_encoding_storage;
+ uint32_t remaining_slice_bytes;
+ /** Compression algorithm we'll try to use. It may be given by incoming
+ * metadata, or by the channel's default compression settings. */
+ grpc_compression_algorithm compression_algorithm;
+ /** If true, contents of \a compression_algorithm are authoritative */
+ int has_compression_algorithm;
+
+ grpc_transport_stream_op send_op;
+ uint32_t send_length;
+ uint32_t send_flags;
+ gpr_slice incoming_slice;
+ grpc_slice_buffer_stream replacement_stream;
+ grpc_closure *post_send;
+ grpc_closure send_done;
+ grpc_closure got_slice;
+} call_data;
+
+typedef struct channel_data {
+ /** The default, channel-level, compression algorithm */
+ grpc_compression_algorithm default_compression_algorithm;
+ /** Compression options for the channel */
+ grpc_compression_options compression_options;
+ /** Supported compression algorithms */
+ uint32_t supported_compression_algorithms;
+} channel_data;
+
+/** For each \a md element from the incoming metadata, filter out the entry for
+ * "grpc-encoding", using its value to populate the call data's
+ * compression_algorithm field. */
+static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ channel_data *channeld = elem->channel_data;
+
+ if (md->key == GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST) {
+ const char *md_c_str = grpc_mdstr_as_c_string(md->value);
+ if (!grpc_compression_algorithm_parse(md_c_str, strlen(md_c_str),
+ &calld->compression_algorithm)) {
+ gpr_log(GPR_ERROR,
+ "Invalid compression algorithm: '%s' (unknown). Ignoring.",
+ md_c_str);
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ }
+ if (grpc_compression_options_is_algorithm_enabled(
+ &channeld->compression_options, calld->compression_algorithm) ==
+ 0) {
+ gpr_log(GPR_ERROR,
+ "Invalid compression algorithm: '%s' (previously disabled). "
+ "Ignoring.",
+ md_c_str);
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ }
+ calld->has_compression_algorithm = 1;
+ return NULL;
+ }
+
+ return md;
+}
+
+static int skip_compression(grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ channel_data *channeld = elem->channel_data;
+ if (calld->has_compression_algorithm) {
+ if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
+ return 1;
+ }
+ return 0; /* we have an actual call-specific algorithm */
+ }
+ /* no per-call compression override */
+ return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
+}
+
+/** Filter initial metadata */
+static void process_send_initial_metadata(
+ grpc_call_element *elem, grpc_metadata_batch *initial_metadata) {
+ call_data *calld = elem->call_data;
+ channel_data *channeld = elem->channel_data;
+ /* Parse incoming request for compression. If any, it'll be available
+ * at calld->compression_algorithm */
+ grpc_metadata_batch_filter(initial_metadata, compression_md_filter, elem);
+ if (!calld->has_compression_algorithm) {
+ /* If no algorithm was found in the metadata and we aren't
+ * exceptionally skipping compression, fall back to the channel
+ * default */
+ calld->compression_algorithm = channeld->default_compression_algorithm;
+ calld->has_compression_algorithm = 1; /* GPR_TRUE */
+ }
+ /* hint compression algorithm */
+ grpc_metadata_batch_add_tail(
+ initial_metadata, &calld->compression_algorithm_storage,
+ grpc_compression_encoding_mdelem(calld->compression_algorithm));
+
+ /* convey supported compression algorithms */
+ grpc_metadata_batch_add_tail(initial_metadata,
+ &calld->accept_encoding_storage,
+ GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
+ channeld->supported_compression_algorithms));
+}
+
+static void continue_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
+
+static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, bool success) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_reset_and_unref(&calld->slices);
+ calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, success);
+}
+
+static void finish_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ int did_compress;
+ gpr_slice_buffer tmp;
+ gpr_slice_buffer_init(&tmp);
+ did_compress =
+ grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
+ if (did_compress) {
+ gpr_slice_buffer_swap(&calld->slices, &tmp);
+ calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
+ gpr_slice_buffer_destroy(&tmp);
+
+ grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
+ calld->send_flags);
+ calld->send_op.send_message = &calld->replacement_stream.base;
+ calld->post_send = calld->send_op.on_complete;
+ calld->send_op.on_complete = &calld->send_done;
+
+ grpc_call_next_op(exec_ctx, elem, &calld->send_op);
+}
+
+static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, bool success) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ finish_send_message(exec_ctx, elem);
+ } else {
+ continue_send_message(exec_ctx, elem);
+ }
+}
+
+static void continue_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
+ &calld->incoming_slice, ~(size_t)0,
+ &calld->got_slice)) {
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ finish_send_message(exec_ctx, elem);
+ break;
+ }
+ }
+}
+
+static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+
+ GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
+
+ if (op->send_initial_metadata) {
+ process_send_initial_metadata(elem, op->send_initial_metadata);
+ }
+ if (op->send_message != NULL && !skip_compression(elem) &&
+ 0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) {
+ calld->send_op = *op;
+ calld->send_length = op->send_message->length;
+ calld->send_flags = op->send_message->flags;
+ continue_send_message(exec_ctx, elem);
+ } else {
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+
+ GPR_TIMER_END("compress_start_transport_stream_op", 0);
+}
+
+/* Constructor for call_data */
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ /* grab pointers to our data from the call element */
+ call_data *calld = elem->call_data;
+
+ /* initialize members */
+ gpr_slice_buffer_init(&calld->slices);
+ calld->has_compression_algorithm = 0;
+ grpc_closure_init(&calld->got_slice, got_slice, elem);
+ grpc_closure_init(&calld->send_done, send_done, elem);
+}
+
+/* Destructor for call_data */
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ /* grab pointers to our data from the call element */
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_destroy(&calld->slices);
+}
+
+/* Constructor for channel_data */
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *channeld = elem->channel_data;
+ grpc_compression_algorithm algo_idx;
+
+ grpc_compression_options_init(&channeld->compression_options);
+ channeld->compression_options.enabled_algorithms_bitset =
+ (uint32_t)grpc_channel_args_compression_algorithm_get_states(
+ args->channel_args);
+
+ channeld->default_compression_algorithm =
+ grpc_channel_args_get_compression_algorithm(args->channel_args);
+ /* Make sure the default isn't disabled. */
+ GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(
+ &channeld->compression_options, channeld->default_compression_algorithm));
+ channeld->compression_options.default_compression_algorithm =
+ channeld->default_compression_algorithm;
+
+ channeld->supported_compression_algorithms = 0;
+ for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
+ /* skip disabled algorithms */
+ if (grpc_compression_options_is_algorithm_enabled(
+ &channeld->compression_options, algo_idx) == 0) {
+ continue;
+ }
+ channeld->supported_compression_algorithms |= 1u << algo_idx;
+ }
+
+ GPR_ASSERT(!args->is_last);
+}
+
+/* Destructor for channel data */
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {}
+
+const grpc_channel_filter grpc_compress_filter = {
+ compress_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "compress"};
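
A hedged sketch of the per-call request path handled by compression_md_filter above: a caller can ask for a specific algorithm by sending the internal request key as initial metadata. Field names follow the grpc_metadata/grpc_op layout of this period; 'call' and 'tag' are assumed to exist, and the filter may still ignore the request if the algorithm is disabled on the channel.

/* Sketch: request gzip for one call via the internal metadata key. */
grpc_metadata md;
memset(&md, 0, sizeof(md));
md.key = "grpc-internal-encoding-request";
md.value = "gzip";
md.value_length = 4;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_INITIAL_METADATA;
op.data.send_initial_metadata.count = 1;
op.data.send_initial_metadata.metadata = &md;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, &op, 1, tag, NULL));
/* The compress filter strips this entry and records the requested algorithm
   in calld->compression_algorithm. */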
diff --git a/src/core/channel/compress_filter.h b/src/core/channel/compress_filter.h
new file mode 100644
index 0000000000..8c208ac799
--- /dev/null
+++ b/src/core/channel/compress_filter.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CHANNEL_COMPRESS_FILTER_H
+#define GRPC_CORE_CHANNEL_COMPRESS_FILTER_H
+
+#include "src/core/channel/channel_stack.h"
+
+#define GRPC_COMPRESS_REQUEST_ALGORITHM_KEY "grpc-internal-encoding-request"
+
+/** Compression filter for outgoing data.
+ *
+ * See <grpc/compression.h> for the available compression settings.
+ *
+ * Compression settings may come from:
+ * - Channel configuration, as established at channel creation time.
+ * - The metadata accompanying the outgoing data to be compressed. This is
+ * taken as a request only. We may choose not to honor it. The metadata key
+ * is given by \a GRPC_COMPRESS_REQUEST_ALGORITHM_KEY.
+ *
+ * Compression can be disabled for individual messages (for instance, to
+ * prevent CRIME/BEAST-type attacks) by setting GRPC_WRITE_NO_COMPRESS in
+ * the BEGIN_MESSAGE flags.
+ *
+ * The attempted compression mechanism is added to the resulting initial
+ * metadata under the 'grpc-encoding' key.
+ *
+ * If compression is actually performed, the BEGIN_MESSAGE flags are updated
+ * to include GRPC_WRITE_INTERNAL_COMPRESS. Otherwise, and regardless of the
+ * aforementioned 'grpc-encoding' metadata value, data passes through
+ * uncompressed. */
+
+extern const grpc_channel_filter grpc_compress_filter;
+
+#endif /* GRPC_CORE_CHANNEL_COMPRESS_FILTER_H */
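
The per-message opt-out described in the comment above can be exercised from the public batch API roughly as follows; this is a sketch under the assumption that 'call', 'payload' (a grpc_byte_buffer *) and 'tag' already exist.

/* Sketch: send one message with compression explicitly disabled. */
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.flags = GRPC_WRITE_NO_COMPRESS; /* compress_start_transport_stream_op sees
                                      this flag and passes the payload through
                                      uncompressed */
op.data.send_message = payload;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, &op, 1, tag, NULL));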
diff --git a/src/core/channel/connected_channel.c b/src/core/channel/connected_channel.c
index 14dda88698..e7ed3ccfeb 100644
--- a/src/core/channel/connected_channel.c
+++ b/src/core/channel/connected_channel.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
+#include "src/core/profiling/timers.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -61,148 +62,87 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
/* Intercept a call operation and either push it directly up or translate it
into transport stream operations */
-static void con_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void con_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_transport_perform_op(chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
+ grpc_transport_perform_stream_op(exec_ctx, chand->transport,
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
}
-/* Currently we assume all channel operations should just be pushed up. */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
+static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
-
- switch (op->type) {
- case GRPC_CHANNEL_GOAWAY:
- grpc_transport_goaway(chand->transport, op->data.goaway.status,
- op->data.goaway.message);
- break;
- case GRPC_CHANNEL_DISCONNECT:
- grpc_transport_close(chand->transport);
- break;
- default:
- GPR_ASSERT(op->dir == GRPC_CALL_UP);
- grpc_channel_next_op(elem, op);
- break;
- }
+ grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- r = grpc_transport_init_stream(chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld),
- server_transport_data, initial_op);
+ r = grpc_transport_init_stream(
+ exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ &args->call_stack->refcount, args->server_transport_data);
GPR_ASSERT(r == 0);
}
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ grpc_transport_set_pollset(exec_ctx, chand->transport,
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollset);
+}
+
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- grpc_transport_destroy_stream(chand->transport,
+ grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld));
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *cd = (channel_data *)elem->channel_data;
- GPR_ASSERT(!is_first);
- GPR_ASSERT(is_last);
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
+ GPR_ASSERT(args->is_last);
cd->transport = NULL;
}
/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
channel_data *cd = (channel_data *)elem->channel_data;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- grpc_transport_destroy(cd->transport);
-}
-
-const grpc_channel_filter grpc_connected_channel_filter = {
- con_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "connected",
-};
-
-/* Transport callback to accept a new stream... calls up to handle it */
-static void accept_stream(void *user_data, grpc_transport *transport,
- const void *transport_server_data) {
- grpc_channel_element *elem = user_data;
- channel_data *chand = elem->channel_data;
- grpc_channel_op op;
-
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- GPR_ASSERT(chand->transport == transport);
-
- op.type = GRPC_ACCEPT_CALL;
- op.dir = GRPC_CALL_UP;
- op.data.accept_call.transport = transport;
- op.data.accept_call.transport_server_data = transport_server_data;
- channel_op(elem, NULL, &op);
-}
-
-static void transport_goaway(void *user_data, grpc_transport *transport,
- grpc_status_code status, gpr_slice debug) {
- /* transport got goaway ==> call up and handle it */
- grpc_channel_element *elem = user_data;
- channel_data *chand = elem->channel_data;
- grpc_channel_op op;
-
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- GPR_ASSERT(chand->transport == transport);
-
- op.type = GRPC_TRANSPORT_GOAWAY;
- op.dir = GRPC_CALL_UP;
- op.data.goaway.status = status;
- op.data.goaway.message = debug;
- channel_op(elem, NULL, &op);
+ grpc_transport_destroy(exec_ctx, cd->transport);
}
-static void transport_closed(void *user_data, grpc_transport *transport) {
- /* transport was closed ==> call up and handle it */
- grpc_channel_element *elem = user_data;
+static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
- grpc_channel_op op;
-
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- GPR_ASSERT(chand->transport == transport);
-
- op.type = GRPC_TRANSPORT_CLOSED;
- op.dir = GRPC_CALL_UP;
- channel_op(elem, NULL, &op);
+ return grpc_transport_get_peer(exec_ctx, chand->transport);
}
-const grpc_transport_callbacks connected_channel_transport_callbacks = {
- accept_stream, transport_goaway, transport_closed,
+static const grpc_channel_filter connected_channel_filter = {
+ con_start_transport_stream_op, con_start_transport_op, sizeof(call_data),
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, con_get_peer, "connected",
};
-grpc_transport_setup_result grpc_connected_channel_bind_transport(
- grpc_channel_stack *channel_stack, grpc_transport *transport) {
- /* Assumes that the connected channel filter is always the last filter
- in a channel stack */
- grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
+static void bind_transport(grpc_channel_stack *channel_stack,
+ grpc_channel_element *elem, void *t) {
channel_data *cd = (channel_data *)elem->channel_data;
- grpc_transport_setup_result ret;
- GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
+ GPR_ASSERT(elem->filter == &connected_channel_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = transport;
+ cd->transport = t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -210,9 +150,19 @@ grpc_transport_setup_result grpc_connected_channel_bind_transport(
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
- channel_stack->call_stack_size += grpc_transport_stream_size(transport);
+ channel_stack->call_stack_size += grpc_transport_stream_size(t);
+}
- ret.user_data = elem;
- ret.callbacks = &connected_channel_transport_callbacks;
- return ret;
+bool grpc_add_connected_filter(grpc_channel_stack_builder *builder,
+ void *arg_must_be_null) {
+ GPR_ASSERT(arg_must_be_null == NULL);
+ grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ GPR_ASSERT(t != NULL);
+ return grpc_channel_stack_builder_append_filter(
+ builder, &connected_channel_filter, bind_transport, t);
+}
+
+grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
diff --git a/src/core/channel/connected_channel.h b/src/core/channel/connected_channel.h
index 8b35f69b26..7c0c8359a4 100644
--- a/src/core/channel/connected_channel.h
+++ b/src/core/channel/connected_channel.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,12 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H
+#ifndef GRPC_CORE_CHANNEL_CONNECTED_CHANNEL_H
+#define GRPC_CORE_CHANNEL_CONNECTED_CHANNEL_H
-#include "src/core/channel/channel_stack.h"
+#include "src/core/channel/channel_stack_builder.h"
-/* A channel filter representing a channel that is on a connected transport.
- This filter performs actual sending and receiving of messages. */
+bool grpc_add_connected_filter(grpc_channel_stack_builder *builder,
+ void *arg_must_be_null);
-extern const grpc_channel_filter grpc_connected_channel_filter;
-
-/* Post construction fixup: set the transport in the connected channel.
- Must be called before any call stack using this filter is used. */
-grpc_transport_setup_result grpc_connected_channel_bind_transport(
- grpc_channel_stack *channel_stack, grpc_transport *transport);
-
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H */
+#endif /* GRPC_CORE_CHANNEL_CONNECTED_CHANNEL_H */
diff --git a/src/core/channel/context.h b/src/core/channel/context.h
index ac5796b9ef..db217dc133 100644
--- a/src/core/channel/context.h
+++ b/src/core/channel/context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CONTEXT_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CONTEXT_H
+#ifndef GRPC_CORE_CHANNEL_CONTEXT_H
+#define GRPC_CORE_CHANNEL_CONTEXT_H
/* Call object context pointers */
typedef enum {
@@ -46,4 +46,4 @@ typedef struct {
void (*destroy)(void *);
} grpc_call_context_element;
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONTEXT_H */
+#endif /* GRPC_CORE_CHANNEL_CONTEXT_H */
diff --git a/src/core/channel/http_client_filter.c b/src/core/channel/http_client_filter.c
index 08a2c0df3c..1aa27208c2 100644
--- a/src/core/channel/http_client_filter.c
+++ b/src/core/channel/http_client_filter.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,196 +31,218 @@
*/
#include "src/core/channel/http_client_filter.h"
-#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <string.h>
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/transport/static_metadata.h"
typedef struct call_data {
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
+ grpc_linked_mdelem authority;
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
- int sent_initial_metadata;
+ grpc_linked_mdelem user_agent;
- int got_initial_metadata;
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hc_on_recv hook */
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
- grpc_iomgr_closure hc_on_recv;
+ grpc_closure hc_on_recv;
} call_data;
typedef struct channel_data {
- grpc_mdelem *te_trailers;
- grpc_mdelem *method;
- grpc_mdelem *scheme;
- grpc_mdelem *content_type;
- grpc_mdelem *status;
+ grpc_mdelem *static_scheme;
+ grpc_mdelem *user_agent;
} channel_data;
-/* used to silence 'variable not used' warnings */
-static void ignore_unused(void *ignored) {}
+typedef struct {
+ grpc_call_element *elem;
+ grpc_exec_ctx *exec_ctx;
+} client_recv_filter_args;
-static grpc_mdelem *client_filter(void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- channel_data *channeld = elem->channel_data;
- if (md == channeld->status) {
+static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
+ client_recv_filter_args *a = user_data;
+ if (md == GRPC_MDELEM_STATUS_200) {
+ return NULL;
+ } else if (md->key == GRPC_MDSTR_STATUS) {
+ grpc_call_element_send_cancel(a->exec_ctx, a->elem);
return NULL;
- } else if (md->key == channeld->status->key) {
- grpc_call_element_send_cancel(elem);
+ } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
return NULL;
}
return md;
}
-static void hc_on_recv(void *user_data, int success) {
+static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- if (success) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- calld->got_initial_metadata = 1;
- grpc_metadata_batch_filter(&op->data.metadata, client_filter, elem);
- }
- }
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ client_recv_filter_args a;
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, client_recv_filter,
+ &a);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
+}
+
+static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
+ /* eat the things we'd like to set ourselves */
+ if (md->key == GRPC_MDSTR_METHOD) return NULL;
+ if (md->key == GRPC_MDSTR_SCHEME) return NULL;
+ if (md->key == GRPC_MDSTR_TE) return NULL;
+ if (md->key == GRPC_MDSTR_CONTENT_TYPE) return NULL;
+ if (md->key == GRPC_MDSTR_USER_AGENT) return NULL;
+ return md;
}
-static void hc_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
+static void hc_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
- size_t i;
- if (op->send_ops && !calld->sent_initial_metadata) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- calld->sent_initial_metadata = 1;
- /* Send : prefixed headers, which have to be before any application
- layer headers. */
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->method,
- grpc_mdelem_ref(channeld->method));
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->scheme,
- grpc_mdelem_ref(channeld->scheme));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->te_trailers,
- grpc_mdelem_ref(channeld->te_trailers));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
- grpc_mdelem_ref(channeld->content_type));
- break;
- }
+ if (op->send_initial_metadata != NULL) {
+ grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
+ elem);
+ /* Send : prefixed headers, which have to be before any application
+ layer headers. */
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
+ GRPC_MDELEM_METHOD_POST);
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
+ channeld->static_scheme);
+ grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
+ GRPC_MDELEM_TE_TRAILERS);
+ grpc_metadata_batch_add_tail(
+ op->send_initial_metadata, &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
+ grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->user_agent,
+ GRPC_MDELEM_REF(channeld->user_agent));
}
- if (op->recv_ops && !calld->got_initial_metadata) {
+ if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->hc_on_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->hc_on_recv;
}
}
-static void hc_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
-}
-
-/* Called on special channel events, such as disconnection or new incoming
- calls on the server */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(channeld);
-
- switch (op->type) {
- default:
- /* pass control up or down the stack depending on op->dir */
- grpc_channel_next_op(elem, op);
- break;
- }
+ GPR_TIMER_END("hc_start_transport_op", 0);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- calld->sent_initial_metadata = 0;
- calld->got_initial_metadata = 0;
calld->on_done_recv = NULL;
- grpc_iomgr_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
- if (initial_op) hc_mutate_op(elem, initial_op);
+ grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
-static const char *scheme_from_args(const grpc_channel_args *args) {
+static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
+ size_t j;
+ grpc_mdelem *valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_SCHEME_HTTPS};
if (args != NULL) {
for (i = 0; i < args->num_args; ++i) {
if (args->args[i].type == GRPC_ARG_STRING &&
strcmp(args->args[i].key, GRPC_ARG_HTTP2_SCHEME) == 0) {
- return args->args[i].value.string;
+ for (j = 0; j < GPR_ARRAY_SIZE(valid_schemes); j++) {
+ if (0 == strcmp(grpc_mdstr_as_c_string(valid_schemes[j]->value),
+ args->args[i].value.string)) {
+ return valid_schemes[j];
+ }
+ }
}
}
}
- return "http";
+ return GRPC_MDELEM_SCHEME_HTTP;
}
-/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
+static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args) {
+ gpr_strvec v;
+ size_t i;
+ int is_first = 1;
+ char *tmp;
+ grpc_mdstr *result;
+
+ gpr_strvec_init(&v);
+
+ for (i = 0; args && i < args->num_args; i++) {
+ if (0 == strcmp(args->args[i].key, GRPC_ARG_PRIMARY_USER_AGENT_STRING)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "Channel argument '%s' should be a string",
+ GRPC_ARG_PRIMARY_USER_AGENT_STRING);
+ } else {
+ if (!is_first) gpr_strvec_add(&v, gpr_strdup(" "));
+ is_first = 0;
+ gpr_strvec_add(&v, gpr_strdup(args->args[i].value.string));
+ }
+ }
+ }
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
- channeld->method = grpc_mdelem_from_strings(mdctx, ":method", "POST");
- channeld->scheme =
- grpc_mdelem_from_strings(mdctx, ":scheme", scheme_from_args(args));
- channeld->content_type =
- grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
- channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
+ gpr_asprintf(&tmp, "%sgrpc-c/%s (%s)", is_first ? "" : " ",
+ grpc_version_string(), GPR_PLATFORM_STRING);
+ is_first = 0;
+ gpr_strvec_add(&v, tmp);
+
+ for (i = 0; args && i < args->num_args; i++) {
+ if (0 == strcmp(args->args[i].key, GRPC_ARG_SECONDARY_USER_AGENT_STRING)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "Channel argument '%s' should be a string",
+ GRPC_ARG_SECONDARY_USER_AGENT_STRING);
+ } else {
+ if (!is_first) gpr_strvec_add(&v, gpr_strdup(" "));
+ is_first = 0;
+ gpr_strvec_add(&v, gpr_strdup(args->args[i].value.string));
+ }
+ }
+ }
+
+ tmp = gpr_strvec_flatten(&v, NULL);
+ gpr_strvec_destroy(&v);
+ result = grpc_mdstr_from_string(tmp);
+ gpr_free(tmp);
+
+ return result;
}
-/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
+/* Constructor for channel_data */
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(!args->is_last);
+ chand->static_scheme = scheme_from_args(args->channel_args);
+ chand->user_agent = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args));
+}
- grpc_mdelem_unref(channeld->te_trailers);
- grpc_mdelem_unref(channeld->method);
- grpc_mdelem_unref(channeld->scheme);
- grpc_mdelem_unref(channeld->content_type);
- grpc_mdelem_unref(channeld->status);
+/* Destructor for channel data */
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
+ GRPC_MDELEM_UNREF(chand->user_agent);
}
const grpc_channel_filter grpc_http_client_filter = {
- hc_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "http-client"};
+ hc_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "http-client"};
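As a usage note for user_agent_from_args above: primary user-agent pieces come first, then the library's own "grpc-c/<version> (<platform>)" token, then any secondary pieces, all space-joined. A hypothetical caller (channel target and version strings invented for illustration; grpc_insecure_channel_create assumed from the public C API) would set the two channel args like this:

#include <grpc/grpc.h>

/* With these args, the filter builds a user-agent string of the form
   "my-app/1.0 grpc-c/<version> (<platform>) my-wrapper/2.3". */
static grpc_channel *create_channel_with_user_agent(void) {
  grpc_arg args_array[2];
  grpc_channel_args args;

  args_array[0].type = GRPC_ARG_STRING;
  args_array[0].key = GRPC_ARG_PRIMARY_USER_AGENT_STRING;
  args_array[0].value.string = "my-app/1.0";

  args_array[1].type = GRPC_ARG_STRING;
  args_array[1].key = GRPC_ARG_SECONDARY_USER_AGENT_STRING;
  args_array[1].value.string = "my-wrapper/2.3";

  args.num_args = 2;
  args.args = args_array;

  /* Target address is made up for the example. */
  return grpc_insecure_channel_create("ipv4:127.0.0.1:50051", &args, NULL);
}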
diff --git a/src/core/channel/http_client_filter.h b/src/core/channel/http_client_filter.h
index 04eb839e00..6f619bbf00 100644
--- a/src/core/channel/http_client_filter.h
+++ b/src/core/channel/http_client_filter.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_HTTP_CLIENT_FILTER_H
-#define GRPC_INTERNAL_CORE_CHANNEL_HTTP_CLIENT_FILTER_H
+#ifndef GRPC_CORE_CHANNEL_HTTP_CLIENT_FILTER_H
+#define GRPC_CORE_CHANNEL_HTTP_CLIENT_FILTER_H
#include "src/core/channel/channel_stack.h"
@@ -41,4 +41,4 @@ extern const grpc_channel_filter grpc_http_client_filter;
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_CLIENT_FILTER_H */
+#endif /* GRPC_CORE_CHANNEL_HTTP_CLIENT_FILTER_H */
diff --git a/src/core/channel/http_server_filter.c b/src/core/channel/http_server_filter.c
index d3a01fd1a8..370f8dbe42 100644
--- a/src/core/channel/http_server_filter.c
+++ b/src/core/channel/http_server_filter.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,69 +33,59 @@
#include "src/core/channel/http_server_filter.h"
-#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <string.h>
+#include "src/core/profiling/timers.h"
+#include "src/core/transport/static_metadata.h"
typedef struct call_data {
- gpr_uint8 got_initial_metadata;
- gpr_uint8 seen_path;
- gpr_uint8 seen_post;
- gpr_uint8 sent_status;
- gpr_uint8 seen_scheme;
- gpr_uint8 seen_te_trailers;
+ uint8_t seen_path;
+ uint8_t seen_post;
+ uint8_t sent_status;
+ uint8_t seen_scheme;
+ uint8_t seen_te_trailers;
+ uint8_t seen_authority;
grpc_linked_mdelem status;
+ grpc_linked_mdelem content_type;
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hs_on_recv hook */
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
- grpc_iomgr_closure hs_on_recv;
+ grpc_closure hs_on_recv;
} call_data;
-typedef struct channel_data {
- grpc_mdelem *te_trailers;
- grpc_mdelem *method_post;
- grpc_mdelem *http_scheme;
- grpc_mdelem *https_scheme;
- /* TODO(klempner): Remove this once we stop using it */
- grpc_mdelem *grpc_scheme;
- grpc_mdelem *content_type;
- grpc_mdelem *status_ok;
- grpc_mdelem *status_not_found;
- grpc_mdstr *path_key;
- grpc_mdstr *authority_key;
- grpc_mdstr *host_key;
-
- grpc_mdctx *mdctx;
-} channel_data;
+typedef struct channel_data { uint8_t unused; } channel_data;
-/* used to silence 'variable not used' warnings */
-static void ignore_unused(void *ignored) {}
+typedef struct {
+ grpc_call_element *elem;
+ grpc_exec_ctx *exec_ctx;
+} server_filter_args;
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- channel_data *channeld = elem->channel_data;
+ server_filter_args *a = user_data;
+ grpc_call_element *elem = a->elem;
call_data *calld = elem->call_data;
/* Check if it is one of the headers we care about. */
- if (md == channeld->te_trailers || md == channeld->method_post ||
- md == channeld->http_scheme || md == channeld->https_scheme ||
- md == channeld->grpc_scheme || md == channeld->content_type) {
+ if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
+ md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
+ md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
/* swallow it */
- if (md == channeld->method_post) {
+ if (md == GRPC_MDELEM_METHOD_POST) {
calld->seen_post = 1;
- } else if (md->key == channeld->http_scheme->key) {
+ } else if (md->key == GRPC_MDSTR_SCHEME) {
calld->seen_scheme = 1;
- } else if (md == channeld->te_trailers) {
+ } else if (md == GRPC_MDELEM_TE_TRAILERS) {
calld->seen_te_trailers = 1;
}
/* TODO(klempner): Track that we've seen all the headers we should
require */
return NULL;
- } else if (md->key == channeld->content_type->key) {
+ } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
if (strncmp(grpc_mdstr_as_c_string(md->value), "application/grpc+", 17) ==
0) {
/* Although the C implementation doesn't (currently) generate them,
@@ -107,196 +97,137 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
gpr_log(GPR_INFO, "Unexpected content-type %s",
- channeld->content_type->key);
+ grpc_mdstr_as_c_string(md->value));
}
return NULL;
- } else if (md->key == channeld->te_trailers->key ||
- md->key == channeld->method_post->key ||
- md->key == channeld->http_scheme->key ||
- md->key == channeld->content_type->key) {
+ } else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
+ md->key == GRPC_MDSTR_SCHEME) {
gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
/* swallow it and error everything out. */
/* TODO(klempner): We ought to generate more descriptive error messages
on the wire here. */
- grpc_call_element_send_cancel(elem);
+ grpc_call_element_send_cancel(a->exec_ctx, elem);
return NULL;
- } else if (md->key == channeld->path_key) {
+ } else if (md->key == GRPC_MDSTR_PATH) {
if (calld->seen_path) {
gpr_log(GPR_ERROR, "Received :path twice");
return NULL;
}
calld->seen_path = 1;
return md;
- } else if (md->key == channeld->host_key) {
+ } else if (md->key == GRPC_MDSTR_AUTHORITY) {
+ calld->seen_authority = 1;
+ return md;
+ } else if (md->key == GRPC_MDSTR_HOST) {
/* translate host to :authority since :authority may be
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
- channeld->mdctx, grpc_mdstr_ref(channeld->authority_key),
- grpc_mdstr_ref(md->value));
- grpc_mdelem_unref(md);
+ GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
+ calld->seen_authority = 1;
return authority;
} else {
return md;
}
}
-static void hs_on_recv(void *user_data, int success) {
+static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- calld->got_initial_metadata = 1;
- grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
- /* Have we seen the required http2 transport headers?
- (:method, :scheme, content-type, with :path and :authority covered
- at the channel level right now) */
- if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
- calld->seen_path) {
- /* do nothing */
- } else {
- if (!calld->seen_path) {
- gpr_log(GPR_ERROR, "Missing :path header");
- }
- if (!calld->seen_post) {
- gpr_log(GPR_ERROR, "Missing :method header");
- }
- if (!calld->seen_scheme) {
- gpr_log(GPR_ERROR, "Missing :scheme header");
- }
- if (!calld->seen_te_trailers) {
- gpr_log(GPR_ERROR, "Missing te trailers header");
- }
- /* Error this call out */
- success = 0;
- grpc_call_element_send_cancel(elem);
+ server_filter_args a;
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, &a);
+ /* Have we seen the required http2 transport headers?
+ (:method, :scheme, content-type, with :path and :authority covered
+ at the channel level right now) */
+ if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
+ calld->seen_path && calld->seen_authority) {
+ /* do nothing */
+ } else {
+ if (!calld->seen_path) {
+ gpr_log(GPR_ERROR, "Missing :path header");
}
+ if (!calld->seen_authority) {
+ gpr_log(GPR_ERROR, "Missing :authority header");
+ }
+ if (!calld->seen_post) {
+ gpr_log(GPR_ERROR, "Missing :method header");
+ }
+ if (!calld->seen_scheme) {
+ gpr_log(GPR_ERROR, "Missing :scheme header");
+ }
+ if (!calld->seen_te_trailers) {
+ gpr_log(GPR_ERROR, "Missing te trailers header");
+ }
+ /* Error this call out */
+ success = 0;
+ grpc_call_element_send_cancel(exec_ctx, elem);
}
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
-static void hs_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
+static void hs_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
- size_t i;
- if (op->send_ops && !calld->sent_status) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- calld->sent_status = 1;
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->status,
- grpc_mdelem_ref(channeld->status_ok));
- break;
- }
+ if (op->send_initial_metadata != NULL && !calld->sent_status) {
+ calld->sent_status = 1;
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->status,
+ GRPC_MDELEM_STATUS_200);
+ grpc_metadata_batch_add_tail(
+ op->send_initial_metadata, &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
}
- if (op->recv_ops && !calld->got_initial_metadata) {
+ if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->hs_on_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->hs_on_recv;
}
}
-static void hs_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ GPR_TIMER_BEGIN("hs_start_transport_op", 0);
hs_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
-}
-
-/* Called on special channel events, such as disconnection or new incoming
- calls on the server */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(channeld);
-
- switch (op->type) {
- default:
- /* pass control up or down the stack depending on op->dir */
- grpc_channel_next_op(elem, op);
- break;
- }
+ grpc_call_next_op(exec_ctx, elem, op);
+ GPR_TIMER_END("hs_start_transport_op", 0);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_iomgr_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
- if (initial_op) hs_mutate_op(elem, initial_op);
+ grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
- channeld->status_ok = grpc_mdelem_from_strings(mdctx, ":status", "200");
- channeld->status_not_found =
- grpc_mdelem_from_strings(mdctx, ":status", "404");
- channeld->method_post = grpc_mdelem_from_strings(mdctx, ":method", "POST");
- channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
- channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
- channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
- channeld->path_key = grpc_mdstr_from_string(mdctx, ":path");
- channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority");
- channeld->host_key = grpc_mdstr_from_string(mdctx, "host");
- channeld->content_type =
- grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
-
- channeld->mdctx = mdctx;
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- grpc_mdelem_unref(channeld->te_trailers);
- grpc_mdelem_unref(channeld->status_ok);
- grpc_mdelem_unref(channeld->status_not_found);
- grpc_mdelem_unref(channeld->method_post);
- grpc_mdelem_unref(channeld->http_scheme);
- grpc_mdelem_unref(channeld->https_scheme);
- grpc_mdelem_unref(channeld->grpc_scheme);
- grpc_mdelem_unref(channeld->content_type);
- grpc_mdstr_unref(channeld->path_key);
- grpc_mdstr_unref(channeld->authority_key);
- grpc_mdstr_unref(channeld->host_key);
-}
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
- hs_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "http-server"};
+ hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "http-server"};
diff --git a/src/core/channel/http_server_filter.h b/src/core/channel/http_server_filter.h
index 42f76ed17f..528c8648fd 100644
--- a/src/core/channel/http_server_filter.h
+++ b/src/core/channel/http_server_filter.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,12 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_HTTP_SERVER_FILTER_H
-#define GRPC_INTERNAL_CORE_CHANNEL_HTTP_SERVER_FILTER_H
+#ifndef GRPC_CORE_CHANNEL_HTTP_SERVER_FILTER_H
+#define GRPC_CORE_CHANNEL_HTTP_SERVER_FILTER_H
#include "src/core/channel/channel_stack.h"
/* Processes metadata on the client side for HTTP2 transports */
extern const grpc_channel_filter grpc_http_server_filter;
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_SERVER_FILTER_H */
+#endif /* GRPC_CORE_CHANNEL_HTTP_SERVER_FILTER_H */
diff --git a/src/core/channel/noop_filter.c b/src/core/channel/noop_filter.c
deleted file mode 100644
index 1d2be716d7..0000000000
--- a/src/core/channel/noop_filter.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/channel/noop_filter.h"
-#include <grpc/support/log.h>
-
-typedef struct call_data {
- int unused; /* C89 requires at least one struct element */
-} call_data;
-
-typedef struct channel_data {
- int unused; /* C89 requires at least one struct element */
-} channel_data;
-
-/* used to silence 'variable not used' warnings */
-static void ignore_unused(void *ignored) {}
-
-static void noop_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-
- /* do nothing */
-}
-
-/* Called either:
- - in response to an API call (or similar) from above, to send something
- - a network event (or similar) from below, to receive something
- op contains type and call direction information, in addition to the data
- that is being sent or received. */
-static void noop_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
- noop_mutate_op(elem, op);
-
- /* pass control down the stack */
- grpc_call_next_op(elem, op);
-}
-
-/* Called on special channel events, such as disconnection or new incoming
- calls on the server */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(channeld);
-
- switch (op->type) {
- default:
- /* pass control up or down the stack depending on op->dir */
- grpc_channel_next_op(elem, op);
- break;
- }
-}
-
-/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- /* initialize members */
- calld->unused = channeld->unused;
-
- if (initial_op) noop_mutate_op(elem, initial_op);
-}
-
-/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-}
-
-/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->unused = 0;
-}
-
-/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(channeld);
-}
-
-const grpc_channel_filter grpc_no_op_filter = {
- noop_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "no-op"};
diff --git a/src/core/channel/subchannel_call_holder.c b/src/core/channel/subchannel_call_holder.c
new file mode 100644
index 0000000000..9c087dc2a1
--- /dev/null
+++ b/src/core/channel/subchannel_call_holder.c
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/channel/subchannel_call_holder.h"
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/profiling/timers.h"
+
+#define GET_CALL(holder) \
+ ((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
+
+#define CANCELLED_CALL ((grpc_subchannel_call *)1)
+
+static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
+ bool success);
+static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
+ bool success);
+
+static void add_waiting_locked(grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op);
+static void fail_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+void grpc_subchannel_call_holder_init(
+ grpc_subchannel_call_holder *holder,
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
+ void *pick_subchannel_arg, grpc_call_stack *owning_call) {
+ gpr_atm_rel_store(&holder->subchannel_call, 0);
+ holder->pick_subchannel = pick_subchannel;
+ holder->pick_subchannel_arg = pick_subchannel_arg;
+ gpr_mu_init(&holder->mu);
+ holder->connected_subchannel = NULL;
+ holder->waiting_ops = NULL;
+ holder->waiting_ops_count = 0;
+ holder->waiting_ops_capacity = 0;
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ holder->owning_call = owning_call;
+}
+
+void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ grpc_subchannel_call *call = GET_CALL(holder);
+ if (call != NULL && call != CANCELLED_CALL) {
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
+ }
+ GPR_ASSERT(holder->creation_phase ==
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
+ gpr_mu_destroy(&holder->mu);
+ GPR_ASSERT(holder->waiting_ops_count == 0);
+ gpr_free(holder->waiting_ops);
+}
+
+void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op) {
+ /* try to (atomically) get the call */
+ grpc_subchannel_call *call = GET_CALL(holder);
+ GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
+ if (call == CANCELLED_CALL) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ /* we failed; lock and figure out what to do */
+ gpr_mu_lock(&holder->mu);
+retry:
+ /* need to recheck that another thread hasn't set the call */
+ call = GET_CALL(holder);
+ if (call == CANCELLED_CALL) {
+ gpr_mu_unlock(&holder->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ gpr_mu_unlock(&holder->mu);
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ /* if this is a cancellation, then we can raise our cancelled flag */
+ if (op->cancel_with_status != GRPC_STATUS_OK) {
+ if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
+ goto retry;
+ } else {
+ switch (holder->creation_phase) {
+ case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
+ fail_locked(exec_ctx, holder);
+ break;
+ case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
+ holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
+ &holder->connected_subchannel, NULL);
+ break;
+ }
+ gpr_mu_unlock(&holder->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ }
+ /* if we don't have a subchannel, try to get one */
+ if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ holder->connected_subchannel == NULL &&
+ op->send_initial_metadata != NULL) {
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
+ grpc_closure_init(&holder->next_step, subchannel_ready, holder);
+ GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
+ if (holder->pick_subchannel(
+ exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
+ &holder->connected_subchannel, &holder->next_step)) {
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
+ }
+ }
+ /* if we've got a subchannel, then let's ask it to create a call */
+ if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ holder->connected_subchannel != NULL) {
+ gpr_atm_rel_store(
+ &holder->subchannel_call,
+ (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
+ exec_ctx, holder->connected_subchannel, holder->pollset));
+ retry_waiting_locked(exec_ctx, holder);
+ goto retry;
+ }
+ /* nothing to be done but wait */
+ add_waiting_locked(holder, op);
+ gpr_mu_unlock(&holder->mu);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+}
+
+static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_subchannel_call_holder *holder = arg;
+ gpr_mu_lock(&holder->mu);
+ GPR_ASSERT(holder->creation_phase ==
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ if (holder->connected_subchannel == NULL) {
+ fail_locked(exec_ctx, holder);
+ } else if (1 == gpr_atm_acq_load(&holder->subchannel_call)) {
+ /* already cancelled before subchannel became ready */
+ fail_locked(exec_ctx, holder);
+ } else {
+ gpr_atm_rel_store(
+ &holder->subchannel_call,
+ (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
+ exec_ctx, holder->connected_subchannel, holder->pollset));
+ retry_waiting_locked(exec_ctx, holder);
+ }
+ gpr_mu_unlock(&holder->mu);
+ GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
+}
+
+typedef struct {
+ grpc_transport_stream_op *ops;
+ size_t nops;
+ grpc_subchannel_call *call;
+} retry_ops_args;
+
+static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ retry_ops_args *a = gpr_malloc(sizeof(*a));
+ a->ops = holder->waiting_ops;
+ a->nops = holder->waiting_ops_count;
+ a->call = GET_CALL(holder);
+ if (a->call == CANCELLED_CALL) {
+ gpr_free(a);
+ fail_locked(exec_ctx, holder);
+ return;
+ }
+ holder->waiting_ops = NULL;
+ holder->waiting_ops_count = 0;
+ holder->waiting_ops_capacity = 0;
+ GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
+ grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), true,
+ NULL);
+}
+
+static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, bool success) {
+ retry_ops_args *a = args;
+ size_t i;
+ for (i = 0; i < a->nops; i++) {
+ grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
+ gpr_free(a->ops);
+ gpr_free(a);
+}
+
+static void add_waiting_locked(grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("add_waiting_locked", 0);
+ if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
+ holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
+ holder->waiting_ops =
+ gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
+ sizeof(*holder->waiting_ops));
+ }
+ holder->waiting_ops[holder->waiting_ops_count++] = *op;
+ GPR_TIMER_END("add_waiting_locked", 0);
+}
+
+static void fail_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ size_t i;
+ for (i = 0; i < holder->waiting_ops_count; i++) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx,
+ &holder->waiting_ops[i]);
+ }
+ holder->waiting_ops_count = 0;
+}
+
+char *grpc_subchannel_call_holder_get_peer(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
+ grpc_subchannel_call *subchannel_call = GET_CALL(holder);
+
+ if (subchannel_call) {
+ return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
+ } else {
+ return NULL;
+ }
+}
diff --git a/src/core/channel/subchannel_call_holder.h b/src/core/channel/subchannel_call_holder.h
new file mode 100644
index 0000000000..9086cdc882
--- /dev/null
+++ b/src/core/channel/subchannel_call_holder.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
+#define GRPC_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
+
+#include "src/core/client_config/subchannel.h"
+
+/** Pick a subchannel for grpc_subchannel_call_holder;
+ Return 1 if subchannel is available immediately (in which case on_ready
+ should not be called), or 0 otherwise (in which case on_ready should be
+ called when the subchannel is available) */
+typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
+ grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
+
+typedef enum {
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
+} grpc_subchannel_call_holder_creation_phase;
+
+/** Wrapper for holding a pointer to grpc_subchannel_call, and the
+ associated machinery to create such a pointer.
+ Handles queueing of stream ops until a call object is ready, waiting
+ for initial metadata before trying to create a call object,
+ and handling cancellation gracefully.
+
+ Both the channel and uchannel filter use this as their call_data. */
+typedef struct grpc_subchannel_call_holder {
+ /** either 0 for no call, 1 for cancelled, or a pointer to a
+ grpc_subchannel_call */
+ gpr_atm subchannel_call;
+ /** Helper function to choose the subchannel on which to create
+ the call object. Channel filter delegates to the load
+ balancing policy (once it's ready); uchannel returns
+ immediately */
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
+ void *pick_subchannel_arg;
+
+ gpr_mu mu;
+
+ grpc_subchannel_call_holder_creation_phase creation_phase;
+ grpc_connected_subchannel *connected_subchannel;
+ grpc_pollset *pollset;
+
+ grpc_transport_stream_op *waiting_ops;
+ size_t waiting_ops_count;
+ size_t waiting_ops_capacity;
+
+ grpc_closure next_step;
+
+ grpc_call_stack *owning_call;
+} grpc_subchannel_call_holder;
+
+void grpc_subchannel_call_holder_init(
+ grpc_subchannel_call_holder *holder,
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
+ void *pick_subchannel_arg, grpc_call_stack *owning_call);
+void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op);
+char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+#endif /* GRPC_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H */
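To illustrate the pick contract declared above, here is a minimal sketch of an immediately-resolving pick callback in the style the comments attribute to the uchannel: the connected subchannel is already known, so the pick returns 1 and on_ready is never scheduled. The my_uchannel_data wrapper and the my_immediate_pick name are hypothetical, for illustration only.

    #include "src/core/channel/subchannel_call_holder.h"

    /* Hypothetical per-channel state: a uchannel-style caller already holds
       its single connected subchannel. */
    typedef struct my_uchannel_data {
      grpc_connected_subchannel *connected;
    } my_uchannel_data;

    /* Immediate pick: fill *connected_subchannel and return 1, so the holder
       never waits on on_ready; initial_metadata is not consulted. */
    static int my_immediate_pick(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_metadata_batch *initial_metadata,
                                 grpc_connected_subchannel **connected_subchannel,
                                 grpc_closure *on_ready) {
      my_uchannel_data *d = arg;
      *connected_subchannel = d->connected;
      return 1; /* available now: the caller must not invoke on_ready */
    }

Such a callback would be installed with grpc_subchannel_call_holder_init(holder, my_immediate_pick, d, owning_call); the channel filter variant would instead stash on_ready, return 0, and run the closure once the load balancing policy produces a connected subchannel.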
diff --git a/src/core/client_config/README.md b/src/core/client_config/README.md
new file mode 100644
index 0000000000..fff7a5af5b
--- /dev/null
+++ b/src/core/client_config/README.md
@@ -0,0 +1,66 @@
+Client Configuration Support for GRPC
+=====================================
+
+This library provides high level configuration machinery to construct client
+channels and load balance between them.
+
+Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
+to resolve a name into configuration data for the channel. Such configuration
+data might include:
+
+- a list of (ip, port) addresses to connect to
+- a load balancing policy to decide which server to send a request to
+- a set of filters to mutate outgoing requests (say, by adding metadata)
+
+The resolver provides this data as a stream of grpc_client_config objects to
+the channel. We represent configuration as a stream so that it can be changed
+by the resolver during execution, by reacting to external events (such as a
+new configuration file being pushed to some store).
+
+
+Load Balancing
+--------------
+
+Load balancing configuration is provided by a grpc_lb_policy object, stored as
+part of grpc_client_config.
+
+The primary job of a load balancing policy is to pick a target server given
+only the initial metadata for a request. It does this by providing a
+grpc_subchannel object to the owning channel.
+
+
+Sub-Channels
+------------
+
+A sub-channel provides a connection to a server for a client channel. It has a
+connectivity state like a regular channel, and so can be connected or
+disconnected. This connectivity state can be used to inform load balancing
+decisions (for example, by avoiding disconnected backends).
+
+Configured sub-channels are fully set up to participate in the grpc data plane.
+Their behavior is specified by a set of grpc channel filters defined at their
+construction. To customize this behavior, resolvers build
+grpc_subchannel_factory objects, which use the decorator pattern to customize
+construction arguments for concrete grpc_subchannel instances.
+
+
+Naming for GRPC
+===============
+
+Names in GRPC are represented by a URI (as defined in
+[RFC 3986](https://tools.ietf.org/html/rfc3986)).
+
+The following schemes are currently supported:
+
+- dns:///host:port - the dns scheme is currently supported so long as the
+  authority is empty (authority-based dns resolution is expected in a future
+  release)
+
+- unix:path - the unix scheme is used to create and connect to unix domain
+  sockets - the authority must be empty, and the path represents the
+  absolute or relative path to the desired socket
+
+- ipv4:host:port - a pre-resolved ipv4 dotted decimal address/port
+  combination
+
+- ipv6:[host]:port - a pre-resolved ipv6 address/port combination
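For concreteness, a few hypothetical endpoint names that fit the schemes above:

- dns:///greeter.example.com:50051
- unix:/tmp/greeter.sock
- ipv4:10.0.0.10:50051
- ipv6:[2001:db8::10]:50051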
diff --git a/src/core/iomgr/alarm_internal.h b/src/core/client_config/client_config.c
index 0268a01bad..c500af25ee 100644
--- a/src/core/iomgr/alarm_internal.h
+++ b/src/core/client_config/client_config.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,32 +31,44 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H
-#define GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H
+#include "src/core/client_config/client_config.h"
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
+#include <string.h>
-/* iomgr internal api for dealing with alarms */
+#include <grpc/support/alloc.h>
-/* Check for alarms to be run, and run them.
- Return non zero if alarm callbacks were executed.
- Drops drop_mu if it is non-null before executing callbacks.
- If next is non-null, TRY to update *next with the next running alarm
- IF that alarm occurs before *next current value.
- *next is never guaranteed to be updated on any given execution; however,
- with high probability at least one thread in the system will see an update
- at any time slice. */
+struct grpc_client_config {
+ gpr_refcount refs;
+ grpc_lb_policy *lb_policy;
+};
-int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next);
+grpc_client_config *grpc_client_config_create() {
+ grpc_client_config *c = gpr_malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ gpr_ref_init(&c->refs, 1);
+ return c;
+}
-void grpc_alarm_list_init(gpr_timespec now);
-void grpc_alarm_list_shutdown(void);
+void grpc_client_config_ref(grpc_client_config *c) { gpr_ref(&c->refs); }
-gpr_timespec grpc_alarm_list_next_timeout(void);
+void grpc_client_config_unref(grpc_exec_ctx *exec_ctx, grpc_client_config *c) {
+ if (gpr_unref(&c->refs)) {
+ if (c->lb_policy != NULL) {
+ GRPC_LB_POLICY_UNREF(exec_ctx, c->lb_policy, "client_config");
+ }
+ gpr_free(c);
+ }
+}
-/* the following must be implemented by each iomgr implementation */
+void grpc_client_config_set_lb_policy(grpc_client_config *c,
+ grpc_lb_policy *lb_policy) {
+ GPR_ASSERT(c->lb_policy == NULL);
+ if (lb_policy) {
+ GRPC_LB_POLICY_REF(lb_policy, "client_config");
+ }
+ c->lb_policy = lb_policy;
+}
-void grpc_kick_poller(void);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H */
+grpc_lb_policy *grpc_client_config_get_lb_policy(grpc_client_config *c) {
+ return c->lb_policy;
+}
diff --git a/src/core/client_config/client_config.h b/src/core/client_config/client_config.h
new file mode 100644
index 0000000000..9b37fdc211
--- /dev/null
+++ b/src/core/client_config/client_config.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_CLIENT_CONFIG_H
+#define GRPC_CORE_CLIENT_CONFIG_CLIENT_CONFIG_H
+
+#include "src/core/client_config/lb_policy.h"
+
+/** Total configuration for a client. Provided, and updated, by
+ grpc_resolver */
+typedef struct grpc_client_config grpc_client_config;
+
+grpc_client_config *grpc_client_config_create();
+void grpc_client_config_ref(grpc_client_config *client_config);
+void grpc_client_config_unref(grpc_exec_ctx *exec_ctx,
+ grpc_client_config *client_config);
+
+void grpc_client_config_set_lb_policy(grpc_client_config *client_config,
+ grpc_lb_policy *lb_policy);
+grpc_lb_policy *grpc_client_config_get_lb_policy(
+ grpc_client_config *client_config);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_CLIENT_CONFIG_H */
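As a sketch of how a resolver might drive this API (the publish_config_to_channel hand-off and publish_new_config wrapper below are placeholders, not real functions; only the create/set/unref calls come from this header):

    #include "src/core/client_config/client_config.h"

    /* Placeholder for however the resolver delivers the next element of its
       configuration stream to the owning channel. */
    extern void publish_config_to_channel(grpc_exec_ctx *exec_ctx,
                                          grpc_client_config *config);

    static void publish_new_config(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_policy *lb_policy) {
      grpc_client_config *config = grpc_client_config_create();
      /* the config takes its own ref on lb_policy */
      grpc_client_config_set_lb_policy(config, lb_policy);
      publish_config_to_channel(exec_ctx, config);
      /* drop the creator's ref once the consumer holds its own */
      grpc_client_config_unref(exec_ctx, config);
    }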
diff --git a/src/core/client_config/connector.c b/src/core/client_config/connector.c
new file mode 100644
index 0000000000..aa34aa7fab
--- /dev/null
+++ b/src/core/client_config/connector.c
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/connector.h"
+
+grpc_connector* grpc_connector_ref(grpc_connector* connector) {
+ connector->vtable->ref(connector);
+ return connector;
+}
+
+void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector) {
+ connector->vtable->unref(exec_ctx, connector);
+}
+
+void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args,
+ grpc_closure* notify) {
+ connector->vtable->connect(exec_ctx, connector, in_args, out_args, notify);
+}
+
+void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector) {
+ connector->vtable->shutdown(exec_ctx, connector);
+}
diff --git a/src/core/client_config/connector.h b/src/core/client_config/connector.h
new file mode 100644
index 0000000000..93248fca4b
--- /dev/null
+++ b/src/core/client_config/connector.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_CONNECTOR_H
+#define GRPC_CORE_CLIENT_CONFIG_CONNECTOR_H
+
+#include "src/core/channel/channel_stack.h"
+#include "src/core/iomgr/sockaddr.h"
+#include "src/core/transport/transport.h"
+
+typedef struct grpc_connector grpc_connector;
+typedef struct grpc_connector_vtable grpc_connector_vtable;
+
+struct grpc_connector {
+ const grpc_connector_vtable *vtable;
+};
+
+typedef struct {
+ /** set of pollsets interested in this connection */
+ grpc_pollset_set *interested_parties;
+ /** address to connect to */
+ const struct sockaddr *addr;
+ size_t addr_len;
+ /** initial connect string to send */
+ gpr_slice initial_connect_string;
+ /** deadline for connection */
+ gpr_timespec deadline;
+ /** channel arguments (to be passed to transport) */
+ const grpc_channel_args *channel_args;
+} grpc_connect_in_args;
+
+typedef struct {
+ /** the connected transport */
+ grpc_transport *transport;
+
+ /** channel arguments (to be passed to the filters) */
+ const grpc_channel_args *channel_args;
+} grpc_connect_out_args;
+
+struct grpc_connector_vtable {
+ void (*ref)(grpc_connector *connector);
+ void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+ /** Implementation of grpc_connector_shutdown */
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+ /** Implementation of grpc_connector_connect */
+ void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
+ const grpc_connect_in_args *in_args,
+ grpc_connect_out_args *out_args, grpc_closure *notify);
+};
+
+grpc_connector *grpc_connector_ref(grpc_connector *connector);
+void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+/** Connect using the connector: max one outstanding call at a time */
+void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
+ const grpc_connect_in_args *in_args,
+ grpc_connect_out_args *out_args,
+ grpc_closure *notify);
+/** Cancel any pending connection */
+void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_CONNECTOR_H */
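Since the header defines connectors purely through a vtable, a hedged sketch of the smallest possible implementation may help: a connector that always fails its connect attempt. Every name prefixed with null_ below is invented for illustration.

    #include <grpc/support/alloc.h>
    #include <grpc/support/sync.h>

    #include "src/core/client_config/connector.h"

    typedef struct null_connector {
      grpc_connector base; /* must be first: the vtable pointer lives here */
      gpr_refcount refs;
    } null_connector;

    static void null_ref(grpc_connector *con) {
      gpr_ref(&((null_connector *)con)->refs);
    }

    static void null_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
      null_connector *c = (null_connector *)con;
      if (gpr_unref(&c->refs)) gpr_free(c);
    }

    static void null_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}

    static void null_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
                             const grpc_connect_in_args *in_args,
                             grpc_connect_out_args *out_args,
                             grpc_closure *notify) {
      /* never produces a transport: report failure to the caller right away */
      out_args->transport = NULL;
      grpc_exec_ctx_enqueue(exec_ctx, notify, false, NULL);
    }

    static const grpc_connector_vtable null_vtable = {
        null_ref, null_unref, null_shutdown, null_connect};

    grpc_connector *null_connector_create(void) {
      null_connector *c = gpr_malloc(sizeof(*c));
      c->base.vtable = &null_vtable;
      gpr_ref_init(&c->refs, 1);
      return &c->base;
    }

A real connector would instead start connecting to in_args->addr, build a transport, fill out_args, and invoke notify with success once the handshake completes.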
diff --git a/src/core/client_config/default_initial_connect_string.c b/src/core/client_config/default_initial_connect_string.c
new file mode 100644
index 0000000000..6a4e23e6f2
--- /dev/null
+++ b/src/core/client_config/default_initial_connect_string.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/slice.h>
+#include "src/core/iomgr/sockaddr.h"
+
+void grpc_set_default_initial_connect_string(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str) {}
diff --git a/src/core/client_config/initial_connect_string.c b/src/core/client_config/initial_connect_string.c
new file mode 100644
index 0000000000..19afa1675a
--- /dev/null
+++ b/src/core/client_config/initial_connect_string.c
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/initial_connect_string.h"
+
+#include <stddef.h>
+
+extern void grpc_set_default_initial_connect_string(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str);
+
+static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
+ grpc_set_default_initial_connect_string;
+
+void grpc_test_set_initial_connect_string_function(
+ grpc_set_initial_connect_string_func func) {
+ g_set_initial_connect_string_func = func;
+}
+
+void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+ gpr_slice *initial_str) {
+ g_set_initial_connect_string_func(addr, addr_len, initial_str);
+}
diff --git a/src/core/client_config/initial_connect_string.h b/src/core/client_config/initial_connect_string.h
new file mode 100644
index 0000000000..e6d2d8f8fe
--- /dev/null
+++ b/src/core/client_config/initial_connect_string.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
+#define GRPC_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
+
+#include <grpc/support/slice.h>
+#include "src/core/iomgr/sockaddr.h"
+
+typedef void (*grpc_set_initial_connect_string_func)(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str);
+void grpc_test_set_initial_connect_string_function(
+ grpc_set_initial_connect_string_func func);
+
+/** Set a string to be sent once connected. Optionally reset addr. */
+void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+ gpr_slice *connect_string);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H */
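As a sketch of how a test might use the override hook declared above (the function name and the string value are made up for illustration):

    #include <grpc/support/slice.h>

    #include "src/core/client_config/initial_connect_string.h"

    /* Hypothetical test hook: leave addr untouched and send a fixed banner
       before any other traffic. */
    static void set_magic_initial_connect_string(struct sockaddr **addr,
                                                 size_t *addr_len,
                                                 gpr_slice *initial_str) {
      *initial_str = gpr_slice_from_copied_string("magic-initial-string");
    }

    /* in test setup: */
    /* grpc_test_set_initial_connect_string_function(
           set_magic_initial_connect_string); */

The default implementation in default_initial_connect_string.c deliberately does nothing, so no prefix is configured unless an override is installed.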
diff --git a/src/core/client_config/lb_policies/load_balancer_api.c b/src/core/client_config/lb_policies/load_balancer_api.c
new file mode 100644
index 0000000000..a6b5785fe4
--- /dev/null
+++ b/src/core/client_config/lb_policies/load_balancer_api.c
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/lb_policies/load_balancer_api.h"
+#include "third_party/nanopb/pb_decode.h"
+#include "third_party/nanopb/pb_encode.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+typedef struct decode_serverlist_arg {
+ int first_pass;
+ int i;
+ size_t num_servers;
+ grpc_grpclb_server **servers;
+} decode_serverlist_arg;
+
+/* invoked once for every Server in ServerList */
+static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
+ void **arg) {
+ decode_serverlist_arg *dec_arg = *arg;
+ if (dec_arg->first_pass != 0) { /* first pass */
+ grpc_grpclb_server server;
+ if (!pb_decode(stream, grpc_lb_v0_Server_fields, &server)) {
+ return false;
+ }
+ dec_arg->num_servers++;
+ } else { /* second pass */
+ grpc_grpclb_server *server = gpr_malloc(sizeof(grpc_grpclb_server));
+ GPR_ASSERT(dec_arg->num_servers > 0);
+ if (dec_arg->i == 0) { /* first iteration of second pass */
+ dec_arg->servers =
+ gpr_malloc(sizeof(grpc_grpclb_server *) * dec_arg->num_servers);
+ }
+ if (!pb_decode(stream, grpc_lb_v0_Server_fields, server)) {
+ return false;
+ }
+ dec_arg->servers[dec_arg->i++] = server;
+ }
+
+ return true;
+}
+
+grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
+ grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
+
+ req->has_client_stats = 0; /* TODO(dgq): add support for stats once defined */
+ req->has_initial_request = 1;
+ req->initial_request.has_name = 1;
+ strncpy(req->initial_request.name, lb_service_name,
+ GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH);
+ return req;
+}
+
+gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
+ size_t encoded_length;
+ pb_ostream_t sizestream;
+ pb_ostream_t outputstream;
+ gpr_slice slice;
+ memset(&sizestream, 0, sizeof(pb_ostream_t));
+ pb_encode(&sizestream, grpc_lb_v0_LoadBalanceRequest_fields, request);
+ encoded_length = sizestream.bytes_written;
+
+ slice = gpr_slice_malloc(encoded_length);
+ outputstream =
+ pb_ostream_from_buffer(GPR_SLICE_START_PTR(slice), encoded_length);
+ GPR_ASSERT(pb_encode(&outputstream, grpc_lb_v0_LoadBalanceRequest_fields,
+ request) != 0);
+ return slice;
+}
+
+void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
+ gpr_free(request);
+}
+
+grpc_grpclb_response *grpc_grpclb_response_parse(gpr_slice encoded_response) {
+ bool status;
+ pb_istream_t stream =
+ pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_response),
+ GPR_SLICE_LENGTH(encoded_response));
+ grpc_grpclb_response *res = gpr_malloc(sizeof(grpc_grpclb_response));
+ memset(res, 0, sizeof(*res));
+ status = pb_decode(&stream, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ return res;
+}
+
+grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+ gpr_slice encoded_response) {
+ grpc_grpclb_serverlist *sl = gpr_malloc(sizeof(grpc_grpclb_serverlist));
+ bool status;
+ decode_serverlist_arg arg;
+ pb_istream_t stream =
+ pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_response),
+ GPR_SLICE_LENGTH(encoded_response));
+ pb_istream_t stream_at_start = stream;
+ grpc_grpclb_response *res = gpr_malloc(sizeof(grpc_grpclb_response));
+ memset(res, 0, sizeof(*res));
+ memset(&arg, 0, sizeof(decode_serverlist_arg));
+
+ res->server_list.servers.funcs.decode = decode_serverlist;
+ res->server_list.servers.arg = &arg;
+ arg.first_pass = 1;
+ status = pb_decode(&stream, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ GPR_ASSERT(arg.num_servers > 0);
+
+ arg.first_pass = 0;
+ status =
+ pb_decode(&stream_at_start, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ GPR_ASSERT(arg.servers != NULL);
+
+ sl->num_servers = arg.num_servers;
+ sl->servers = arg.servers;
+ if (res->server_list.has_expiration_interval) {
+ sl->expiration_interval = res->server_list.expiration_interval;
+ }
+ grpc_grpclb_response_destroy(res);
+ return sl;
+}
+
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
+ size_t i;
+ for (i = 0; i < serverlist->num_servers; i++) {
+ gpr_free(serverlist->servers[i]);
+ }
+ gpr_free(serverlist->servers);
+ gpr_free(serverlist);
+}
+
+void grpc_grpclb_response_destroy(grpc_grpclb_response *response) {
+ gpr_free(response);
+}
diff --git a/src/core/client_config/lb_policies/load_balancer_api.h b/src/core/client_config/lb_policies/load_balancer_api.h
new file mode 100644
index 0000000000..b7a4c9c8f5
--- /dev/null
+++ b/src/core/client_config/lb_policies/load_balancer_api.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_LOAD_BALANCER_API_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_LOAD_BALANCER_API_H
+
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/client_config/lb_policy_factory.h"
+#include "src/core/proto/grpc/lb/v0/load_balancer.pb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH 128
+
+typedef grpc_lb_v0_LoadBalanceRequest grpc_grpclb_request;
+typedef grpc_lb_v0_LoadBalanceResponse grpc_grpclb_response;
+typedef grpc_lb_v0_Server grpc_grpclb_server;
+typedef grpc_lb_v0_Duration grpc_grpclb_duration;
+typedef struct grpc_grpclb_serverlist {
+ grpc_grpclb_server **servers;
+ size_t num_servers;
+ grpc_grpclb_duration expiration_interval;
+} grpc_grpclb_serverlist;
+
+/** Create a request for a gRPC LB service under \a lb_service_name */
+grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
+
+/** Protocol Buffers v3-encode \a request */
+gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
+
+/** Destroy \a request */
+void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
+
+/** Parse (i.e., decode) the bytes in \a encoded_response as a \a
+ * grpc_grpclb_response */
+grpc_grpclb_response *grpc_grpclb_response_parse(gpr_slice encoded_response);
+
+/** Destroy \a serverlist */
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
+
+/** Parse the list of servers from an encoded \a grpc_grpclb_response */
+grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+ gpr_slice encoded_response);
+
+/** Destroy \a response */
+void grpc_grpclb_response_destroy(grpc_grpclb_response *response);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_LOAD_BALANCER_API_H */
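Putting these helpers together, a hedged sketch of the client-side round trip (the round_trip wrapper and the "lb.service.example" name are illustrative; the call that actually carries the payloads is elided):

    #include <grpc/support/slice.h>

    #include "src/core/client_config/lb_policies/load_balancer_api.h"

    static grpc_grpclb_serverlist *round_trip(gpr_slice encoded_response) {
      /* build and encode the initial request for the balancer */
      grpc_grpclb_request *req =
          grpc_grpclb_request_create("lb.service.example");
      gpr_slice req_payload = grpc_grpclb_request_encode(req);
      /* ... send req_payload on the LB call ... */
      gpr_slice_unref(req_payload);
      grpc_grpclb_request_destroy(req);

      /* encoded_response is assumed to hold a valid LoadBalanceResponse
         received from the balancer */
      return grpc_grpclb_response_parse_serverlist(encoded_response);
      /* the caller eventually frees it with grpc_grpclb_destroy_serverlist() */
    }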
diff --git a/src/core/client_config/lb_policies/pick_first.c b/src/core/client_config/lb_policies/pick_first.c
new file mode 100644
index 0000000000..8ed1223d39
--- /dev/null
+++ b/src/core/client_config/lb_policies/pick_first.c
@@ -0,0 +1,415 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/lb_policies/pick_first.h"
+#include "src/core/client_config/lb_policy_factory.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include "src/core/transport/connectivity_state.h"
+
+typedef struct pending_pick {
+ struct pending_pick *next;
+ grpc_pollset *pollset;
+ grpc_connected_subchannel **target;
+ grpc_closure *on_complete;
+} pending_pick;
+
+typedef struct {
+ /** base policy: must be first */
+ grpc_lb_policy base;
+ /** all our subchannels */
+ grpc_subchannel **subchannels;
+ size_t num_subchannels;
+
+ grpc_closure connectivity_changed;
+
+ /** the selected channel (a grpc_connected_subchannel) */
+ gpr_atm selected;
+
+ /** mutex protecting remaining members */
+ gpr_mu mu;
+ /** have we started picking? */
+ int started_picking;
+ /** are we shut down? */
+ int shutdown;
+ /** which subchannel are we watching? */
+ size_t checking_subchannel;
+ /** what is the connectivity of that channel? */
+ grpc_connectivity_state checking_connectivity;
+ /** list of picks that are waiting on connectivity */
+ pending_pick *pending_picks;
+
+ /** our connectivity state tracker */
+ grpc_connectivity_state_tracker state_tracker;
+} pick_first_lb_policy;
+
+#define GET_SELECTED(p) \
+ ((grpc_connected_subchannel *)gpr_atm_acq_load(&(p)->selected))
+
+void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ size_t i;
+ GPR_ASSERT(p->pending_picks == NULL);
+ for (i = 0; i < p->num_subchannels; i++) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
+ }
+ if (selected != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
+ }
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ gpr_free(p->subchannels);
+ gpr_mu_destroy(&p->mu);
+ gpr_free(p);
+}
+
+void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+ grpc_connected_subchannel *selected;
+ gpr_mu_lock(&p->mu);
+ selected = GET_SELECTED(p);
+ p->shutdown = 1;
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ /* cancel subscription */
+ if (selected != NULL) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
+ } else {
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
+ &p->connectivity_changed);
+ }
+ gpr_mu_unlock(&p->mu);
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ *pp->target = NULL;
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ pp = next;
+ }
+}
+
+static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
+ p->started_picking = 1;
+ p->checking_subchannel = 0;
+ p->checking_connectivity = GRPC_CHANNEL_IDLE;
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+}
+
+void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+
+ /* Check atomically for a selected channel */
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ if (selected != NULL) {
+ *target = selected;
+ return 1;
+ }
+
+ /* No subchannel selected yet, so acquire lock and then attempt again */
+ gpr_mu_lock(&p->mu);
+ selected = GET_SELECTED(p);
+ if (selected) {
+ gpr_mu_unlock(&p->mu);
+ *target = selected;
+ return 1;
+ } else {
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->pollset = pollset;
+ pp->target = target;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ gpr_mu_unlock(&p->mu);
+ return 0;
+ }
+}
+
+static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ pick_first_lb_policy *p = arg;
+ size_t i;
+ size_t num_subchannels = p->num_subchannels;
+ grpc_subchannel **subchannels;
+
+ gpr_mu_lock(&p->mu);
+ subchannels = p->subchannels;
+ p->num_subchannels = 0;
+ p->subchannels = NULL;
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
+
+ for (i = 0; i < num_subchannels; i++) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
+ }
+
+ gpr_free(subchannels);
+}
+
+static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ pick_first_lb_policy *p = arg;
+ grpc_subchannel *selected_subchannel;
+ pending_pick *pp;
+ grpc_connected_subchannel *selected;
+
+ gpr_mu_lock(&p->mu);
+
+ selected = GET_SELECTED(p);
+
+ if (p->shutdown) {
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ return;
+ } else if (selected != NULL) {
+ if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* if the selected channel goes bad, we're done */
+ p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ p->checking_connectivity, "selected_changed");
+ if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
+ } else {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ }
+ } else {
+ loop:
+ switch (p->checking_connectivity) {
+ case GRPC_CHANNEL_READY:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
+ selected_subchannel = p->subchannels[p->checking_subchannel];
+ selected =
+ grpc_subchannel_get_connected_subchannel(selected_subchannel);
+ GPR_ASSERT(selected != NULL);
+ GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
+ /* drop the pick list: we are connected now */
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
+ gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
+ grpc_exec_ctx_enqueue(
+ exec_ctx, grpc_closure_create(destroy_subchannels, p), true, NULL);
+ /* update any calls that were waiting for a pick */
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = selected;
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
+ break;
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connecting_transient_failure");
+ p->checking_subchannel =
+ (p->checking_subchannel + 1) % p->num_subchannels;
+ p->checking_connectivity = grpc_subchannel_check_connectivity(
+ p->subchannels[p->checking_subchannel]);
+ if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+ } else {
+ goto loop;
+ }
+ break;
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_IDLE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_CONNECTING,
+ "connecting_changed");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+ break;
+ case GRPC_CHANNEL_FATAL_FAILURE:
+ p->num_subchannels--;
+ GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
+ "pick_first");
+ if (p->num_subchannels == 0) {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE,
+ "no_more_channels");
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
+ "pick_first_connectivity");
+ } else {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "subchannel_failed");
+ p->checking_subchannel %= p->num_subchannels;
+ p->checking_connectivity = grpc_subchannel_check_connectivity(
+ p->subchannels[p->checking_subchannel]);
+ goto loop;
+ }
+ }
+ }
+
+ gpr_mu_unlock(&p->mu);
+}
+
+static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connectivity_state st;
+ gpr_mu_lock(&p->mu);
+ st = grpc_connectivity_state_check(&p->state_tracker);
+ gpr_mu_unlock(&p->mu);
+ return st;
+}
+
+void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
+ gpr_mu_unlock(&p->mu);
+}
+
+void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ if (selected) {
+ grpc_connected_subchannel_ping(exec_ctx, selected, closure);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
+ }
+}
+
+static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
+ pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_ping_one, pf_exit_idle,
+ pf_check_connectivity, pf_notify_on_state_change};
+
+static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
+
+static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
+
+static grpc_lb_policy *create_pick_first(grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+  pick_first_lb_policy *p;
+  if (args->num_subchannels == 0) return NULL;
+  p = gpr_malloc(sizeof(*p));
+  memset(p, 0, sizeof(*p));
+ grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
+ p->subchannels =
+ gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
+ p->num_subchannels = args->num_subchannels;
+ grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
+ "pick_first");
+ memcpy(p->subchannels, args->subchannels,
+ sizeof(grpc_subchannel *) * args->num_subchannels);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+ gpr_mu_init(&p->mu);
+ return &p->base;
+}
+
+static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
+ pick_first_factory_ref, pick_first_factory_unref, create_pick_first,
+ "pick_first"};
+
+static grpc_lb_policy_factory pick_first_lb_policy_factory = {
+ &pick_first_factory_vtable};
+
+grpc_lb_policy_factory *grpc_pick_first_lb_factory_create() {
+ return &pick_first_lb_policy_factory;
+}
diff --git a/src/core/client_config/lb_policies/pick_first.h b/src/core/client_config/lb_policies/pick_first.h
new file mode 100644
index 0000000000..3a3f195df5
--- /dev/null
+++ b/src/core/client_config/lb_policies/pick_first.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_PICK_FIRST_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_PICK_FIRST_H
+
+#include "src/core/client_config/lb_policy_factory.h"
+
+/** Returns a load balancing factory for the pick first policy, which picks
+ * the first subchannel from \a subchannels to successfully connect */
+grpc_lb_policy_factory *grpc_pick_first_lb_factory_create();
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_PICK_FIRST_H */
diff --git a/src/core/client_config/lb_policies/round_robin.c b/src/core/client_config/lb_policies/round_robin.c
new file mode 100644
index 0000000000..98d9acc75b
--- /dev/null
+++ b/src/core/client_config/lb_policies/round_robin.c
@@ -0,0 +1,536 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/lb_policies/round_robin.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include "src/core/transport/connectivity_state.h"
+
+typedef struct round_robin_lb_policy round_robin_lb_policy;
+
+int grpc_lb_round_robin_trace = 0;
+
+/** List of entities waiting for a pick.
+ *
+ * Once a pick is available, \a target is updated and \a on_complete called. */
+typedef struct pending_pick {
+ struct pending_pick *next;
+ grpc_pollset *pollset;
+ grpc_connected_subchannel **target;
+ grpc_closure *on_complete;
+} pending_pick;
+
+/** List of subchannels in a connectivity READY state */
+typedef struct ready_list {
+ grpc_subchannel *subchannel;
+ struct ready_list *next;
+ struct ready_list *prev;
+} ready_list;
+
+typedef struct {
+ /** index within policy->subchannels */
+ size_t index;
+ /** backpointer to owning policy */
+ round_robin_lb_policy *policy;
+ /** subchannel itself */
+ grpc_subchannel *subchannel;
+ /** notification that connectivity has changed on subchannel */
+ grpc_closure connectivity_changed_closure;
+  /** this subchannel's current position in the policy's ready_list */
+ ready_list *ready_list_node;
+ /** last observed connectivity */
+ grpc_connectivity_state connectivity_state;
+} subchannel_data;
+
+struct round_robin_lb_policy {
+ /** base policy: must be first */
+ grpc_lb_policy base;
+
+ /** all our subchannels */
+ size_t num_subchannels;
+ subchannel_data **subchannels;
+
+ /** mutex protecting remaining members */
+ gpr_mu mu;
+ /** have we started picking? */
+ int started_picking;
+ /** are we shutting down? */
+ int shutdown;
+ /** List of picks that are waiting on connectivity */
+ pending_pick *pending_picks;
+
+ /** our connectivity state tracker */
+ grpc_connectivity_state_tracker state_tracker;
+
+ /** (Dummy) root of the doubly linked list containing READY subchannels */
+ ready_list ready_list;
+ /** Last pick from the ready list. */
+ ready_list *ready_list_last_pick;
+};
+
+/** Returns the next subchannel from the connected list or NULL if the list is
+ * empty.
+ *
+ * Note that this function does *not* advance p->ready_list_last_pick. Use \a
+ * advance_last_picked_locked() for that. */
+static ready_list *peek_next_connected_locked(const round_robin_lb_policy *p) {
+ ready_list *selected;
+ selected = p->ready_list_last_pick->next;
+
+ while (selected != NULL) {
+ if (selected == &p->ready_list) {
+ GPR_ASSERT(selected->subchannel == NULL);
+ /* skip dummy root */
+ selected = selected->next;
+ } else {
+ GPR_ASSERT(selected->subchannel != NULL);
+ return selected;
+ }
+ }
+ return NULL;
+}
+
+/** Advance the \a ready_list picking head. */
+static void advance_last_picked_locked(round_robin_lb_policy *p) {
+ if (p->ready_list_last_pick->next != NULL) { /* non-empty list */
+ p->ready_list_last_pick = p->ready_list_last_pick->next;
+ if (p->ready_list_last_pick == &p->ready_list) {
+ /* skip dummy root */
+ p->ready_list_last_pick = p->ready_list_last_pick->next;
+ }
+ } else { /* should be an empty list */
+ GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
+ }
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
+ p->ready_list_last_pick, p->ready_list_last_pick->subchannel);
+ }
+}
+
+/** Appends subchannel \a sc (which has become READY) to the tail of the ready
+ * list, i.e. just before the dummy root at p->ready_list. */
+static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
+ grpc_subchannel *sc) {
+ ready_list *new_elem = gpr_malloc(sizeof(ready_list));
+ new_elem->subchannel = sc;
+ if (p->ready_list.prev == NULL) {
+ /* first element */
+ new_elem->next = &p->ready_list;
+ new_elem->prev = &p->ready_list;
+ p->ready_list.next = new_elem;
+ p->ready_list.prev = new_elem;
+ } else {
+ new_elem->next = &p->ready_list;
+ new_elem->prev = p->ready_list.prev;
+ p->ready_list.prev->next = new_elem;
+ p->ready_list.prev = new_elem;
+ }
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, sc);
+ }
+ return new_elem;
+}
+
+/** Removes \a node from the list of connected subchannels */
+static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
+ ready_list *node) {
+ if (node == NULL) {
+ return;
+ }
+ if (node == p->ready_list_last_pick) {
+    /* If we are removing the most recently picked node, reset the last-pick
+     * pointer to the dummy root of the list */
+ p->ready_list_last_pick = &p->ready_list;
+ }
+
+ /* removing last item */
+ if (node->next == &p->ready_list && node->prev == &p->ready_list) {
+ GPR_ASSERT(p->ready_list.next == node);
+ GPR_ASSERT(p->ready_list.prev == node);
+ p->ready_list.next = NULL;
+ p->ready_list.prev = NULL;
+ } else {
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+ }
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", node,
+ node->subchannel);
+ }
+
+ node->next = NULL;
+ node->prev = NULL;
+ node->subchannel = NULL;
+
+ gpr_free(node);
+}
+
+void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ size_t i;
+ ready_list *elem;
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ gpr_free(sd);
+ }
+
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ gpr_free(p->subchannels);
+ gpr_mu_destroy(&p->mu);
+
+ elem = p->ready_list.next;
+ while (elem != NULL && elem != &p->ready_list) {
+ ready_list *tmp;
+ tmp = elem->next;
+ elem->next = NULL;
+ elem->prev = NULL;
+ elem->subchannel = NULL;
+ gpr_free(elem);
+ elem = tmp;
+ }
+ gpr_free(p);
+}
+
+void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ size_t i;
+
+ gpr_mu_lock(&p->mu);
+
+ p->shutdown = 1;
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ &sd->connectivity_changed_closure);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
+ size_t i;
+ p->started_picking = 1;
+
+ gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%lu", (void *)p,
+ (unsigned long)p->num_subchannels);
+
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ sd->connectivity_state = GRPC_CHANNEL_IDLE;
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
+ }
+}
+
+void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ ready_list *selected;
+ gpr_mu_lock(&p->mu);
+ if ((selected = peek_next_connected_locked(p))) {
+ gpr_mu_unlock(&p->mu);
+ *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG,
+ "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
+ selected->subchannel, selected);
+ }
+ /* only advance the last picked pointer if the selection was used */
+ advance_last_picked_locked(p);
+ return 1;
+ } else {
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->pollset = pollset;
+ pp->target = target;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ gpr_mu_unlock(&p->mu);
+ return 0;
+ }
+}
+
+static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ subchannel_data *sd = arg;
+ round_robin_lb_policy *p = sd->policy;
+ pending_pick *pp;
+ ready_list *selected;
+
+ int unref = 0;
+
+ gpr_mu_lock(&p->mu);
+
+ if (p->shutdown) {
+ unref = 1;
+ } else {
+ switch (sd->connectivity_state) {
+ case GRPC_CHANNEL_READY:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
+ /* add the newly connected subchannel to the list of connected ones.
+ * Note that it goes to the "end of the line". */
+ sd->ready_list_node = add_connected_sc_locked(p, sd->subchannel);
+ /* at this point we know there's at least one suitable subchannel. Go
+ * ahead and pick one and notify the pending suitors in
+ * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
+ selected = peek_next_connected_locked(p);
+ if (p->pending_picks != NULL) {
+ /* if the selected subchannel is going to be used for the pending
+ * picks, update the last picked pointer */
+ advance_last_picked_locked(p);
+ }
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target =
+ grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG,
+ "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
+ selected->subchannel, selected);
+ }
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_IDLE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ sd->connectivity_state,
+ "connecting_changed");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ /* renew state notification */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+
+ /* remove from ready list if still present */
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connecting_transient_failure");
+ break;
+ case GRPC_CHANNEL_FATAL_FAILURE:
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
+
+ p->num_subchannels--;
+ GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ p->subchannels[sd->index]->index = sd->index;
+ gpr_free(sd);
+
+ unref = 1;
+ if (p->num_subchannels == 0) {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE,
+ "no_more_channels");
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ } else {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "subchannel_failed");
+ }
+ } /* switch */
+ } /* !shutdown */
+
+ gpr_mu_unlock(&p->mu);
+
+ if (unref) {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
+ }
+}
+
+static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ grpc_connectivity_state st;
+ gpr_mu_lock(&p->mu);
+ st = grpc_connectivity_state_check(&p->state_tracker);
+ gpr_mu_unlock(&p->mu);
+ return st;
+}
+
+static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
+ gpr_mu_unlock(&p->mu);
+}
+
+static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ ready_list *selected;
+ grpc_connected_subchannel *target;
+ gpr_mu_lock(&p->mu);
+ if ((selected = peek_next_connected_locked(p))) {
+ gpr_mu_unlock(&p->mu);
+ target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ grpc_connected_subchannel_ping(exec_ctx, target, closure);
+ } else {
+ gpr_mu_unlock(&p->mu);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
+ }
+}
+
+static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
+ rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_ping_one, rr_exit_idle,
+ rr_check_connectivity, rr_notify_on_state_change};
+
+static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
+
+static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
+
+static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ size_t i;
+ round_robin_lb_policy *p = gpr_malloc(sizeof(*p));
+ GPR_ASSERT(args->num_subchannels > 0);
+ memset(p, 0, sizeof(*p));
+ grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
+ p->num_subchannels = args->num_subchannels;
+ p->subchannels = gpr_malloc(sizeof(*p->subchannels) * p->num_subchannels);
+ memset(p->subchannels, 0, sizeof(*p->subchannels) * p->num_subchannels);
+ grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
+ "round_robin");
+
+ gpr_mu_init(&p->mu);
+ for (i = 0; i < args->num_subchannels; i++) {
+ subchannel_data *sd = gpr_malloc(sizeof(*sd));
+ memset(sd, 0, sizeof(*sd));
+ p->subchannels[i] = sd;
+ sd->policy = p;
+ sd->index = i;
+ sd->subchannel = args->subchannels[i];
+ grpc_closure_init(&sd->connectivity_changed_closure,
+ rr_connectivity_changed, sd);
+ }
+
+ /* The (dummy node) root of the ready list */
+ p->ready_list.subchannel = NULL;
+ p->ready_list.prev = NULL;
+ p->ready_list.next = NULL;
+ p->ready_list_last_pick = &p->ready_list;
+
+ return &p->base;
+}
+
+static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
+ round_robin_factory_ref, round_robin_factory_unref, create_round_robin,
+ "round_robin"};
+
+static grpc_lb_policy_factory round_robin_lb_policy_factory = {
+ &round_robin_factory_vtable};
+
+grpc_lb_policy_factory *grpc_round_robin_lb_factory_create() {
+ return &round_robin_lb_policy_factory;
+}
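The ready list manipulated in round_robin.c above is a circular, doubly-linked ring with a dummy root: an empty ring has root.next == root.prev == NULL, new nodes are spliced in just before the root ("end of the line"), and the last-pick cursor walks the ring while skipping the root. The standalone sketch below (illustrative names, not part of this change) exercises the same invariants as add_connected_sc_locked and the peek/advance helpers; it folds the peek and advance steps into one function for brevity.

/* Illustrative sketch of the dummy-root ring used by round_robin.c above. */
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
  struct node *next;
  struct node *prev;
  int id; /* stands in for the subchannel pointer */
} node;

static node root = {NULL, NULL, -1}; /* like p->ready_list */
static node *last_pick = &root;      /* like p->ready_list_last_pick */

/* mirrors add_connected_sc_locked: splice the node in just before the root */
static node *append(int id) {
  node *n = malloc(sizeof(*n));
  n->id = id;
  if (root.prev == NULL) { /* first element */
    n->next = n->prev = &root;
    root.next = root.prev = n;
  } else {
    n->next = &root;
    n->prev = root.prev;
    root.prev->next = n;
    root.prev = n;
  }
  return n;
}

/* folds peek_next_connected_locked + advance_last_picked_locked */
static node *pick_next(void) {
  last_pick = last_pick->next;
  if (last_pick == &root) last_pick = last_pick->next; /* skip the dummy root */
  return last_pick;
}

int main(void) {
  int i;
  append(1);
  append(2);
  append(3);
  for (i = 0; i < 6; i++) printf("%d ", pick_next()->id); /* 1 2 3 1 2 3 */
  printf("\n");
  return 0;
}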
diff --git a/src/core/client_config/lb_policies/round_robin.h b/src/core/client_config/lb_policies/round_robin.h
new file mode 100644
index 0000000000..7e6f1769e4
--- /dev/null
+++ b/src/core/client_config/lb_policies/round_robin.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_ROUND_ROBIN_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_ROUND_ROBIN_H
+
+#include "src/core/client_config/lb_policy.h"
+
+#include "src/core/client_config/lb_policy_factory.h"
+
+extern int grpc_lb_round_robin_trace;
+
+/** Returns a load balancing factory for the round robin policy */
+grpc_lb_policy_factory *grpc_round_robin_lb_factory_create();
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICIES_ROUND_ROBIN_H */
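A note on the trace flag declared above: grpc_lb_round_robin_trace gates the [READYLIST]/[RR PICK]/[RR CONN CHANGED] logging in round_robin.c. The snippet below shows the kind of wiring this flag is designed for; the "round_robin" tracer name, the trace.h header path and the registration site are assumptions about the surrounding tree, not part of this diff.

#include "src/core/client_config/lb_policies/round_robin.h"
#include "src/core/debug/trace.h"

/* Assumed init-time hook: once registered, GRPC_TRACE=round_robin in the
 * environment flips grpc_lb_round_robin_trace to non-zero. */
static void register_round_robin_tracer(void) {
  grpc_register_tracer("round_robin", &grpc_lb_round_robin_trace);
}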
diff --git a/src/core/client_config/lb_policy.c b/src/core/client_config/lb_policy.c
new file mode 100644
index 0000000000..0d8b007336
--- /dev/null
+++ b/src/core/client_config/lb_policy.c
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/lb_policy.h"
+
+#define WEAK_REF_BITS 16
+
+void grpc_lb_policy_init(grpc_lb_policy *policy,
+ const grpc_lb_policy_vtable *vtable) {
+ policy->vtable = vtable;
+ gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
+ policy->interested_parties = grpc_pollset_set_create();
+}
+
+#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
+#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
+#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
+#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
+#else
+#define REF_FUNC_EXTRA_ARGS
+#define REF_MUTATE_EXTRA_ARGS
+#define REF_FUNC_PASS_ARGS(new_reason)
+#define REF_MUTATE_PASS_ARGS(x)
+#endif
+
+static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
+ int barrier REF_MUTATE_EXTRA_ARGS) {
+ gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
+ : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
+#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
+ old_val + delta, reason);
+#endif
+ return old_val;
+}
+
+void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
+}
+
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ gpr_atm old_val =
+ ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
+ 1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
+ gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
+ gpr_atm check = 1 << WEAK_REF_BITS;
+ if ((old_val & mask) == check) {
+ policy->vtable->shutdown(exec_ctx, policy);
+ }
+ grpc_lb_policy_weak_unref(exec_ctx,
+ policy REF_FUNC_PASS_ARGS("strong-unref"));
+}
+
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
+}
+
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ gpr_atm old_val =
+ ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
+ if (old_val == 1) {
+ grpc_pollset_set_destroy(policy->interested_parties);
+ policy->vtable->destroy(exec_ctx, policy);
+ }
+}
+
+int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete) {
+ return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
+ target, on_complete);
+}
+
+void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target) {
+ policy->vtable->cancel_pick(exec_ctx, policy, target);
+}
+
+void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ policy->vtable->exit_idle(exec_ctx, policy);
+}
+
+void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure) {
+ policy->vtable->ping_one(exec_ctx, policy, closure);
+}
+
+void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connectivity_state *state,
+ grpc_closure *closure) {
+ policy->vtable->notify_on_state_change(exec_ctx, policy, state, closure);
+}
+
+grpc_connectivity_state grpc_lb_policy_check_connectivity(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ return policy->vtable->check_connectivity(exec_ctx, policy);
+}
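A worked example of the packed refcount implemented above may help: ref_pair keeps the strong count in the bits above WEAK_REF_BITS and the weak count in the low 16 bits. A strong unref adds 1 - (1 << WEAK_REF_BITS), i.e. it drops one strong ref while taking a temporary weak ref so shutdown() can run safely; destroy() only runs when the last weak ref goes away. The sketch below replays that arithmetic with a plain integer (no atomics, purely illustrative).

#include <assert.h>
#include <stdint.h>

#define WEAK_REF_BITS 16

int main(void) {
  intptr_t ref_pair = (intptr_t)1 << WEAK_REF_BITS; /* init: 1 strong, 0 weak */
  intptr_t old;

  ref_pair += 1; /* grpc_lb_policy_weak_ref */

  /* grpc_lb_policy_unref: delta = 1 - (1 << WEAK_REF_BITS). The old value
   * held exactly one strong ref, so shutdown() would be invoked here,
   * protected by the +1 weak ref the same delta just acquired. */
  old = ref_pair;
  ref_pair += 1 - ((intptr_t)1 << WEAK_REF_BITS);
  assert((old & ~(intptr_t)((1 << WEAK_REF_BITS) - 1)) ==
         (intptr_t)1 << WEAK_REF_BITS);
  assert(ref_pair == 2); /* two weak refs remain */

  ref_pair -= 1; /* weak unref done at the end of grpc_lb_policy_unref */
  ref_pair -= 1; /* last weak unref: old value was 1, so destroy() runs */
  assert(ref_pair == 0);
  return 0;
}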
diff --git a/src/core/client_config/lb_policy.h b/src/core/client_config/lb_policy.h
new file mode 100644
index 0000000000..ffebc2a69c
--- /dev/null
+++ b/src/core/client_config/lb_policy.h
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICY_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICY_H
+
+#include "src/core/client_config/subchannel.h"
+#include "src/core/transport/connectivity_state.h"
+
+/** A load balancing policy: specified by a vtable and a struct (which
+ is expected to be extended to contain some parameters) */
+typedef struct grpc_lb_policy grpc_lb_policy;
+typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
+
+typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
+ grpc_status_code status, const char *errmsg);
+
+struct grpc_lb_policy {
+ const grpc_lb_policy_vtable *vtable;
+ gpr_atm ref_pair;
+ /* owned pointer to interested parties in load balancing decisions */
+ grpc_pollset_set *interested_parties;
+};
+
+struct grpc_lb_policy_vtable {
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+
+ /** implement grpc_lb_policy_pick */
+ int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete);
+ void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target);
+
+ void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
+
+ /** try to enter a READY connectivity state */
+ void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+
+ /** check the current connectivity of the lb_policy */
+ grpc_connectivity_state (*check_connectivity)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy);
+
+ /** call \a closure when the connectivity state of the policy changes from
+ *state. Updates *state with the new state of the policy */
+ void (*notify_on_state_change)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connectivity_state *state,
+ grpc_closure *closure);
+};
+
+/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
+#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#define GRPC_LB_POLICY_REF(p, r) \
+ grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
+ grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_WEAK_REF(p, r) \
+ grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
+ grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
+ const char *reason);
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const char *file, int line, const char *reason);
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
+ const char *reason);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const char *file, int line, const char *reason);
+#else
+#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
+#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
+#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
+#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
+void grpc_lb_policy_ref(grpc_lb_policy *policy);
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+#endif
+
+/** called by concrete implementations to initialize the base struct */
+void grpc_lb_policy_init(grpc_lb_policy *policy,
+ const grpc_lb_policy_vtable *vtable);
+
+/** Given initial metadata in \a initial_metadata, find an appropriate
+ target for this rpc, and 'return' it by calling \a on_complete after setting
+ \a target.
+ Picking can be asynchronous. Any IO should be done under \a pollset. */
+int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete);
+
+void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
+
+void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target);
+
+void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+
+void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connectivity_state *state,
+ grpc_closure *closure);
+
+grpc_connectivity_state grpc_lb_policy_check_connectivity(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICY_H */
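For callers, the important part of the pick contract above is the return value: 1 means the pick completed synchronously (*target is already set and on_complete will not be scheduled), 0 means the pick is pending and the policy schedules on_complete once a connected subchannel is available; round_robin.c above implements exactly this. A caller-side sketch, with hypothetical helper and type names:

#include "src/core/client_config/lb_policy.h"

typedef struct {
  grpc_connected_subchannel *target;
  grpc_closure on_pick_done;
} pick_state; /* hypothetical caller state */

static void on_pick_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  pick_state *ps = arg;
  /* ps->target is now set, or NULL if the policy was shut down */
}

static void start_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                       grpc_pollset *pollset,
                       grpc_metadata_batch *initial_metadata, pick_state *ps) {
  grpc_closure_init(&ps->on_pick_done, on_pick_done, ps);
  if (grpc_lb_policy_pick(exec_ctx, policy, pollset, initial_metadata,
                          &ps->target, &ps->on_pick_done)) {
    /* completed synchronously: ps->target can be used right away and
     * on_pick_done will not run for this pick */
  }
}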
diff --git a/src/core/channel/noop_filter.h b/src/core/client_config/lb_policy_factory.c
index 96463e5322..e49de544e3 100644
--- a/src/core/channel/noop_filter.h
+++ b/src/core/client_config/lb_policy_factory.c
@@ -31,14 +31,18 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
-#define GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
+#include "src/core/client_config/lb_policy_factory.h"
-#include "src/core/channel/channel_stack.h"
+void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
+ factory->vtable->ref(factory);
+}
-/* No-op filter: simply takes everything it's given, and passes it on to the
- next filter. Exists simply as a starting point that others can take and
- customize for their own filters */
-extern const grpc_channel_filter grpc_no_op_filter;
+void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory) {
+ factory->vtable->unref(factory);
+}
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H */
+grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
+ grpc_lb_policy_factory* factory, grpc_lb_policy_args* args) {
+ if (factory == NULL) return NULL;
+ return factory->vtable->create_lb_policy(factory, args);
+}
diff --git a/src/core/client_config/lb_policy_factory.h b/src/core/client_config/lb_policy_factory.h
new file mode 100644
index 0000000000..842ba96098
--- /dev/null
+++ b/src/core/client_config/lb_policy_factory.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICY_FACTORY_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICY_FACTORY_H
+
+#include "src/core/client_config/lb_policy.h"
+#include "src/core/client_config/subchannel.h"
+
+typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
+typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
+
+/** A grpc_lb_policy_factory creates grpc_lb_policy instances for a given
+ set of subchannels */
+struct grpc_lb_policy_factory {
+ const grpc_lb_policy_factory_vtable *vtable;
+};
+
+typedef struct grpc_lb_policy_args {
+ grpc_subchannel **subchannels;
+ size_t num_subchannels;
+} grpc_lb_policy_args;
+
+struct grpc_lb_policy_factory_vtable {
+ void (*ref)(grpc_lb_policy_factory *factory);
+ void (*unref)(grpc_lb_policy_factory *factory);
+
+ /** Implementation of grpc_lb_policy_factory_create_lb_policy */
+ grpc_lb_policy *(*create_lb_policy)(grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args);
+
+ /** Name for the LB policy this factory implements */
+ const char *name;
+};
+
+void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory);
+void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory);
+
+/** Create a lb_policy instance. */
+grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
+ grpc_lb_policy_factory *factory, grpc_lb_policy_args *args);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICY_FACTORY_H */
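A short usage sketch of the factory interface above: the caller packs the subchannel set into grpc_lb_policy_args and hands it to the factory. The helper name is illustrative; in the tree this call is made by the client channel machinery.

#include "src/core/client_config/lb_policy_factory.h"

static grpc_lb_policy *make_policy(grpc_lb_policy_factory *factory,
                                   grpc_subchannel **subchannels,
                                   size_t num_subchannels) {
  grpc_lb_policy_args args;
  args.subchannels = subchannels;
  args.num_subchannels = num_subchannels;
  /* returns NULL when factory is NULL (see lb_policy_factory.c above) */
  return grpc_lb_policy_factory_create_lb_policy(factory, &args);
}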
diff --git a/src/core/client_config/lb_policy_registry.c b/src/core/client_config/lb_policy_registry.c
new file mode 100644
index 0000000000..fc302e82d7
--- /dev/null
+++ b/src/core/client_config/lb_policy_registry.c
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/lb_policy_registry.h"
+
+#include <string.h>
+
+#define MAX_POLICIES 10
+
+static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES];
+static int g_number_of_lb_policies = 0;
+
+static grpc_lb_policy_factory *g_default_lb_policy_factory;
+
+void grpc_lb_policy_registry_init(grpc_lb_policy_factory *default_factory) {
+ g_number_of_lb_policies = 0;
+ g_default_lb_policy_factory = default_factory;
+}
+
+void grpc_lb_policy_registry_shutdown(void) {
+ int i;
+ for (i = 0; i < g_number_of_lb_policies; i++) {
+ grpc_lb_policy_factory_unref(g_all_of_the_lb_policies[i]);
+ }
+}
+
+void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
+ int i;
+ for (i = 0; i < g_number_of_lb_policies; i++) {
+ GPR_ASSERT(0 != strcmp(factory->vtable->name,
+ g_all_of_the_lb_policies[i]->vtable->name));
+ }
+ GPR_ASSERT(g_number_of_lb_policies != MAX_POLICIES);
+ grpc_lb_policy_factory_ref(factory);
+ g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
+}
+
+static grpc_lb_policy_factory *lookup_factory(const char *name) {
+ int i;
+
+ if (name == NULL) return NULL;
+
+ for (i = 0; i < g_number_of_lb_policies; i++) {
+ if (0 == strcmp(name, g_all_of_the_lb_policies[i]->vtable->name)) {
+ return g_all_of_the_lb_policies[i];
+ }
+ }
+
+ return NULL;
+}
+
+grpc_lb_policy *grpc_lb_policy_create(const char *name,
+ grpc_lb_policy_args *args) {
+ grpc_lb_policy_factory *factory = lookup_factory(name);
+ grpc_lb_policy *lb_policy =
+ grpc_lb_policy_factory_create_lb_policy(factory, args);
+ return lb_policy;
+}
diff --git a/src/core/surface/byte_buffer_queue.h b/src/core/client_config/lb_policy_registry.h
index 32c57f8756..f3a08a3558 100644
--- a/src/core/surface/byte_buffer_queue.h
+++ b/src/core/client_config/lb_policy_registry.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,30 +31,24 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
-#define GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
+#ifndef GRPC_CORE_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
+#define GRPC_CORE_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
-#include <grpc/byte_buffer.h>
+#include "src/core/client_config/lb_policy_factory.h"
-/* TODO(ctiller): inline an element or two into this struct to avoid per-call
- allocations */
-typedef struct {
- grpc_byte_buffer **data;
- size_t count;
- size_t capacity;
-} grpc_bbq_array;
+/** Initialize the registry and set \a default_factory as the factory to be
+ * returned when no name is provided in a lookup */
+void grpc_lb_policy_registry_init(grpc_lb_policy_factory *default_factory);
+void grpc_lb_policy_registry_shutdown(void);
-/* should be initialized by zeroing memory */
-typedef struct {
- size_t drain_pos;
- grpc_bbq_array filling;
- grpc_bbq_array draining;
-} grpc_byte_buffer_queue;
+/** Register a LB policy factory. */
+void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
-void grpc_bbq_destroy(grpc_byte_buffer_queue *q);
-grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q);
-void grpc_bbq_flush(grpc_byte_buffer_queue *q);
-int grpc_bbq_empty(grpc_byte_buffer_queue *q);
-void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb);
+/** Create a \a grpc_lb_policy instance.
+ *
+ * If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
+ * will be returned. */
+grpc_lb_policy *grpc_lb_policy_create(const char *name,
+ grpc_lb_policy_args *args);
-#endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */
+#endif /* GRPC_CORE_CLIENT_CONFIG_LB_POLICY_REGISTRY_H */
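Putting the registry together with the factories above, the expected start-up wiring looks roughly like the sketch below. The pick_first factory constructor name is an assumption based on the pick_first.h header in this change's diffstat, and the registration site is likewise illustrative.

#include "src/core/client_config/lb_policies/pick_first.h"
#include "src/core/client_config/lb_policies/round_robin.h"
#include "src/core/client_config/lb_policy_registry.h"

static void register_lb_policies(void) {
  /* assumed: pick_first exposes a factory constructor analogous to the
   * round_robin one above */
  grpc_lb_policy_registry_init(grpc_pick_first_lb_factory_create());
  grpc_register_lb_policy(grpc_round_robin_lb_factory_create());
}

static grpc_lb_policy *policy_by_name(const char *name,
                                      grpc_lb_policy_args *args) {
  return grpc_lb_policy_create(name, args); /* e.g. name == "round_robin" */
}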
diff --git a/src/core/client_config/resolver.c b/src/core/client_config/resolver.c
new file mode 100644
index 0000000000..eda01e72ba
--- /dev/null
+++ b/src/core/client_config/resolver.c
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolver.h"
+
+void grpc_resolver_init(grpc_resolver *resolver,
+ const grpc_resolver_vtable *vtable) {
+ resolver->vtable = vtable;
+ gpr_ref_init(&resolver->refs, 1);
+}
+
+#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
+void grpc_resolver_ref(grpc_resolver *resolver, grpc_closure_list *closure_list,
+ const char *file, int line, const char *reason) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p ref %d -> %d %s",
+ resolver, (int)resolver->refs.count, (int)resolver->refs.count + 1,
+ reason);
+#else
+void grpc_resolver_ref(grpc_resolver *resolver) {
+#endif
+ gpr_ref(&resolver->refs);
+}
+
+#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
+void grpc_resolver_unref(grpc_resolver *resolver,
+ grpc_closure_list *closure_list, const char *file,
+ int line, const char *reason) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p unref %d -> %d %s",
+ resolver, (int)resolver->refs.count, (int)resolver->refs.count - 1,
+ reason);
+#else
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+#endif
+ if (gpr_unref(&resolver->refs)) {
+ resolver->vtable->destroy(exec_ctx, resolver);
+ }
+}
+
+void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+ resolver->vtable->shutdown(exec_ctx, resolver);
+}
+
+void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ resolver->vtable->channel_saw_error(exec_ctx, resolver);
+}
+
+void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete) {
+ resolver->vtable->next(exec_ctx, resolver, target_config, on_complete);
+}
diff --git a/src/core/client_config/resolver.h b/src/core/client_config/resolver.h
new file mode 100644
index 0000000000..96f88fef84
--- /dev/null
+++ b/src/core/client_config/resolver.h
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVER_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVER_H
+
+#include "src/core/client_config/client_config.h"
+#include "src/core/client_config/subchannel.h"
+#include "src/core/iomgr/iomgr.h"
+
+typedef struct grpc_resolver grpc_resolver;
+typedef struct grpc_resolver_vtable grpc_resolver_vtable;
+
+/** grpc_resolver provides grpc_client_config objects to grpc_channel
+ objects */
+struct grpc_resolver {
+ const grpc_resolver_vtable *vtable;
+ gpr_refcount refs;
+};
+
+struct grpc_resolver_vtable {
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config, grpc_closure *on_complete);
+};
+
+#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
+#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_RESOLVER_UNREF(cl, p, r) \
+ grpc_resolver_unref((cl), (p), __FILE__, __LINE__, (r))
+void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
+ const char *reason);
+void grpc_resolver_unref(grpc_resolver *policy, grpc_closure_list *closure_list,
+ const char *file, int line, const char *reason);
+#else
+#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
+#define GRPC_RESOLVER_UNREF(cl, p, r) grpc_resolver_unref((cl), (p))
+void grpc_resolver_ref(grpc_resolver *policy);
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
+#endif
+
+void grpc_resolver_init(grpc_resolver *resolver,
+ const grpc_resolver_vtable *vtable);
+
+void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+
+/** Notification that the channel has seen an error on some address.
+ Can be used as a hint that re-resolution is desirable soon. */
+void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
+
+/** Get the next client config. Called by the channel to fetch a new
+ configuration. Expected to set *target_config with a new configuration,
+ and then schedule on_complete for execution.
+
+ If resolution is fatally broken, set *target_config to NULL and
+ schedule on_complete. */
+void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVER_H */
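The next() contract above is designed for a watch loop: the channel asks for a configuration, and when on_complete runs it installs the new config (or handles NULL) and immediately asks again. A hedged caller-side sketch with hypothetical names:

#include "src/core/client_config/resolver.h"

typedef struct {
  grpc_resolver *resolver;
  grpc_client_config *config;
  grpc_closure on_config;
} watch_state; /* hypothetical caller state */

static void on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
                              bool success) {
  watch_state *w = arg;
  if (w->config == NULL) {
    /* resolution is fatally broken, or the resolver was shut down */
    return;
  }
  /* ...install the new config into the channel here..., then keep watching */
  grpc_resolver_next(exec_ctx, w->resolver, &w->config, &w->on_config);
}

static void start_watch(grpc_exec_ctx *exec_ctx, watch_state *w) {
  grpc_closure_init(&w->on_config, on_config_changed, w);
  grpc_resolver_next(exec_ctx, w->resolver, &w->config, &w->on_config);
}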
diff --git a/src/core/client_config/resolver_factory.c b/src/core/client_config/resolver_factory.c
new file mode 100644
index 0000000000..e7e9196ac4
--- /dev/null
+++ b/src/core/client_config/resolver_factory.c
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolver_factory.h"
+
+void grpc_resolver_factory_ref(grpc_resolver_factory* factory) {
+ factory->vtable->ref(factory);
+}
+
+void grpc_resolver_factory_unref(grpc_resolver_factory* factory) {
+ factory->vtable->unref(factory);
+}
+
+/** Create a resolver instance for a name */
+grpc_resolver* grpc_resolver_factory_create_resolver(
+ grpc_resolver_factory* factory, grpc_resolver_args* args) {
+ if (factory == NULL) return NULL;
+ return factory->vtable->create_resolver(factory, args);
+}
+
+char* grpc_resolver_factory_get_default_authority(
+ grpc_resolver_factory* factory, grpc_uri* uri) {
+ if (factory == NULL) return NULL;
+ return factory->vtable->get_default_authority(factory, uri);
+}
diff --git a/src/core/client_config/resolver_factory.h b/src/core/client_config/resolver_factory.h
new file mode 100644
index 0000000000..477f8db7f7
--- /dev/null
+++ b/src/core/client_config/resolver_factory.h
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVER_FACTORY_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVER_FACTORY_H
+
+#include "src/core/client_config/resolver.h"
+#include "src/core/client_config/subchannel_factory.h"
+#include "src/core/client_config/uri_parser.h"
+
+typedef struct grpc_resolver_factory grpc_resolver_factory;
+typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
+
+/** A grpc_resolver_factory creates grpc_resolver instances for a given URI
+ scheme */
+struct grpc_resolver_factory {
+ const grpc_resolver_factory_vtable *vtable;
+};
+
+typedef struct grpc_resolver_args {
+ grpc_uri *uri;
+ grpc_subchannel_factory *subchannel_factory;
+} grpc_resolver_args;
+
+struct grpc_resolver_factory_vtable {
+ void (*ref)(grpc_resolver_factory *factory);
+ void (*unref)(grpc_resolver_factory *factory);
+
+ /** Implementation of grpc_resolver_factory_create_resolver */
+ grpc_resolver *(*create_resolver)(grpc_resolver_factory *factory,
+ grpc_resolver_args *args);
+
+ /** Implementation of grpc_resolver_factory_get_default_authority */
+ char *(*get_default_authority)(grpc_resolver_factory *factory, grpc_uri *uri);
+
+ /** URI scheme that this factory implements */
+ const char *scheme;
+};
+
+void grpc_resolver_factory_ref(grpc_resolver_factory *factory);
+void grpc_resolver_factory_unref(grpc_resolver_factory *factory);
+
+/** Create a resolver instance for a name */
+grpc_resolver *grpc_resolver_factory_create_resolver(
+ grpc_resolver_factory *factory, grpc_resolver_args *args);
+
+/** Return a (freshly allocated with gpr_malloc) string representing
+ the default authority to use for this scheme. */
+char *grpc_resolver_factory_get_default_authority(
+ grpc_resolver_factory *factory, grpc_uri *uri);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVER_FACTORY_H */
diff --git a/src/core/client_config/resolver_registry.c b/src/core/client_config/resolver_registry.c
new file mode 100644
index 0000000000..89a945c2d3
--- /dev/null
+++ b/src/core/client_config/resolver_registry.c
@@ -0,0 +1,137 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolver_registry.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#define MAX_RESOLVERS 10
+
+static grpc_resolver_factory *g_all_of_the_resolvers[MAX_RESOLVERS];
+static int g_number_of_resolvers = 0;
+
+static char *g_default_resolver_prefix;
+
+void grpc_resolver_registry_init(const char *default_resolver_prefix) {
+ g_number_of_resolvers = 0;
+ g_default_resolver_prefix = gpr_strdup(default_resolver_prefix);
+}
+
+void grpc_resolver_registry_shutdown(void) {
+ int i;
+ for (i = 0; i < g_number_of_resolvers; i++) {
+ grpc_resolver_factory_unref(g_all_of_the_resolvers[i]);
+ }
+ gpr_free(g_default_resolver_prefix);
+}
+
+void grpc_register_resolver_type(grpc_resolver_factory *factory) {
+ int i;
+ for (i = 0; i < g_number_of_resolvers; i++) {
+ GPR_ASSERT(0 != strcmp(factory->vtable->scheme,
+ g_all_of_the_resolvers[i]->vtable->scheme));
+ }
+ GPR_ASSERT(g_number_of_resolvers != MAX_RESOLVERS);
+ grpc_resolver_factory_ref(factory);
+ g_all_of_the_resolvers[g_number_of_resolvers++] = factory;
+}
+
+static grpc_resolver_factory *lookup_factory(grpc_uri *uri) {
+ int i;
+
+ /* handling NULL URIs here simplifies grpc_resolver_create */
+ if (!uri) return NULL;
+
+ for (i = 0; i < g_number_of_resolvers; i++) {
+ if (0 == strcmp(uri->scheme, g_all_of_the_resolvers[i]->vtable->scheme)) {
+ return g_all_of_the_resolvers[i];
+ }
+ }
+
+ return NULL;
+}
+
+static grpc_resolver_factory *resolve_factory(const char *target,
+ grpc_uri **uri) {
+ char *tmp;
+ grpc_resolver_factory *factory = NULL;
+
+ GPR_ASSERT(uri != NULL);
+ *uri = grpc_uri_parse(target, 1);
+ factory = lookup_factory(*uri);
+ if (factory == NULL) {
+ if (g_default_resolver_prefix != NULL) {
+ grpc_uri_destroy(*uri);
+ gpr_asprintf(&tmp, "%s%s", g_default_resolver_prefix, target);
+ *uri = grpc_uri_parse(tmp, 1);
+ factory = lookup_factory(*uri);
+ if (factory == NULL) {
+ /* re-run both parses with error logging enabled (suppress_errors == 0)
+ so any parse failure is reported before giving up */
+ grpc_uri_destroy(grpc_uri_parse(target, 0));
+ grpc_uri_destroy(grpc_uri_parse(tmp, 0));
+ gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target,
+ tmp);
+ }
+ gpr_free(tmp);
+ } else {
+ grpc_uri_destroy(grpc_uri_parse(target, 0));
+ gpr_log(GPR_ERROR, "don't know how to resolve '%s'", target);
+ }
+ }
+ return factory;
+}
+
+grpc_resolver *grpc_resolver_create(
+ const char *target, grpc_subchannel_factory *subchannel_factory) {
+ grpc_uri *uri = NULL;
+ grpc_resolver_factory *factory = resolve_factory(target, &uri);
+ grpc_resolver *resolver;
+ grpc_resolver_args args;
+ memset(&args, 0, sizeof(args));
+ args.uri = uri;
+ args.subchannel_factory = subchannel_factory;
+ resolver = grpc_resolver_factory_create_resolver(factory, &args);
+ grpc_uri_destroy(uri);
+ return resolver;
+}
+
+char *grpc_get_default_authority(const char *target) {
+ grpc_uri *uri = NULL;
+ grpc_resolver_factory *factory = resolve_factory(target, &uri);
+ char *authority = grpc_resolver_factory_get_default_authority(factory, uri);
+ grpc_uri_destroy(uri);
+ return authority;
+}
diff --git a/src/core/client_config/resolver_registry.h b/src/core/client_config/resolver_registry.h
new file mode 100644
index 0000000000..1e4cebee0b
--- /dev/null
+++ b/src/core/client_config/resolver_registry.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVER_REGISTRY_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVER_REGISTRY_H
+
+#include "src/core/client_config/resolver_factory.h"
+
+void grpc_resolver_registry_init(const char *default_prefix);
+void grpc_resolver_registry_shutdown(void);
+
+/** Register a resolver type.
+ URIs whose scheme matches \a factory's scheme will be resolved with
+ resolvers created by \a factory. Targets whose scheme is not registered
+ are retried with the default prefix from grpc_resolver_registry_init
+ prepended. */
+void grpc_register_resolver_type(grpc_resolver_factory *factory);
+
+/** Create a resolver given \a target.
+ First tries to parse \a target as a URI. If this succeeds, tries
+ to locate a registered resolver factory based on the URI scheme.
+ If parsing or location fails, prefixes default_prefix from
+ grpc_resolver_registry_init to target, and tries again (if default_prefix
+ was not NULL).
+ If a resolver factory was found, use it to instantiate a resolver and
+ return it.
+ If a resolver factory was not found, return NULL. */
+grpc_resolver *grpc_resolver_create(
+ const char *target, grpc_subchannel_factory *subchannel_factory);
+
+/** Given a target, return a (freshly allocated with gpr_malloc) string
+ representing the default authority to pass from a client. */
+char *grpc_get_default_authority(const char *target);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVER_REGISTRY_H */
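A sketch of how the two entry points above behave for typical targets. The concrete scheme strings are illustrative: which schemes are actually available depends on what was registered, and the no-scheme fallback uses whatever default prefix was handed to grpc_resolver_registry_init.

#include "src/core/client_config/resolver_registry.h"

#include <grpc/support/alloc.h>

static grpc_resolver *resolver_for(const char *target,
                                   grpc_subchannel_factory *scf) {
  /* "dns:myservice.example.com:443": matched directly, assuming a factory
   *     registered for the "dns" scheme.
   * "myservice.example.com:443": no registered scheme, so the registry
   *     retries with the default prefix prepended; NULL if that also fails. */
  return grpc_resolver_create(target, scf);
}

static void default_authority_example(const char *target) {
  char *authority = grpc_get_default_authority(target);
  /* freshly gpr_malloc'd (or NULL if no factory matched); the caller owns it */
  gpr_free(authority);
}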
diff --git a/src/core/client_config/resolvers/dns_resolver.c b/src/core/client_config/resolvers/dns_resolver.c
new file mode 100644
index 0000000000..e28e4757a1
--- /dev/null
+++ b/src/core/client_config/resolvers/dns_resolver.c
@@ -0,0 +1,298 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolvers/dns_resolver.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/client_config/lb_policy_registry.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/support/string.h"
+
+typedef struct {
+ /** base class: must be first */
+ grpc_resolver base;
+ /** refcount */
+ gpr_refcount refs;
+ /** name to resolve */
+ char *name;
+ /** default port to use */
+ char *default_port;
+ /** subchannel factory */
+ grpc_subchannel_factory *subchannel_factory;
+ /** load balancing policy name */
+ char *lb_policy_name;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** are we currently resolving? */
+ int resolving;
+ /** which version of resolved_config have we published? */
+ int published_version;
+ /** which version of resolved_config is current? */
+ int resolved_version;
+ /** pending next completion, or NULL */
+ grpc_closure *next_completion;
+ /** target config address for next completion */
+ grpc_client_config **target_config;
+ /** current (fully resolved) config */
+ grpc_client_config *resolved_config;
+ /** retry timer */
+ bool have_retry_timer;
+ grpc_timer retry_timer;
+} dns_resolver;
+
+static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+
+static void dns_start_resolving_locked(dns_resolver *r);
+static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ dns_resolver *r);
+
+static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
+
+static const grpc_resolver_vtable dns_resolver_vtable = {
+ dns_destroy, dns_shutdown, dns_channel_saw_error, dns_next};
+
+static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+ dns_resolver *r = (dns_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ if (r->have_retry_timer) {
+ grpc_timer_cancel(exec_ctx, &r->retry_timer);
+ }
+ if (r->next_completion != NULL) {
+ *r->target_config = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
+ r->next_completion = NULL;
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ dns_resolver *r = (dns_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ if (!r->resolving) {
+ dns_start_resolving_locked(r);
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete) {
+ dns_resolver *r = (dns_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ GPR_ASSERT(!r->next_completion);
+ r->next_completion = on_complete;
+ r->target_config = target_config;
+ if (r->resolved_version == 0 && !r->resolving) {
+ dns_start_resolving_locked(r);
+ } else {
+ dns_maybe_finish_next_locked(exec_ctx, r);
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ dns_resolver *r = arg;
+
+ gpr_mu_lock(&r->mu);
+ r->have_retry_timer = false;
+ if (success) {
+ if (!r->resolving) {
+ dns_start_resolving_locked(r);
+ }
+ }
+ gpr_mu_unlock(&r->mu);
+
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
+}
+
+static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
+ dns_resolver *r = arg;
+ grpc_client_config *config = NULL;
+ grpc_subchannel **subchannels;
+ grpc_subchannel_args args;
+ grpc_lb_policy *lb_policy;
+ size_t i;
+ gpr_mu_lock(&r->mu);
+ GPR_ASSERT(r->resolving);
+ r->resolving = 0;
+ if (addresses != NULL) {
+ grpc_lb_policy_args lb_policy_args;
+ config = grpc_client_config_create();
+ subchannels = gpr_malloc(sizeof(grpc_subchannel *) * addresses->naddrs);
+ size_t naddrs = 0;
+ for (i = 0; i < addresses->naddrs; i++) {
+ memset(&args, 0, sizeof(args));
+ args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
+ args.addr_len = (size_t)addresses->addrs[i].len;
+ grpc_subchannel *subchannel = grpc_subchannel_factory_create_subchannel(
+ exec_ctx, r->subchannel_factory, &args);
+ if (subchannel != NULL) {
+ subchannels[naddrs++] = subchannel;
+ }
+ }
+ memset(&lb_policy_args, 0, sizeof(lb_policy_args));
+ lb_policy_args.subchannels = subchannels;
+ lb_policy_args.num_subchannels = naddrs;
+ lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
+ if (lb_policy != NULL) {
+ grpc_client_config_set_lb_policy(config, lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "construction");
+ }
+ grpc_resolved_addresses_destroy(addresses);
+ gpr_free(subchannels);
+ } else {
+ int retry_seconds = 15;
+ gpr_log(GPR_DEBUG, "dns resolution failed: retrying in %d seconds",
+ retry_seconds);
+ GPR_ASSERT(!r->have_retry_timer);
+ r->have_retry_timer = true;
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ GRPC_RESOLVER_REF(&r->base, "retry-timer");
+ grpc_timer_init(
+ exec_ctx, &r->retry_timer,
+ gpr_time_add(now, gpr_time_from_seconds(retry_seconds, GPR_TIMESPAN)),
+ dns_on_retry_timer, r, now);
+ }
+ if (r->resolved_config) {
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
+ }
+ r->resolved_config = config;
+ r->resolved_version++;
+ dns_maybe_finish_next_locked(exec_ctx, r);
+ gpr_mu_unlock(&r->mu);
+
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
+}
+
+static void dns_start_resolving_locked(dns_resolver *r) {
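+  /* Take a ref for the duration of the in-flight lookup; it is released at
+     the end of dns_on_resolved. */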
+ GRPC_RESOLVER_REF(&r->base, "dns-resolving");
+ GPR_ASSERT(!r->resolving);
+ r->resolving = 1;
+ grpc_resolve_address(r->name, r->default_port, dns_on_resolved, r);
+}
+
+static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ dns_resolver *r) {
+ if (r->next_completion != NULL &&
+ r->resolved_version != r->published_version) {
+ *r->target_config = r->resolved_config;
+ if (r->resolved_config) {
+ grpc_client_config_ref(r->resolved_config);
+ }
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
+ r->next_completion = NULL;
+ r->published_version = r->resolved_version;
+ }
+}
+
+static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+ dns_resolver *r = (dns_resolver *)gr;
+ gpr_mu_destroy(&r->mu);
+ if (r->resolved_config) {
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
+ }
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
+ gpr_free(r->name);
+ gpr_free(r->default_port);
+ gpr_free(r->lb_policy_name);
+ gpr_free(r);
+}
+
+static grpc_resolver *dns_create(grpc_resolver_args *args,
+ const char *default_port,
+ const char *lb_policy_name) {
+ dns_resolver *r;
+ const char *path = args->uri->path;
+
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based dns uri's not supported");
+ return NULL;
+ }
+
+ if (path[0] == '/') ++path;
+
+ r = gpr_malloc(sizeof(dns_resolver));
+ memset(r, 0, sizeof(*r));
+ gpr_ref_init(&r->refs, 1);
+ gpr_mu_init(&r->mu);
+ grpc_resolver_init(&r->base, &dns_resolver_vtable);
+ r->name = gpr_strdup(path);
+ r->default_port = gpr_strdup(default_port);
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
+ r->lb_policy_name = gpr_strdup(lb_policy_name);
+ return &r->base;
+}
+
+/*
+ * FACTORY
+ */
+
+static void dns_factory_ref(grpc_resolver_factory *factory) {}
+
+static void dns_factory_unref(grpc_resolver_factory *factory) {}
+
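+/* Defaults applied when the target does not specify them: the "https"
+   service (port 443) as the port passed to DNS resolution, and "pick_first"
+   as the load balancing policy. */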
+static grpc_resolver *dns_factory_create_resolver(
+ grpc_resolver_factory *factory, grpc_resolver_args *args) {
+ return dns_create(args, "https", "pick_first");
+}
+
+char *dns_factory_get_default_host_name(grpc_resolver_factory *factory,
+ grpc_uri *uri) {
+ const char *path = uri->path;
+ if (path[0] == '/') ++path;
+ return gpr_strdup(path);
+}
+
+static const grpc_resolver_factory_vtable dns_factory_vtable = {
+ dns_factory_ref, dns_factory_unref, dns_factory_create_resolver,
+ dns_factory_get_default_host_name, "dns"};
+static grpc_resolver_factory dns_resolver_factory = {&dns_factory_vtable};
+
+grpc_resolver_factory *grpc_dns_resolver_factory_create() {
+ return &dns_resolver_factory;
+}
diff --git a/src/core/client_config/resolvers/dns_resolver.h b/src/core/client_config/resolvers/dns_resolver.h
new file mode 100644
index 0000000000..b24280b507
--- /dev/null
+++ b/src/core/client_config/resolvers/dns_resolver.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVERS_DNS_RESOLVER_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVERS_DNS_RESOLVER_H
+
+#include "src/core/client_config/resolver_factory.h"
+
+/** Create a dns resolver factory */
+grpc_resolver_factory *grpc_dns_resolver_factory_create(void);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVERS_DNS_RESOLVER_H */
diff --git a/src/core/client_config/resolvers/sockaddr_resolver.c b/src/core/client_config/resolvers/sockaddr_resolver.c
new file mode 100644
index 0000000000..68910ad975
--- /dev/null
+++ b/src/core/client_config/resolvers/sockaddr_resolver.c
@@ -0,0 +1,391 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/client_config/resolvers/sockaddr_resolver.h"
+
+#include <stdio.h>
+#include <string.h>
+#ifdef GPR_POSIX_SOCKET
+#include <sys/un.h>
+#endif
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/client_config/lb_policy_registry.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/support/string.h"
+
+typedef struct {
+ /** base class: must be first */
+ grpc_resolver base;
+ /** refcount */
+ gpr_refcount refs;
+ /** subchannel factory */
+ grpc_subchannel_factory *subchannel_factory;
+ /** load balancing policy name */
+ char *lb_policy_name;
+
+ /** the addresses that we've 'resolved' */
+ struct sockaddr_storage *addrs;
+ /** the corresponding length of the addresses */
+ size_t *addrs_len;
+ /** how many elements in \a addrs */
+ size_t num_addrs;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** have we published? */
+ int published;
+ /** pending next completion, or NULL */
+ grpc_closure *next_completion;
+ /** target config address for next completion */
+ grpc_client_config **target_config;
+} sockaddr_resolver;
+
+static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ sockaddr_resolver *r);
+
+static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
+
+static const grpc_resolver_vtable sockaddr_resolver_vtable = {
+ sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
+ sockaddr_next};
+
+static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ if (r->next_completion != NULL) {
+ *r->target_config = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
+ r->next_completion = NULL;
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ r->published = 0;
+ sockaddr_maybe_finish_next_locked(exec_ctx, r);
+ gpr_mu_unlock(&r->mu);
+}
+
+static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete) {
+ sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ GPR_ASSERT(!r->next_completion);
+ r->next_completion = on_complete;
+ r->target_config = target_config;
+ sockaddr_maybe_finish_next_locked(exec_ctx, r);
+ gpr_mu_unlock(&r->mu);
+}
+
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ sockaddr_resolver *r) {
+ grpc_client_config *cfg;
+ grpc_lb_policy *lb_policy;
+ grpc_lb_policy_args lb_policy_args;
+ grpc_subchannel **subchannels;
+ grpc_subchannel_args args;
+
+ if (r->next_completion != NULL && !r->published) {
+ size_t i;
+ cfg = grpc_client_config_create();
+ subchannels = gpr_malloc(sizeof(grpc_subchannel *) * r->num_addrs);
+ for (i = 0; i < r->num_addrs; i++) {
+ memset(&args, 0, sizeof(args));
+ args.addr = (struct sockaddr *)&r->addrs[i];
+ args.addr_len = r->addrs_len[i];
+ subchannels[i] = grpc_subchannel_factory_create_subchannel(
+ exec_ctx, r->subchannel_factory, &args);
+ }
+ memset(&lb_policy_args, 0, sizeof(lb_policy_args));
+ lb_policy_args.subchannels = subchannels;
+ lb_policy_args.num_subchannels = r->num_addrs;
+ lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
+ gpr_free(subchannels);
+ grpc_client_config_set_lb_policy(cfg, lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "sockaddr");
+ r->published = 1;
+ *r->target_config = cfg;
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
+ r->next_completion = NULL;
+ }
+}
+
+static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+ sockaddr_resolver *r = (sockaddr_resolver *)gr;
+ gpr_mu_destroy(&r->mu);
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
+ gpr_free(r->addrs);
+ gpr_free(r->addrs_len);
+ gpr_free(r->lb_policy_name);
+ gpr_free(r);
+}
+
+#ifdef GPR_POSIX_SOCKET
+static int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr,
+ size_t *len) {
+ struct sockaddr_un *un = (struct sockaddr_un *)addr;
+
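+  /* Build an AF_UNIX address from the URI path; the path is assumed to fit
+     in sun_path, and the reported length covers the family field plus the
+     NUL-terminated path. */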
+ un->sun_family = AF_UNIX;
+ strcpy(un->sun_path, uri->path);
+ *len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
+
+ return 1;
+}
+
+static char *unix_get_default_authority(grpc_resolver_factory *factory,
+ grpc_uri *uri) {
+ return gpr_strdup("localhost");
+}
+#endif
+
+static char *ip_get_default_authority(grpc_uri *uri) {
+ const char *path = uri->path;
+ if (path[0] == '/') ++path;
+ return gpr_strdup(path);
+}
+
+static char *ipv4_get_default_authority(grpc_resolver_factory *factory,
+ grpc_uri *uri) {
+ return ip_get_default_authority(uri);
+}
+
+static char *ipv6_get_default_authority(grpc_resolver_factory *factory,
+ grpc_uri *uri) {
+ return ip_get_default_authority(uri);
+}
+
+static int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr,
+ size_t *len) {
+ const char *host_port = uri->path;
+ char *host;
+ char *port;
+ int port_num;
+ int result = 0;
+ struct sockaddr_in *in = (struct sockaddr_in *)addr;
+
+ if (*host_port == '/') ++host_port;
+ if (!gpr_split_host_port(host_port, &host, &port)) {
+ return 0;
+ }
+
+ memset(in, 0, sizeof(*in));
+ *len = sizeof(*in);
+ in->sin_family = AF_INET;
+ if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
+ gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
+ goto done;
+ }
+
+ if (port != NULL) {
+ if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
+ port_num > 65535) {
+ gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
+ goto done;
+ }
+ in->sin_port = htons((uint16_t)port_num);
+ } else {
+ gpr_log(GPR_ERROR, "no port given for ipv4 scheme");
+ goto done;
+ }
+
+ result = 1;
+done:
+ gpr_free(host);
+ gpr_free(port);
+ return result;
+}
+
+static int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr,
+ size_t *len) {
+ const char *host_port = uri->path;
+ char *host;
+ char *port;
+ int port_num;
+ int result = 0;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
+
+ if (*host_port == '/') ++host_port;
+ if (!gpr_split_host_port(host_port, &host, &port)) {
+ return 0;
+ }
+
+ memset(in6, 0, sizeof(*in6));
+ *len = sizeof(*in6);
+ in6->sin6_family = AF_INET6;
+ if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
+ gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
+ goto done;
+ }
+
+ if (port != NULL) {
+ if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
+ port_num > 65535) {
+ gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
+ goto done;
+ }
+ in6->sin6_port = htons((uint16_t)port_num);
+ } else {
+ gpr_log(GPR_ERROR, "no port given for ipv6 scheme");
+ goto done;
+ }
+
+ result = 1;
+done:
+ gpr_free(host);
+ gpr_free(port);
+ return result;
+}
+
+static void do_nothing(void *ignored) {}
+
+static grpc_resolver *sockaddr_create(
+ grpc_resolver_args *args, const char *default_lb_policy_name,
+ int parse(grpc_uri *uri, struct sockaddr_storage *dst, size_t *len)) {
+ size_t i;
+ int errors_found = 0; /* GPR_FALSE */
+ sockaddr_resolver *r;
+ gpr_slice path_slice;
+ gpr_slice_buffer path_parts;
+
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
+ args->uri->scheme);
+ return NULL;
+ }
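+
+  /* Accepted target syntax (illustrative): the URI path carries a
+     comma-separated list of addresses, optionally followed by an lb_policy
+     query parameter, e.g.
+       ipv4:127.0.0.1:2121,127.0.0.2:2121?lb_policy=round_robin
+     (the addresses above are placeholders). */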
+
+ r = gpr_malloc(sizeof(sockaddr_resolver));
+ memset(r, 0, sizeof(*r));
+
+ r->lb_policy_name = NULL;
+ if (0 != strcmp(args->uri->query, "")) {
+ gpr_slice query_slice;
+ gpr_slice_buffer query_parts;
+
+ query_slice =
+ gpr_slice_new(args->uri->query, strlen(args->uri->query), do_nothing);
+ gpr_slice_buffer_init(&query_parts);
+ gpr_slice_split(query_slice, "=", &query_parts);
+ GPR_ASSERT(query_parts.count == 2);
+ if (0 == gpr_slice_str_cmp(query_parts.slices[0], "lb_policy")) {
+ r->lb_policy_name = gpr_dump_slice(query_parts.slices[1], GPR_DUMP_ASCII);
+ }
+ gpr_slice_buffer_destroy(&query_parts);
+ gpr_slice_unref(query_slice);
+ }
+ if (r->lb_policy_name == NULL) {
+ r->lb_policy_name = gpr_strdup(default_lb_policy_name);
+ }
+
+ path_slice =
+ gpr_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
+ gpr_slice_buffer_init(&path_parts);
+
+ gpr_slice_split(path_slice, ",", &path_parts);
+ r->num_addrs = path_parts.count;
+ r->addrs = gpr_malloc(sizeof(struct sockaddr_storage) * r->num_addrs);
+ r->addrs_len = gpr_malloc(sizeof(*r->addrs_len) * r->num_addrs);
+
+ for (i = 0; i < r->num_addrs; i++) {
+ grpc_uri ith_uri = *args->uri;
+ char *part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
+ ith_uri.path = part_str;
+ if (!parse(&ith_uri, &r->addrs[i], &r->addrs_len[i])) {
+ errors_found = 1; /* GPR_TRUE */
+ }
+ gpr_free(part_str);
+ if (errors_found) break;
+ }
+
+ gpr_slice_buffer_destroy(&path_parts);
+ gpr_slice_unref(path_slice);
+ if (errors_found) {
+ gpr_free(r->lb_policy_name);
+ gpr_free(r->addrs);
+ gpr_free(r->addrs_len);
+ gpr_free(r);
+ return NULL;
+ }
+
+ gpr_ref_init(&r->refs, 1);
+ gpr_mu_init(&r->mu);
+ grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
+
+ return &r->base;
+}
+
+/*
+ * FACTORY
+ */
+
+static void sockaddr_factory_ref(grpc_resolver_factory *factory) {}
+
+static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
+
+#define DECL_FACTORY(name) \
+ static grpc_resolver *name##_factory_create_resolver( \
+ grpc_resolver_factory *factory, grpc_resolver_args *args) { \
+ return sockaddr_create(args, "pick_first", parse_##name); \
+ } \
+ static const grpc_resolver_factory_vtable name##_factory_vtable = { \
+ sockaddr_factory_ref, sockaddr_factory_unref, \
+ name##_factory_create_resolver, name##_get_default_authority, #name}; \
+ static grpc_resolver_factory name##_resolver_factory = { \
+ &name##_factory_vtable}; \
+ grpc_resolver_factory *grpc_##name##_resolver_factory_create() { \
+ return &name##_resolver_factory; \
+ }
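+
+/* For example, DECL_FACTORY(ipv4) defines ipv4_factory_create_resolver,
+   ipv4_factory_vtable and ipv4_resolver_factory, plus the public entry point
+   grpc_ipv4_resolver_factory_create(), with "ipv4" as the scheme name and
+   parse_ipv4 as the address parser. */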
+
+#ifdef GPR_POSIX_SOCKET
+DECL_FACTORY(unix)
+#endif
+DECL_FACTORY(ipv4) DECL_FACTORY(ipv6)
diff --git a/src/core/client_config/resolvers/sockaddr_resolver.h b/src/core/client_config/resolvers/sockaddr_resolver.h
new file mode 100644
index 0000000000..f050329431
--- /dev/null
+++ b/src/core/client_config/resolvers/sockaddr_resolver.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVERS_SOCKADDR_RESOLVER_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVERS_SOCKADDR_RESOLVER_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/client_config/resolver_factory.h"
+
+grpc_resolver_factory *grpc_ipv4_resolver_factory_create(void);
+
+grpc_resolver_factory *grpc_ipv6_resolver_factory_create(void);
+
+#ifdef GPR_POSIX_SOCKET
+/** Create a unix resolver factory */
+grpc_resolver_factory *grpc_unix_resolver_factory_create(void);
+#endif
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVERS_SOCKADDR_RESOLVER_H */
diff --git a/src/core/client_config/resolvers/zookeeper_resolver.c b/src/core/client_config/resolvers/zookeeper_resolver.c
new file mode 100644
index 0000000000..166738e768
--- /dev/null
+++ b/src/core/client_config/resolvers/zookeeper_resolver.c
@@ -0,0 +1,520 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolvers/zookeeper_resolver.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+
+#include <grpc/grpc_zookeeper.h>
+#include <zookeeper/zookeeper.h>
+
+#include "src/core/client_config/lb_policy_registry.h"
+#include "src/core/client_config/resolver_registry.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
+#include "src/core/json/json.h"
+
+/** Zookeeper session expiration time in milliseconds */
+#define GRPC_ZOOKEEPER_SESSION_TIMEOUT 15000
+
+typedef struct {
+ /** base class: must be first */
+ grpc_resolver base;
+ /** refcount */
+ gpr_refcount refs;
+ /** name to resolve */
+ char *name;
+ /** subchannel factory */
+ grpc_subchannel_factory *subchannel_factory;
+ /** load balancing policy name */
+ char *lb_policy_name;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** are we currently resolving? */
+ int resolving;
+ /** which version of resolved_config have we published? */
+ int published_version;
+ /** which version of resolved_config is current? */
+ int resolved_version;
+ /** pending next completion, or NULL */
+ grpc_closure *next_completion;
+ /** target config address for next completion */
+ grpc_client_config **target_config;
+ /** current (fully resolved) config */
+ grpc_client_config *resolved_config;
+
+ /** zookeeper handle */
+ zhandle_t *zookeeper_handle;
+ /** zookeeper resolved addresses */
+ grpc_resolved_addresses *resolved_addrs;
+ /** total number of addresses to be resolved */
+ int resolved_total;
+ /** number of addresses resolved */
+ int resolved_num;
+} zookeeper_resolver;
+
+static void zookeeper_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+
+static void zookeeper_start_resolving_locked(zookeeper_resolver *r);
+static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ zookeeper_resolver *r);
+
+static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
+
+static const grpc_resolver_vtable zookeeper_resolver_vtable = {
+ zookeeper_destroy, zookeeper_shutdown, zookeeper_channel_saw_error,
+ zookeeper_next};
+
+static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ zookeeper_resolver *r = (zookeeper_resolver *)resolver;
+ grpc_closure *call = NULL;
+ gpr_mu_lock(&r->mu);
+ if (r->next_completion != NULL) {
+ *r->target_config = NULL;
+ call = r->next_completion;
+ r->next_completion = NULL;
+ }
+ zookeeper_close(r->zookeeper_handle);
+ gpr_mu_unlock(&r->mu);
+ if (call != NULL) {
+ call->cb(exec_ctx, call->cb_arg, 1);
+ }
+}
+
+static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ zookeeper_resolver *r = (zookeeper_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ if (r->resolving == 0) {
+ zookeeper_start_resolving_locked(r);
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete) {
+ zookeeper_resolver *r = (zookeeper_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ GPR_ASSERT(r->next_completion == NULL);
+ r->next_completion = on_complete;
+ r->target_config = target_config;
+ if (r->resolved_version == 0 && r->resolving == 0) {
+ zookeeper_start_resolving_locked(r);
+ } else {
+ zookeeper_maybe_finish_next_locked(exec_ctx, r);
+ }
+ gpr_mu_unlock(&r->mu);
+}
+
+/** Zookeeper global watcher for connection management
+    TODO: better connection management than just logging errors */
+static void zookeeper_global_watcher(zhandle_t *zookeeper_handle, int type,
+ int state, const char *path,
+ void *watcher_ctx) {
+ if (type == ZOO_SESSION_EVENT) {
+ if (state == ZOO_EXPIRED_SESSION_STATE) {
+ gpr_log(GPR_ERROR, "Zookeeper session expired");
+ } else if (state == ZOO_AUTH_FAILED_STATE) {
+ gpr_log(GPR_ERROR, "Zookeeper authentication failed");
+ }
+ }
+}
+
+/** Zookeeper watcher triggered by changes to watched nodes
+ Once triggered, it tries to resolve again to get updated addresses */
+static void zookeeper_watcher(zhandle_t *zookeeper_handle, int type, int state,
+ const char *path, void *watcher_ctx) {
+ if (watcher_ctx != NULL) {
+ zookeeper_resolver *r = (zookeeper_resolver *)watcher_ctx;
+ if (state == ZOO_CONNECTED_STATE) {
+ gpr_mu_lock(&r->mu);
+ if (r->resolving == 0) {
+ zookeeper_start_resolving_locked(r);
+ }
+ gpr_mu_unlock(&r->mu);
+ }
+ }
+}
+
+/** Callback function after getting all resolved addresses
+ Creates a subchannel for each address */
+static void zookeeper_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
+ zookeeper_resolver *r = arg;
+ grpc_client_config *config = NULL;
+ grpc_subchannel **subchannels;
+ grpc_subchannel_args args;
+ grpc_lb_policy *lb_policy;
+ size_t i;
+ if (addresses != NULL) {
+ grpc_lb_policy_args lb_policy_args;
+ config = grpc_client_config_create();
+ subchannels = gpr_malloc(sizeof(grpc_subchannel *) * addresses->naddrs);
+ for (i = 0; i < addresses->naddrs; i++) {
+ memset(&args, 0, sizeof(args));
+ args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
+ args.addr_len = addresses->addrs[i].len;
+ subchannels[i] = grpc_subchannel_factory_create_subchannel(
+ exec_ctx, r->subchannel_factory, &args);
+ }
+ lb_policy_args.subchannels = subchannels;
+ lb_policy_args.num_subchannels = addresses->naddrs;
+ lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
+ grpc_client_config_set_lb_policy(config, lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "construction");
+ grpc_resolved_addresses_destroy(addresses);
+ gpr_free(subchannels);
+ }
+ gpr_mu_lock(&r->mu);
+ GPR_ASSERT(r->resolving == 1);
+ r->resolving = 0;
+ if (r->resolved_config != NULL) {
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
+ }
+ r->resolved_config = config;
+ r->resolved_version++;
+ zookeeper_maybe_finish_next_locked(exec_ctx, r);
+ gpr_mu_unlock(&r->mu);
+
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "zookeeper-resolving");
+}
+
+/** Callback function for each DNS resolved address */
+static void zookeeper_dns_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
+ size_t i;
+ zookeeper_resolver *r = arg;
+ int resolve_done = 0;
+
+ gpr_mu_lock(&r->mu);
+ r->resolved_num++;
+ r->resolved_addrs->addrs =
+ gpr_realloc(r->resolved_addrs->addrs,
+ sizeof(grpc_resolved_address) *
+ (r->resolved_addrs->naddrs + addresses->naddrs));
+ for (i = 0; i < addresses->naddrs; i++) {
+ memcpy(r->resolved_addrs->addrs[i + r->resolved_addrs->naddrs].addr,
+ addresses->addrs[i].addr, addresses->addrs[i].len);
+ r->resolved_addrs->addrs[i + r->resolved_addrs->naddrs].len =
+ addresses->addrs[i].len;
+ }
+
+ r->resolved_addrs->naddrs += addresses->naddrs;
+ grpc_resolved_addresses_destroy(addresses);
+
+ /** Wait for all addresses to be resolved */
+ resolve_done = (r->resolved_num == r->resolved_total);
+ gpr_mu_unlock(&r->mu);
+ if (resolve_done) {
+ zookeeper_on_resolved(exec_ctx, r, r->resolved_addrs);
+ }
+}
+
+/** Parses the JSON-formatted address stored in a zookeeper node */
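+/* Expected node value (illustrative): a JSON object with "host" and "port"
+   string fields, e.g. {"host": "10.0.0.1", "port": "8080"}; the values shown
+   here are placeholders. */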
+static char *zookeeper_parse_address(const char *value, size_t value_len) {
+ grpc_json *json;
+ grpc_json *cur;
+ const char *host;
+ const char *port;
+ char *buffer;
+ char *address = NULL;
+
+ buffer = gpr_malloc(value_len);
+ memcpy(buffer, value, value_len);
+ json = grpc_json_parse_string_with_len(buffer, value_len);
+ if (json != NULL) {
+ host = NULL;
+ port = NULL;
+ for (cur = json->child; cur != NULL; cur = cur->next) {
+ if (!strcmp(cur->key, "host")) {
+ host = cur->value;
+ if (port != NULL) {
+ break;
+ }
+ } else if (!strcmp(cur->key, "port")) {
+ port = cur->value;
+ if (host != NULL) {
+ break;
+ }
+ }
+ }
+ if (host != NULL && port != NULL) {
+ gpr_asprintf(&address, "%s:%s", host, port);
+ }
+ grpc_json_destroy(json);
+ }
+ gpr_free(buffer);
+
+ return address;
+}
+
+static void zookeeper_get_children_node_completion(int rc, const char *value,
+ int value_len,
+ const struct Stat *stat,
+ const void *arg) {
+ char *address = NULL;
+ zookeeper_resolver *r = (zookeeper_resolver *)arg;
+ int resolve_done = 0;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ if (rc != 0) {
+ gpr_log(GPR_ERROR, "Error in getting a child node of %s", r->name);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return;
+ }
+
+ address = zookeeper_parse_address(value, (size_t)value_len);
+ if (address != NULL) {
+    /** Further resolves the address via DNS */
+ grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
+ gpr_free(address);
+ } else {
+ gpr_log(GPR_ERROR, "Error in resolving a child node of %s", r->name);
+ gpr_mu_lock(&r->mu);
+ r->resolved_total--;
+ resolve_done = (r->resolved_num == r->resolved_total);
+ gpr_mu_unlock(&r->mu);
+ if (resolve_done) {
+ zookeeper_on_resolved(&exec_ctx, r, r->resolved_addrs);
+ }
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void zookeeper_get_children_completion(
+ int rc, const struct String_vector *children, const void *arg) {
+ char *path;
+ int status;
+ int i;
+ zookeeper_resolver *r = (zookeeper_resolver *)arg;
+
+ if (rc != 0) {
+ gpr_log(GPR_ERROR, "Error in getting zookeeper children of %s", r->name);
+ return;
+ }
+
+ if (children->count == 0) {
+ gpr_log(GPR_ERROR, "Error in resolving zookeeper address %s", r->name);
+ return;
+ }
+
+ r->resolved_addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+ r->resolved_addrs->addrs = NULL;
+ r->resolved_addrs->naddrs = 0;
+ r->resolved_total = children->count;
+
+  /** TODO: Replace the expensive heap allocation with a stack allocation
+      if we can bound the maximum length of a zookeeper path */
+ for (i = 0; i < children->count; i++) {
+ gpr_asprintf(&path, "%s/%s", r->name, children->data[i]);
+ status = zoo_awget(r->zookeeper_handle, path, zookeeper_watcher, r,
+ zookeeper_get_children_node_completion, r);
+    if (status != 0) {
+      gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", path);
+    }
+    gpr_free(path);
+ }
+}
+
+static void zookeeper_get_node_completion(int rc, const char *value,
+ int value_len,
+ const struct Stat *stat,
+ const void *arg) {
+ int status;
+ char *address = NULL;
+ zookeeper_resolver *r = (zookeeper_resolver *)arg;
+ r->resolved_addrs = NULL;
+ r->resolved_total = 0;
+ r->resolved_num = 0;
+
+ if (rc != 0) {
+ gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", r->name);
+ return;
+ }
+
+  /** If the zookeeper node at path r->name does not itself contain an
+      address (i.e. it is a service node), get its children */
+ address = zookeeper_parse_address(value, (size_t)value_len);
+ if (address != NULL) {
+ r->resolved_addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+ r->resolved_addrs->addrs = NULL;
+ r->resolved_addrs->naddrs = 0;
+ r->resolved_total = 1;
+    /** Further resolves the address via DNS */
+ grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
+ gpr_free(address);
+ return;
+ }
+
+ status = zoo_awget_children(r->zookeeper_handle, r->name, zookeeper_watcher,
+ r, zookeeper_get_children_completion, r);
+ if (status != 0) {
+ gpr_log(GPR_ERROR, "Error in getting zookeeper children of %s", r->name);
+ }
+}
+
+static void zookeeper_resolve_address(zookeeper_resolver *r) {
+ int status;
+ status = zoo_awget(r->zookeeper_handle, r->name, zookeeper_watcher, r,
+ zookeeper_get_node_completion, r);
+ if (status != 0) {
+ gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", r->name);
+ }
+}
+
+static void zookeeper_start_resolving_locked(zookeeper_resolver *r) {
+ GRPC_RESOLVER_REF(&r->base, "zookeeper-resolving");
+ GPR_ASSERT(r->resolving == 0);
+ r->resolving = 1;
+ zookeeper_resolve_address(r);
+}
+
+static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ zookeeper_resolver *r) {
+ if (r->next_completion != NULL &&
+ r->resolved_version != r->published_version) {
+ *r->target_config = r->resolved_config;
+ if (r->resolved_config != NULL) {
+ grpc_client_config_ref(r->resolved_config);
+ }
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
+ r->next_completion = NULL;
+ r->published_version = r->resolved_version;
+ }
+}
+
+static void zookeeper_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+ zookeeper_resolver *r = (zookeeper_resolver *)gr;
+ gpr_mu_destroy(&r->mu);
+ if (r->resolved_config != NULL) {
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
+ }
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
+ gpr_free(r->name);
+ gpr_free(r->lb_policy_name);
+ gpr_free(r);
+}
+
+static grpc_resolver *zookeeper_create(grpc_resolver_args *args,
+ const char *lb_policy_name) {
+ zookeeper_resolver *r;
+ size_t length;
+ char *path = args->uri->path;
+
+ if (0 == strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "No authority specified in zookeeper uri");
+ return NULL;
+ }
+
+  /** Removes the trailing slash, if present */
+ length = strlen(path);
+ if (length > 1 && path[length - 1] == '/') {
+ path[length - 1] = 0;
+ }
+
+ r = gpr_malloc(sizeof(zookeeper_resolver));
+ memset(r, 0, sizeof(*r));
+ gpr_ref_init(&r->refs, 1);
+ gpr_mu_init(&r->mu);
+ grpc_resolver_init(&r->base, &zookeeper_resolver_vtable);
+ r->name = gpr_strdup(path);
+
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
+
+ r->lb_policy_name = gpr_strdup(lb_policy_name);
+
+ /** Initializes zookeeper client */
+ zoo_set_debug_level(ZOO_LOG_LEVEL_WARN);
+ r->zookeeper_handle =
+ zookeeper_init(args->uri->authority, zookeeper_global_watcher,
+ GRPC_ZOOKEEPER_SESSION_TIMEOUT, 0, 0, 0);
+ if (r->zookeeper_handle == NULL) {
+ gpr_log(GPR_ERROR, "Unable to connect to zookeeper server");
+ return NULL;
+ }
+
+ return &r->base;
+}
+
+static void zookeeper_plugin_init() {
+ grpc_register_resolver_type(grpc_zookeeper_resolver_factory_create());
+}
+
+void grpc_zookeeper_register() {
+ GRPC_API_TRACE("grpc_zookeeper_register(void)", 0, ());
+ grpc_register_plugin(zookeeper_plugin_init, NULL);
+}
+
+/*
+ * FACTORY
+ */
+
+static void zookeeper_factory_ref(grpc_resolver_factory *factory) {}
+
+static void zookeeper_factory_unref(grpc_resolver_factory *factory) {}
+
+static char *zookeeper_factory_get_default_hostname(
+ grpc_resolver_factory *factory, grpc_uri *uri) {
+ return NULL;
+}
+
+static grpc_resolver *zookeeper_factory_create_resolver(
+ grpc_resolver_factory *factory, grpc_resolver_args *args) {
+ return zookeeper_create(args, "pick_first");
+}
+
+static const grpc_resolver_factory_vtable zookeeper_factory_vtable = {
+ zookeeper_factory_ref, zookeeper_factory_unref,
+ zookeeper_factory_create_resolver, zookeeper_factory_get_default_hostname,
+ "zookeeper"};
+
+static grpc_resolver_factory zookeeper_resolver_factory = {
+ &zookeeper_factory_vtable};
+
+grpc_resolver_factory *grpc_zookeeper_resolver_factory_create() {
+ return &zookeeper_resolver_factory;
+}
diff --git a/src/core/client_config/resolvers/zookeeper_resolver.h b/src/core/client_config/resolvers/zookeeper_resolver.h
new file mode 100644
index 0000000000..04bd3ca875
--- /dev/null
+++ b/src/core/client_config/resolvers/zookeeper_resolver.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H
+#define GRPC_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H
+
+#include "src/core/client_config/resolver_factory.h"
+
+/** Create a zookeeper resolver factory */
+grpc_resolver_factory *grpc_zookeeper_resolver_factory_create(void);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H */
diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c
new file mode 100644
index 0000000000..8f150a8d81
--- /dev/null
+++ b/src/core/client_config/subchannel.c
@@ -0,0 +1,678 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/subchannel.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/avl.h>
+
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/client_channel.h"
+#include "src/core/channel/connected_channel.h"
+#include "src/core/client_config/initial_connect_string.h"
+#include "src/core/client_config/subchannel_index.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/backoff.h"
+#include "src/core/surface/channel.h"
+#include "src/core/surface/channel_init.h"
+#include "src/core/transport/connectivity_state.h"
+
+#define INTERNAL_REF_BITS 16
+#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
+
+#define GRPC_SUBCHANNEL_MIN_CONNECT_TIMEOUT_SECONDS 20
+#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
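+/* i.e. reconnect back-off starts at 1 second, grows by a factor of 1.6 with
+   20% jitter, and is capped at 120 seconds (see gpr_backoff_init below). */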
+
+#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
+ ((grpc_connected_subchannel *)(gpr_atm_##barrier##_load( \
+ &(subchannel)->connected_subchannel)))
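+/* The barrier argument selects the atomic load used (e.g. acq or no_barrier)
+   via token pasting into gpr_atm_##barrier##_load. */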
+
+typedef struct {
+ grpc_closure closure;
+ grpc_subchannel *subchannel;
+ grpc_connectivity_state connectivity_state;
+} state_watcher;
+
+typedef struct external_state_watcher {
+ grpc_subchannel *subchannel;
+ grpc_pollset_set *pollset_set;
+ grpc_closure *notify;
+ grpc_closure closure;
+ struct external_state_watcher *next;
+ struct external_state_watcher *prev;
+} external_state_watcher;
+
+struct grpc_subchannel {
+ grpc_connector *connector;
+
+ /** refcount
+ - lower INTERNAL_REF_BITS bits are for internal references:
+ these do not keep the subchannel open.
+ - upper remaining bits are for public references: these do
+ keep the subchannel open */
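+  /* e.g. with INTERNAL_REF_BITS == 16, one public (strong) ref contributes
+     0x10000 to ref_pair and one internal (weak) ref contributes 0x1. */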
+ gpr_atm ref_pair;
+
+ /** non-transport related channel filters */
+ const grpc_channel_filter **filters;
+ size_t num_filters;
+ /** channel arguments */
+ grpc_channel_args *args;
+ /** address to connect to */
+ struct sockaddr *addr;
+ size_t addr_len;
+
+ grpc_subchannel_key *key;
+
+ /** initial string to send to peer */
+ gpr_slice initial_connect_string;
+
+ /** set during connection */
+ grpc_connect_out_args connecting_result;
+
+ /** callback for connection finishing */
+ grpc_closure connected;
+
+ /** pollset_set tracking who's interested in a connection
+ being setup */
+ grpc_pollset_set *pollset_set;
+
+ /** active connection, or null; of type grpc_connected_subchannel */
+ gpr_atm connected_subchannel;
+
+ /** mutex protecting remaining elements */
+ gpr_mu mu;
+
+ /** have we seen a disconnection? */
+ int disconnected;
+ /** are we connecting */
+ int connecting;
+ /** connectivity state tracking */
+ grpc_connectivity_state_tracker state_tracker;
+
+ external_state_watcher root_external_state_watcher;
+
+ /** next connect attempt time */
+ gpr_timespec next_attempt;
+ /** backoff state */
+ gpr_backoff backoff_state;
+ /** do we have an active alarm? */
+ int have_alarm;
+ /** our alarm */
+ grpc_timer alarm;
+ /** current random value */
+ uint32_t random;
+};
+
+struct grpc_subchannel_call {
+ grpc_connected_subchannel *connection;
+};
+
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
+#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)(con))
+#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
+ (((grpc_subchannel_call *)(callstack)) - 1)
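+/* A grpc_subchannel_call is allocated with its grpc_call_stack laid out
+   immediately after it, so these macros convert between the two with pointer
+   arithmetic rather than an extra pointer field. */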
+
+static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
+ bool iomgr_success);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_LOG(name, p) \
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
+ (name), (p), (p)->refs.count, (p)->refs.count + 1, reason)
+#define UNREF_LOG(name, p) \
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p unref %d -> %d %s", \
+ (name), (p), (p)->refs.count, (p)->refs.count - 1, reason)
+#define REF_MUTATE_EXTRA_ARGS \
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
+#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
+#else
+#define REF_REASON ""
+#define REF_LOG(name, p) \
+ do { \
+ } while (0)
+#define UNREF_LOG(name, p) \
+ do { \
+ } while (0)
+#define REF_MUTATE_EXTRA_ARGS
+#define REF_MUTATE_PURPOSE(x)
+#endif
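+/* With GRPC_STREAM_REFCOUNT_DEBUG defined, every ref/unref mutation is logged
+   with file, line and a reason string; otherwise the logging macros and the
+   extra arguments compile away. */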
+
+/*
+ * connection implementation
+ */
+
+static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ grpc_connected_subchannel *c = arg;
+ grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
+ gpr_free(c);
+}
+
+void grpc_connected_subchannel_ref(grpc_connected_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
+}
+
+void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c),
+ REF_REASON);
+}
+
+/*
+ * grpc_subchannel implementation
+ */
+
+static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ grpc_subchannel *c = arg;
+ gpr_free((void *)c->filters);
+ grpc_channel_args_destroy(c->args);
+ gpr_free(c->addr);
+ gpr_slice_unref(c->initial_connect_string);
+ grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
+ grpc_connector_unref(exec_ctx, c->connector);
+ grpc_pollset_set_destroy(c->pollset_set);
+ grpc_subchannel_key_destroy(exec_ctx, c->key);
+ gpr_free(c);
+}
+
+static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
+ int barrier REF_MUTATE_EXTRA_ARGS) {
+ gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
+ : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SUBCHANNEL: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
+ old_val + delta, reason);
+#endif
+ return old_val;
+}
+
+grpc_subchannel *grpc_subchannel_ref(grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
+ 0 REF_MUTATE_PURPOSE("STRONG_REF"));
+ GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0);
+ return c;
+}
+
+grpc_subchannel *grpc_subchannel_weak_ref(grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
+ GPR_ASSERT(old_refs != 0);
+ return c;
+}
+
+grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ if (!c) return NULL;
+ for (;;) {
+ gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair);
+ if (old_refs >= (1 << INTERNAL_REF_BITS)) {
+ gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS);
+ if (gpr_atm_rel_cas(&c->ref_pair, old_refs, new_refs)) {
+ return c;
+ }
+ } else {
+ return NULL;
+ }
+ }
+}
+
+static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
+ grpc_connected_subchannel *con;
+ grpc_subchannel_index_unregister(exec_ctx, c->key, c);
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(!c->disconnected);
+ c->disconnected = 1;
+ grpc_connector_shutdown(exec_ctx, c->connector);
+ con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
+ if (con != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection");
+ gpr_atm_no_barrier_store(&c->connected_subchannel, 0xdeadbeef);
+ }
+ gpr_mu_unlock(&c->mu);
+}
+
+void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
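+  /* The delta of 1 - (1 << INTERNAL_REF_BITS) drops one strong ref while
+     taking one internal ref; that internal ref keeps the subchannel alive
+     through disconnect() and is released by the weak unref below. */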
+ old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
+ 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
+ if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
+ disconnect(exec_ctx, c);
+ }
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref");
+}
+
+void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
+ if (old_refs == 1) {
+ grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(subchannel_destroy, c),
+ true, NULL);
+ }
+}
+
+static uint32_t random_seed() {
+ return (uint32_t)(gpr_time_to_millis(gpr_now(GPR_CLOCK_MONOTONIC)));
+}
+
+grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector,
+ grpc_subchannel_args *args) {
+ grpc_subchannel_key *key = grpc_subchannel_key_create(connector, args);
+ grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
+ if (c) {
+ grpc_subchannel_key_destroy(exec_ctx, key);
+ return c;
+ }
+
+ c = gpr_malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ c->key = key;
+ gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
+ c->connector = connector;
+ grpc_connector_ref(c->connector);
+ c->num_filters = args->filter_count;
+ if (c->num_filters > 0) {
+ c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
+ memcpy((void *)c->filters, args->filters,
+ sizeof(grpc_channel_filter *) * c->num_filters);
+ } else {
+ c->filters = NULL;
+ }
+ c->addr = gpr_malloc(args->addr_len);
+ memcpy(c->addr, args->addr, args->addr_len);
+ c->pollset_set = grpc_pollset_set_create();
+ c->addr_len = args->addr_len;
+ grpc_set_initial_connect_string(&c->addr, &c->addr_len,
+ &c->initial_connect_string);
+ c->args = grpc_channel_args_copy(args->args);
+ c->random = random_seed();
+ c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
+ &c->root_external_state_watcher;
+ grpc_closure_init(&c->connected, subchannel_connected, c);
+ grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
+ "subchannel");
+ gpr_backoff_init(&c->backoff_state,
+ GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_SUBCHANNEL_RECONNECT_JITTER,
+ GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
+ GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ if (c->args) {
+ for (size_t i = 0; i < c->args->num_args; i++) {
+ if (0 == strcmp(c->args->args[i].key,
+ "grpc.testing.fixed_reconnect_backoff")) {
+ GPR_ASSERT(c->args->args[i].type == GRPC_ARG_INTEGER);
+ gpr_backoff_init(&c->backoff_state, 1.0, 0.0,
+ c->args->args[i].value.integer,
+ c->args->args[i].value.integer);
+ }
+ }
+ }
+ gpr_mu_init(&c->mu);
+
+ return grpc_subchannel_index_register(exec_ctx, key, c);
+}
+
+static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
+ grpc_connect_in_args args;
+
+ args.interested_parties = c->pollset_set;
+ args.addr = c->addr;
+ args.addr_len = c->addr_len;
+ args.deadline = c->next_attempt;
+ args.channel_args = c->args;
+ args.initial_connect_string = c->initial_connect_string;
+
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ GRPC_CHANNEL_CONNECTING, "state_change");
+ grpc_connector_connect(exec_ctx, c->connector, &args, &c->connecting_result,
+ &c->connected);
+}
+
+static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
+ c->next_attempt =
+ gpr_backoff_begin(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ continue_connect(exec_ctx, c);
+}
+
+grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c) {
+ grpc_connectivity_state state;
+ gpr_mu_lock(&c->mu);
+ state = grpc_connectivity_state_check(&c->state_tracker);
+ gpr_mu_unlock(&c->mu);
+ return state;
+}
+
+static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ external_state_watcher *w = arg;
+ grpc_closure *follow_up = w->notify;
+ if (w->pollset_set != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
+ w->pollset_set);
+ }
+ gpr_mu_lock(&w->subchannel->mu);
+ w->next->prev = w->prev;
+ w->prev->next = w->next;
+ gpr_mu_unlock(&w->subchannel->mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
+ gpr_free(w);
+ follow_up->cb(exec_ctx, follow_up->cb_arg, success);
+}
+
+void grpc_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify) {
+ external_state_watcher *w;
+
+ if (state == NULL) {
+ gpr_mu_lock(&c->mu);
+ for (w = c->root_external_state_watcher.next;
+ w != &c->root_external_state_watcher; w = w->next) {
+ if (w->notify == notify) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &c->state_tracker, NULL, &w->closure);
+ }
+ }
+ gpr_mu_unlock(&c->mu);
+ } else {
+ w = gpr_malloc(sizeof(*w));
+ w->subchannel = c;
+ w->pollset_set = interested_parties;
+ w->notify = notify;
+ grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
+ if (interested_parties != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
+ interested_parties);
+ }
+ GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher");
+ gpr_mu_lock(&c->mu);
+ w->next = &c->root_external_state_watcher;
+ w->prev = w->next->prev;
+ w->next->prev = w->prev->next = w;
+ if (grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &c->state_tracker, state, &w->closure)) {
+ c->connecting = 1;
+ /* released by connection */
+ GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
+ start_connect(exec_ctx, c);
+ }
+ gpr_mu_unlock(&c->mu);
+ }
+}
+
+void grpc_connected_subchannel_process_transport_op(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_transport_op *op) {
+ grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_channel_element *top_elem = grpc_channel_stack_element(channel_stack, 0);
+ top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
+}
+
+static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
+ bool iomgr_success) {
+ state_watcher *sw = p;
+ grpc_subchannel *c = sw->subchannel;
+ gpr_mu *mu = &c->mu;
+
+ gpr_mu_lock(mu);
+
+ /* if we failed just leave this closure */
+ if (iomgr_success) {
+ if (sw->connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* any errors on a subchannel ==> we're done, create a new one */
+ sw->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
+ }
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ sw->connectivity_state, "reflect_child");
+ if (sw->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, GET_CONNECTED_SUBCHANNEL(c, no_barrier), NULL,
+ &sw->connectivity_state, &sw->closure);
+ GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
+ sw = NULL;
+ }
+ }
+
+ gpr_mu_unlock(mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "state_watcher");
+ gpr_free(sw);
+}
+
+static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *con,
+ grpc_pollset_set *interested_parties,
+ grpc_connectivity_state *state,
+ grpc_closure *closure) {
+ grpc_transport_op op;
+ grpc_channel_element *elem;
+ memset(&op, 0, sizeof(op));
+ op.connectivity_state = state;
+ op.on_connectivity_state_change = closure;
+ op.bind_pollset_set = interested_parties;
+ elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
+}
+
+void grpc_connected_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *closure) {
+ connected_subchannel_state_op(exec_ctx, con, interested_parties, state,
+ closure);
+}
+
+void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *con,
+ grpc_closure *closure) {
+ grpc_transport_op op;
+ grpc_channel_element *elem;
+ memset(&op, 0, sizeof(op));
+ op.send_ping = closure;
+ elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
+}
+
+static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c) {
+ grpc_connected_subchannel *con;
+ grpc_channel_stack *stk;
+ state_watcher *sw_subchannel;
+
+ /* construct channel stack */
+ con = grpc_channel_init_create_stack(
+ exec_ctx, GRPC_CLIENT_SUBCHANNEL, 0, c->connecting_result.channel_args, 1,
+ connection_destroy, NULL, c->connecting_result.transport);
+ stk = CHANNEL_STACK_FROM_CONNECTION(con);
+ memset(&c->connecting_result, 0, sizeof(c->connecting_result));
+
+ /* initialize state watcher */
+ sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel->subchannel = c;
+ sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
+ grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
+ sw_subchannel);
+
+ if (c->disconnected) {
+ gpr_free(sw_subchannel);
+ grpc_channel_stack_destroy(exec_ctx, stk);
+ gpr_free(con);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ return;
+ }
+
+ /* publish */
+ /* TODO(ctiller): this full barrier seems to clear up a TSAN failure.
+ I'd have expected the rel_cas below to be enough, but
+ seemingly it's not.
+ Re-evaluate if we really need this. */
+ gpr_atm_full_barrier();
+ GPR_ASSERT(gpr_atm_rel_cas(&c->connected_subchannel, 0, (gpr_atm)con));
+ c->connecting = 0;
+
+  /* set up the subchannel to watch the connected subchannel for changes;
+     the subchannel's "connecting" ref is donated to the state watcher */
+ GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, con, c->pollset_set, &sw_subchannel->connectivity_state,
+ &sw_subchannel->closure);
+
+ /* signal completion */
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker, GRPC_CHANNEL_READY,
+ "connected");
+}
+
+static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, bool iomgr_success) {
+ grpc_subchannel *c = arg;
+ gpr_mu_lock(&c->mu);
+ c->have_alarm = 0;
+ if (c->disconnected) {
+ iomgr_success = 0;
+ }
+ if (iomgr_success) {
+ c->next_attempt =
+ gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ continue_connect(exec_ctx, c);
+ gpr_mu_unlock(&c->mu);
+ } else {
+ gpr_mu_unlock(&c->mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ }
+}
+
+static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ grpc_subchannel *c = arg;
+
+ GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
+ gpr_mu_lock(&c->mu);
+ if (c->connecting_result.transport != NULL) {
+ publish_transport_locked(exec_ctx, c);
+ } else if (c->disconnected) {
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ } else {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ GPR_ASSERT(!c->have_alarm);
+ c->have_alarm = 1;
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connect_failed");
+ grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
+ }
+ gpr_mu_unlock(&c->mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+}
+
+/*
+ * grpc_subchannel_call implementation
+ */
+
+static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
+ bool success) {
+ grpc_subchannel_call *c = call;
+ GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, c->connection, "subchannel_call");
+ gpr_free(c);
+ GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
+}
+
+void grpc_subchannel_call_ref(grpc_subchannel_call *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
+}
+
+void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
+}
+
+char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call) {
+ grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
+ grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
+ return top_elem->filter->get_peer(exec_ctx, top_elem);
+}
+
+void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call,
+ grpc_transport_stream_op *op) {
+ grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
+ grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
+ top_elem->filter->start_transport_stream_op(exec_ctx, top_elem, op);
+}
+
+grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel *c) {
+ return GET_CONNECTED_SUBCHANNEL(c, acq);
+}
+
+grpc_subchannel_call *grpc_connected_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_pollset *pollset) {
+ grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_subchannel_call *call =
+ gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
+ grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call);
+ call->connection = con;
+ GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
+ grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, call,
+ NULL, NULL, callstk);
+ grpc_call_stack_set_pollset(exec_ctx, callstk, pollset);
+ return call;
+}
+
+grpc_call_stack *grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call *subchannel_call) {
+ return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
+}
diff --git a/src/core/client_config/subchannel.h b/src/core/client_config/subchannel.h
new file mode 100644
index 0000000000..ef9f2f1d1e
--- /dev/null
+++ b/src/core/client_config/subchannel.h
@@ -0,0 +1,174 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_H
+#define GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_H
+
+#include "src/core/channel/channel_stack.h"
+#include "src/core/client_config/connector.h"
+#include "src/core/transport/connectivity_state.h"
+
+/** A (sub-)channel that knows how to connect to exactly one target
+ address. Provides a target for load balancing. */
+typedef struct grpc_subchannel grpc_subchannel;
+typedef struct grpc_connected_subchannel grpc_connected_subchannel;
+typedef struct grpc_subchannel_call grpc_subchannel_call;
+typedef struct grpc_subchannel_args grpc_subchannel_args;
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define GRPC_SUBCHANNEL_REF(p, r) \
+ grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
+ grpc_subchannel_ref_from_weak_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
+ grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
+ grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
+ grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
+ grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
+ grpc_subchannel_call_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS \
+ , const char *file, int line, const char *reason
+#else
+#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
+ grpc_subchannel_ref_from_weak_ref((p))
+#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
+ grpc_subchannel_weak_unref((cl), (p))
+#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_connected_subchannel_unref((cl), (p))
+#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
+#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
+ grpc_subchannel_call_unref((cl), (p))
+#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
+#endif
+
+grpc_subchannel *grpc_subchannel_ref(grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel *grpc_subchannel_weak_ref(grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_ref(grpc_connected_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_ref(grpc_subchannel_call *call
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+
+/** construct a subchannel call */
+grpc_subchannel_call *grpc_connected_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
+ grpc_pollset *pollset);
+
+/** process a transport level op */
+void grpc_connected_subchannel_process_transport_op(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
+ grpc_transport_op *op);
+
+/** poll the current connectivity state of a channel */
+grpc_connectivity_state grpc_subchannel_check_connectivity(
+ grpc_subchannel *channel);
+
+/** call notify when the connectivity state of a channel changes from *state.
+ Updates *state with the new state of the channel */
+void grpc_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify);
+void grpc_connected_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify);
+void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *channel,
+ grpc_closure *notify);
+
+/** retrieve the grpc_connected_subchannel - or NULL if called before
+ the subchannel becomes connected */
+grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel *subchannel);
+
+/** continue processing a transport op */
+void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *subchannel_call,
+ grpc_transport_stream_op *op);
+
+/** continue querying for peer */
+char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *subchannel_call);
+
+grpc_call_stack *grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call *subchannel_call);
+
+struct grpc_subchannel_args {
+ /* When updating this struct, also update subchannel_index.c */
+
+ /** Channel filters for this channel - wrapped factories will likely
+ want to mutate this */
+ const grpc_channel_filter **filters;
+ /** The number of filters in the above array */
+ size_t filter_count;
+ /** Channel arguments to be supplied to the newly created channel */
+ const grpc_channel_args *args;
+ /** Address to connect to */
+ struct sockaddr *addr;
+ size_t addr_len;
+};
+
+/** create a subchannel given a connector */
+grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector,
+ grpc_subchannel_args *args);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_H */
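
The GRPC_SUBCHANNEL_REF / _UNREF macros above switch call signatures at compile time: with GRPC_STREAM_REFCOUNT_DEBUG defined, every ref and unref carries __FILE__, __LINE__ and a reason string; otherwise the extra parameters disappear entirely. A minimal standalone sketch of the same pattern follows, with hypothetical FOO_* names (not gRPC code).

/* Standalone sketch (not gRPC code) of the EXTRA_ARGS pattern used in the
   header above: debug builds log every ref/unref call site, release builds
   compile the extra parameters away. */
#include <stdio.h>

typedef struct { int refs; } foo;

#ifdef FOO_REFCOUNT_DEBUG
#define FOO_REF(p, r) foo_ref((p), __FILE__, __LINE__, (r))
#define FOO_UNREF(p, r) foo_unref((p), __FILE__, __LINE__, (r))
#define FOO_REF_EXTRA_ARGS , const char *file, int line, const char *reason
#else
#define FOO_REF(p, r) foo_ref((p))
#define FOO_UNREF(p, r) foo_unref((p))
#define FOO_REF_EXTRA_ARGS
#endif

void foo_ref(foo *f FOO_REF_EXTRA_ARGS) {
#ifdef FOO_REFCOUNT_DEBUG
  fprintf(stderr, "%s:%d ref %p for %s\n", file, line, (void *)f, reason);
#endif
  f->refs++;
}

void foo_unref(foo *f FOO_REF_EXTRA_ARGS) {
#ifdef FOO_REFCOUNT_DEBUG
  fprintf(stderr, "%s:%d unref %p for %s\n", file, line, (void *)f, reason);
#endif
  f->refs--;
}

int main(void) {
  foo f = {0};
  FOO_REF(&f, "example");   /* call sites always name a reason ... */
  FOO_UNREF(&f, "example"); /* ... which only costs anything when debugging */
  return 0;
}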
diff --git a/src/core/client_config/subchannel_factory.c b/src/core/client_config/subchannel_factory.c
new file mode 100644
index 0000000000..2c64219e8b
--- /dev/null
+++ b/src/core/client_config/subchannel_factory.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/subchannel_factory.h"
+
+void grpc_subchannel_factory_ref(grpc_subchannel_factory* factory) {
+ factory->vtable->ref(factory);
+}
+
+void grpc_subchannel_factory_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_factory* factory) {
+ factory->vtable->unref(exec_ctx, factory);
+}
+
+grpc_subchannel* grpc_subchannel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_subchannel_factory* factory,
+ grpc_subchannel_args* args) {
+ return factory->vtable->create_subchannel(exec_ctx, factory, args);
+}
diff --git a/src/core/channel/child_channel.h b/src/core/client_config/subchannel_factory.h
index 556a1c731c..c638f377a6 100644
--- a/src/core/channel/child_channel.h
+++ b/src/core/client_config/subchannel_factory.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,35 +31,36 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_CHILD_CHANNEL_H
-#define GRPC_INTERNAL_CORE_CHANNEL_CHILD_CHANNEL_H
+#ifndef GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_H
+#define GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_H
#include "src/core/channel/channel_stack.h"
+#include "src/core/client_config/subchannel.h"
-/* helper for filters that need to host child channel stacks... handles
- lifetime and upwards propagation cleanly */
+typedef struct grpc_subchannel_factory grpc_subchannel_factory;
+typedef struct grpc_subchannel_factory_vtable grpc_subchannel_factory_vtable;
-extern const grpc_channel_filter grpc_child_channel_top_filter;
+/** Constructor for new configured channels.
+    Wrapping this type in decorators is the encouraged way to adapt its behavior. */
+struct grpc_subchannel_factory {
+ const grpc_subchannel_factory_vtable *vtable;
+};
-typedef grpc_channel_stack grpc_child_channel;
-typedef grpc_call_stack grpc_child_call;
+struct grpc_subchannel_factory_vtable {
+ void (*ref)(grpc_subchannel_factory *factory);
+ void (*unref)(grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *factory);
+ grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *factory,
+ grpc_subchannel_args *args);
+};
-/* filters[0] must be &grpc_child_channel_top_filter */
-grpc_child_channel *grpc_child_channel_create(
- grpc_channel_element *parent, const grpc_channel_filter **filters,
- size_t filter_count, const grpc_channel_args *args,
- grpc_mdctx *metadata_context);
-void grpc_child_channel_handle_op(grpc_child_channel *channel,
- grpc_channel_op *op);
-grpc_channel_element *grpc_child_channel_get_bottom_element(
- grpc_child_channel *channel);
-void grpc_child_channel_destroy(grpc_child_channel *channel,
- int wait_for_callbacks);
+void grpc_subchannel_factory_ref(grpc_subchannel_factory *factory);
+void grpc_subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *factory);
-grpc_child_call *grpc_child_channel_create_call(grpc_child_channel *channel,
- grpc_call_element *parent,
- grpc_transport_op *initial_op);
-grpc_call_element *grpc_child_call_get_top_element(grpc_child_call *call);
-void grpc_child_call_destroy(grpc_child_call *call);
+/** Create a new grpc_subchannel */
+grpc_subchannel *grpc_subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *factory,
+ grpc_subchannel_args *args);
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHILD_CHANNEL_H */
+#endif /* GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_H */
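
grpc_subchannel_factory is a plain vtable interface, and the comment above invites decorators: a wrapper factory that holds an inner factory and forwards create_subchannel while adjusting arguments or recording statistics. Below is a standalone sketch of that shape with made-up names (counter_factory, base_factory); it is illustrative only, not gRPC code.

/* Standalone sketch (not gRPC code) of a vtable interface plus a decorator
   that counts how many objects the wrapped factory creates. */
#include <stdio.h>
#include <stdlib.h>

typedef struct factory factory;
typedef struct {
  int (*create)(factory *f, int arg);
  void (*destroy)(factory *f);
} factory_vtable;
struct factory { const factory_vtable *vtable; };

/* base factory: just hands back the argument */
static int base_create(factory *f, int arg) { (void)f; return arg; }
static void base_destroy(factory *f) { free(f); }
static const factory_vtable base_vtable = {base_create, base_destroy};

/* decorator: forwards to the wrapped factory and keeps a counter */
typedef struct {
  factory base; /* must be first so a counter_factory* can be used as a factory* */
  factory *wrapped;
  int created;
} counter_factory;

static int counter_create(factory *f, int arg) {
  counter_factory *c = (counter_factory *)f;
  c->created++;
  return c->wrapped->vtable->create(c->wrapped, arg);
}
static void counter_destroy(factory *f) {
  counter_factory *c = (counter_factory *)f;
  c->wrapped->vtable->destroy(c->wrapped);
  free(c);
}
static const factory_vtable counter_vtable = {counter_create, counter_destroy};

int main(void) {
  factory *base = malloc(sizeof(*base));
  base->vtable = &base_vtable;
  counter_factory *c = malloc(sizeof(*c));
  c->base.vtable = &counter_vtable;
  c->wrapped = base;
  c->created = 0;
  printf("%d\n", c->base.vtable->create(&c->base, 42)); /* prints 42 */
  printf("created %d\n", c->created);                   /* prints created 1 */
  c->base.vtable->destroy(&c->base);
  return 0;
}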
diff --git a/src/core/client_config/subchannel_index.c b/src/core/client_config/subchannel_index.c
new file mode 100644
index 0000000000..24cc76cf22
--- /dev/null
+++ b/src/core/client_config/subchannel_index.c
@@ -0,0 +1,262 @@
+//
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+//
+
+#include "src/core/client_config/subchannel_index.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/avl.h>
+#include <grpc/support/tls.h>
+
+#include "src/core/channel/channel_args.h"
+
+// a map of subchannel_key --> subchannel, used for detecting connections
+// to the same destination in order to share them
+static gpr_avl g_subchannel_index;
+
+static gpr_mu g_mu;
+
+struct grpc_subchannel_key {
+ grpc_connector *connector;
+ grpc_subchannel_args args;
+};
+
+GPR_TLS_DECL(subchannel_index_exec_ctx);
+
+static void enter_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_ASSERT(gpr_tls_get(&subchannel_index_exec_ctx) == 0);
+ gpr_tls_set(&subchannel_index_exec_ctx, (intptr_t)exec_ctx);
+}
+
+static void leave_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_ASSERT(gpr_tls_get(&subchannel_index_exec_ctx) == (intptr_t)exec_ctx);
+ gpr_tls_set(&subchannel_index_exec_ctx, 0);
+}
+
+static grpc_exec_ctx *current_ctx() {
+ grpc_exec_ctx *c = (grpc_exec_ctx *)gpr_tls_get(&subchannel_index_exec_ctx);
+ GPR_ASSERT(c != NULL);
+ return c;
+}
+
+static grpc_subchannel_key *create_key(
+ grpc_connector *connector, grpc_subchannel_args *args,
+ grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
+ grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
+ k->connector = grpc_connector_ref(connector);
+ k->args.filter_count = args->filter_count;
+ k->args.filters = gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
+ memcpy((grpc_channel_filter *)k->args.filters, args->filters,
+ sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.addr_len = args->addr_len;
+ k->args.addr = gpr_malloc(args->addr_len);
+ memcpy(k->args.addr, args->addr, k->args.addr_len);
+ k->args.args = copy_channel_args(args->args);
+ return k;
+}
+
+grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *connector,
+ grpc_subchannel_args *args) {
+ return create_key(connector, args, grpc_channel_args_normalize);
+}
+
+static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) {
+ return create_key(k->connector, &k->args, grpc_channel_args_copy);
+}
+
+static int subchannel_key_compare(grpc_subchannel_key *a,
+ grpc_subchannel_key *b) {
+ int c = GPR_ICMP(a->connector, b->connector);
+ if (c != 0) return c;
+ c = GPR_ICMP(a->args.addr_len, b->args.addr_len);
+ if (c != 0) return c;
+ c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
+ if (c != 0) return c;
+ c = memcmp(a->args.addr, b->args.addr, a->args.addr_len);
+ if (c != 0) return c;
+ c = memcmp(a->args.filters, b->args.filters,
+ a->args.filter_count * sizeof(*a->args.filters));
+ if (c != 0) return c;
+ return grpc_channel_args_compare(a->args.args, b->args.args);
+}
+
+void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *k) {
+ grpc_connector_unref(exec_ctx, k->connector);
+ gpr_free(k->args.addr);
+  gpr_free((grpc_channel_filter **)k->args.filters);
+ grpc_channel_args_destroy((grpc_channel_args *)k->args.args);
+ gpr_free(k);
+}
+
+static void sck_avl_destroy(void *p) {
+ grpc_subchannel_key_destroy(current_ctx(), p);
+}
+
+static void *sck_avl_copy(void *p) { return subchannel_key_copy(p); }
+
+static long sck_avl_compare(void *a, void *b) {
+ return subchannel_key_compare(a, b);
+}
+
+static void scv_avl_destroy(void *p) {
+ GRPC_SUBCHANNEL_WEAK_UNREF(current_ctx(), p, "subchannel_index");
+}
+
+static void *scv_avl_copy(void *p) {
+ GRPC_SUBCHANNEL_WEAK_REF(p, "subchannel_index");
+ return p;
+}
+
+static const gpr_avl_vtable subchannel_avl_vtable = {
+ .destroy_key = sck_avl_destroy,
+ .copy_key = sck_avl_copy,
+ .compare_keys = sck_avl_compare,
+ .destroy_value = scv_avl_destroy,
+ .copy_value = scv_avl_copy};
+
+void grpc_subchannel_index_init(void) {
+ g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
+ gpr_mu_init(&g_mu);
+ gpr_tls_init(&subchannel_index_exec_ctx);
+}
+
+void grpc_subchannel_index_shutdown(void) {
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_subchannel_index);
+ gpr_tls_destroy(&subchannel_index_exec_ctx);
+}
+
+grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key) {
+ enter_ctx(exec_ctx);
+
+ // Lock, and take a reference to the subchannel index.
+  // We don't need to do the search under the lock since the AVL is immutable.
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ grpc_subchannel *c =
+ GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(gpr_avl_get(index, key), "index_find");
+ gpr_avl_unref(index);
+
+ leave_ctx(exec_ctx);
+ return c;
+}
+
+grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed) {
+ enter_ctx(exec_ctx);
+
+ grpc_subchannel *c = NULL;
+
+ while (c == NULL) {
+ // Compare and swap loop:
+ // - take a reference to the current index
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ // - Check to see if a subchannel already exists
+ c = gpr_avl_get(index, key);
+ if (c != NULL) {
+ // yes -> we're done
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, constructed, "index_register");
+ } else {
+ // no -> update the avl and compare/swap
+ gpr_avl updated =
+ gpr_avl_add(gpr_avl_ref(index), subchannel_key_copy(key),
+ GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"));
+
+ // it may happen (but it's expected to be unlikely)
+ // that some other thread has changed the index:
+ // compare/swap here to check that, and retry as necessary
+ gpr_mu_lock(&g_mu);
+ if (index.root == g_subchannel_index.root) {
+ GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ c = constructed;
+ }
+ gpr_mu_unlock(&g_mu);
+
+ gpr_avl_unref(updated);
+ }
+ gpr_avl_unref(index);
+ }
+
+ leave_ctx(exec_ctx);
+
+ return c;
+}
+
+void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed) {
+ enter_ctx(exec_ctx);
+
+ bool done = false;
+ while (!done) {
+ // Compare and swap loop:
+ // - take a reference to the current index
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ // Check to see if this key still refers to the previously
+ // registered subchannel
+ grpc_subchannel *c = gpr_avl_get(index, key);
+ if (c != constructed) {
+ gpr_avl_unref(index);
+ break;
+ }
+
+ // compare and swap the update (some other thread may have
+ // mutated the index behind us)
+ gpr_avl updated = gpr_avl_remove(gpr_avl_ref(index), key);
+
+ gpr_mu_lock(&g_mu);
+ if (index.root == g_subchannel_index.root) {
+ GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ done = true;
+ }
+ gpr_mu_unlock(&g_mu);
+
+ gpr_avl_unref(updated);
+ gpr_avl_unref(index);
+ }
+
+ leave_ctx(exec_ctx);
+}
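
The register/unregister loops above rely on the AVL being immutable: readers take the lock only long enough to grab a referenced snapshot, searches run unlocked, and writers publish a rebuilt tree only if the root is still the one they started from, retrying otherwise. A standalone sketch of that snapshot-compare-publish loop follows, with a tiny copy-on-write array standing in for gpr_avl; all names are made up and this is not gRPC code.

/* Standalone sketch (not gRPC code) of the snapshot / rebuild / publish-if-
   unchanged loop used by grpc_subchannel_index_register above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int refs;
  size_t count;
  int values[8]; /* capacity checks omitted for brevity */
} intset;        /* stands in for the immutable gpr_avl */

static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;
static intset *g_current; /* the published snapshot */

static intset *set_ref(intset *s) { s->refs++; return s; }
static void set_unref(intset *s) { if (--s->refs == 0) free(s); }

static intset *set_add(const intset *s, int v) { /* copy-on-write insert */
  intset *n = malloc(sizeof(*n));
  n->refs = 1;
  n->count = s->count;
  for (size_t i = 0; i < s->count; i++) n->values[i] = s->values[i];
  n->values[n->count++] = v;
  return n;
}

static void register_value(int v) {
  bool done = false;
  while (!done) {
    pthread_mutex_lock(&g_mu);
    intset *snapshot = set_ref(g_current);
    pthread_mutex_unlock(&g_mu);

    intset *updated = set_add(snapshot, v); /* build off the snapshot, unlocked */

    pthread_mutex_lock(&g_mu);
    if (snapshot == g_current) { /* nobody published in the meantime */
      intset *old = g_current;
      g_current = updated;
      set_unref(old);
      done = true;
    } else {
      set_unref(updated); /* lost the race: discard our version and retry */
    }
    pthread_mutex_unlock(&g_mu);
    set_unref(snapshot);
  }
}

int main(void) {
  g_current = calloc(1, sizeof(*g_current));
  g_current->refs = 1;
  register_value(7);
  register_value(9);
  printf("count=%zu\n", g_current->count); /* prints count=2 */
  set_unref(g_current);
  return 0;
}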
diff --git a/src/core/client_config/subchannel_index.h b/src/core/client_config/subchannel_index.h
new file mode 100644
index 0000000000..3cd5d12349
--- /dev/null
+++ b/src/core/client_config/subchannel_index.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+#define GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+
+#include "src/core/client_config/connector.h"
+#include "src/core/client_config/subchannel.h"
+
+/** \file Provides an index of active subchannels so that they can be
+ shared amongst channels */
+
+typedef struct grpc_subchannel_key grpc_subchannel_key;
+
+/** Create a key that can be used to uniquely identify a subchannel */
+grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *con,
+ grpc_subchannel_args *args);
+
+/** Destroy a subchannel key */
+void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key);
+
+/** Given a subchannel key, find the subchannel registered for it.
+ Returns NULL if no such channel exists.
+ Thread-safe. */
+grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key);
+
+/** Register a subchannel against a key.
+ Takes ownership of \a constructed.
+ Returns the registered subchannel. This may be different from
+ \a constructed in the case of a registration race. */
+grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed);
+
+/** Remove \a constructed as the registered subchannel for \a key. */
+void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed);
+
+/** Initialize the subchannel index (global) */
+void grpc_subchannel_index_init(void);
+/** Shutdown the subchannel index (global) */
+void grpc_subchannel_index_shutdown(void);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H */
diff --git a/src/core/client_config/uri_parser.c b/src/core/client_config/uri_parser.c
new file mode 100644
index 0000000000..cbdfffcf8e
--- /dev/null
+++ b/src/core/client_config/uri_parser.c
@@ -0,0 +1,242 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/uri_parser.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+#include <grpc/support/string_util.h>
+
+/** a size_t default value... maps to all 1's */
+#define NOT_SET (~(size_t)0)
+
+static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
+ int suppress_errors) {
+ char *line_prefix;
+ size_t pfx_len;
+
+ if (!suppress_errors) {
+ gpr_asprintf(&line_prefix, "bad uri.%s: '", section);
+ pfx_len = strlen(line_prefix) + pos;
+ gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
+ gpr_free(line_prefix);
+
+ line_prefix = gpr_malloc(pfx_len + 1);
+ memset(line_prefix, ' ', pfx_len);
+ line_prefix[pfx_len] = 0;
+ gpr_log(GPR_ERROR, "%s^ here", line_prefix);
+ gpr_free(line_prefix);
+ }
+
+ return NULL;
+}
+
+/** Returns a copy of \a src[begin, end) */
+static char *copy_component(const char *src, size_t begin, size_t end) {
+ char *out = gpr_malloc(end - begin + 1);
+ memcpy(out, src + begin, end - begin);
+ out[end - begin] = 0;
+ return out;
+}
+
+/** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar
+ * production. If \a uri_text[i] introduces an invalid \a pchar (such as percent
+ * sign not followed by two hex digits), NOT_SET is returned. */
+static size_t parse_pchar(const char *uri_text, size_t i) {
+ /* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+ * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+ * pct-encoded = "%" HEXDIG HEXDIG
+ * sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+ / "*" / "+" / "," / ";" / "=" */
+ char c = uri_text[i];
+ if (((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z')) ||
+ ((c >= '0') && (c <= '9')) ||
+ (c == '-' || c == '.' || c == '_' || c == '~') || /* unreserved */
+      (c == '!' || c == '$' || c == '&' || c == '\'' ||
+ c == '(' || c == ')' || c == '*' || c == '+' || c == ',' || c == ';' ||
+ c == '=') /* sub-delims */) {
+ return 1;
+ }
+ if (c == '%') { /* pct-encoded */
+ size_t j;
+ if (uri_text[i + 1] == 0 || uri_text[i + 2] == 0) {
+ return NOT_SET;
+ }
+    for (j = i + 1; j < i + 3; j++) {
+ c = uri_text[j];
+ if (!(((c >= '0') && (c <= '9')) || ((c >= 'a') && (c <= 'f')) ||
+ ((c >= 'A') && (c <= 'F')))) {
+ return NOT_SET;
+ }
+ }
+ return 2;
+ }
+ return 0;
+}
+
+/* *( pchar / "?" / "/" ) */
+static int parse_fragment_or_query(const char *uri_text, size_t *i) {
+ char c;
+ while ((c = uri_text[*i]) != 0) {
+ const size_t advance = parse_pchar(uri_text, *i); /* pchar */
+ switch (advance) {
+ case 0: /* uri_text[i] isn't in pchar */
+ /* maybe it's ? or / */
+ if (uri_text[*i] == '?' || uri_text[*i] == '/') {
+ (*i)++;
+ break;
+ } else {
+ return 1;
+ }
+ GPR_UNREACHABLE_CODE(return 0);
+ default:
+ (*i) += advance;
+ break;
+ case NOT_SET: /* uri_text[i] introduces an invalid URI */
+ return 0;
+ }
+ }
+ /* *i is the first uri_text position past the \a query production, maybe \0 */
+ return 1;
+}
+
+grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors) {
+ grpc_uri *uri;
+ size_t scheme_begin = 0;
+ size_t scheme_end = NOT_SET;
+ size_t authority_begin = NOT_SET;
+ size_t authority_end = NOT_SET;
+ size_t path_begin = NOT_SET;
+ size_t path_end = NOT_SET;
+ size_t query_begin = NOT_SET;
+ size_t query_end = NOT_SET;
+ size_t fragment_begin = NOT_SET;
+ size_t fragment_end = NOT_SET;
+ size_t i;
+
+ for (i = scheme_begin; uri_text[i] != 0; i++) {
+ if (uri_text[i] == ':') {
+ scheme_end = i;
+ break;
+ }
+ if (uri_text[i] >= 'a' && uri_text[i] <= 'z') continue;
+ if (uri_text[i] >= 'A' && uri_text[i] <= 'Z') continue;
+ if (i != scheme_begin) {
+ if (uri_text[i] >= '0' && uri_text[i] <= '9') continue;
+ if (uri_text[i] == '+') continue;
+ if (uri_text[i] == '-') continue;
+ if (uri_text[i] == '.') continue;
+ }
+ break;
+ }
+ if (scheme_end == NOT_SET) {
+ return bad_uri(uri_text, i, "scheme", suppress_errors);
+ }
+
+ if (uri_text[scheme_end + 1] == '/' && uri_text[scheme_end + 2] == '/') {
+ authority_begin = scheme_end + 3;
+ for (i = authority_begin; uri_text[i] != 0 && authority_end == NOT_SET;
+ i++) {
+ if (uri_text[i] == '/' || uri_text[i] == '?' || uri_text[i] == '#') {
+ authority_end = i;
+ }
+ }
+ if (authority_end == NOT_SET && uri_text[i] == 0) {
+ authority_end = i;
+ }
+ if (authority_end == NOT_SET) {
+ return bad_uri(uri_text, i, "authority", suppress_errors);
+ }
+ /* TODO(ctiller): parse the authority correctly */
+ path_begin = authority_end;
+ } else {
+ path_begin = scheme_end + 1;
+ }
+
+ for (i = path_begin; uri_text[i] != 0; i++) {
+ if (uri_text[i] == '?' || uri_text[i] == '#') {
+ path_end = i;
+ break;
+ }
+ }
+ if (path_end == NOT_SET && uri_text[i] == 0) {
+ path_end = i;
+ }
+ if (path_end == NOT_SET) {
+ return bad_uri(uri_text, i, "path", suppress_errors);
+ }
+
+ if (uri_text[i] == '?') {
+ query_begin = ++i;
+ if (!parse_fragment_or_query(uri_text, &i)) {
+ return bad_uri(uri_text, i, "query", suppress_errors);
+ } else if (uri_text[i] != 0 && uri_text[i] != '#') {
+ /* We must be at the end or at the beginning of a fragment */
+ return bad_uri(uri_text, i, "query", suppress_errors);
+ }
+ query_end = i;
+ }
+ if (uri_text[i] == '#') {
+ fragment_begin = ++i;
+ if (!parse_fragment_or_query(uri_text, &i)) {
+      return bad_uri(uri_text, i, "fragment", suppress_errors);
+ } else if (uri_text[i] != 0) {
+ /* We must be at the end */
+ return bad_uri(uri_text, i, "fragment", suppress_errors);
+ }
+ fragment_end = i;
+ }
+
+ uri = gpr_malloc(sizeof(*uri));
+ memset(uri, 0, sizeof(*uri));
+ uri->scheme = copy_component(uri_text, scheme_begin, scheme_end);
+ uri->authority = copy_component(uri_text, authority_begin, authority_end);
+ uri->path = copy_component(uri_text, path_begin, path_end);
+ uri->query = copy_component(uri_text, query_begin, query_end);
+ uri->fragment = copy_component(uri_text, fragment_begin, fragment_end);
+
+ return uri;
+}
+
+void grpc_uri_destroy(grpc_uri *uri) {
+ if (!uri) return;
+ gpr_free(uri->scheme);
+ gpr_free(uri->authority);
+ gpr_free(uri->path);
+ gpr_free(uri->query);
+ gpr_free(uri->fragment);
+ gpr_free(uri);
+}
diff --git a/src/core/client_config/uri_parser.h b/src/core/client_config/uri_parser.h
new file mode 100644
index 0000000000..af013d8cac
--- /dev/null
+++ b/src/core/client_config/uri_parser.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_CLIENT_CONFIG_URI_PARSER_H
+#define GRPC_CORE_CLIENT_CONFIG_URI_PARSER_H
+
+typedef struct {
+ char *scheme;
+ char *authority;
+ char *path;
+ char *query;
+ char *fragment;
+} grpc_uri;
+
+/** parse a uri, return NULL on failure */
+grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors);
+
+/** destroy a uri */
+void grpc_uri_destroy(grpc_uri *uri);
+
+#endif /* GRPC_CORE_CLIENT_CONFIG_URI_PARSER_H */
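
A small usage sketch for the parser declared above; it only builds inside the gRPC core tree (it includes an internal header), and the sample URI is made up.

/* Usage sketch for grpc_uri_parse / grpc_uri_destroy; assumes it is compiled
   inside the gRPC source tree. */
#include <stdio.h>
#include "src/core/client_config/uri_parser.h"

int main(void) {
  grpc_uri *uri = grpc_uri_parse("ipv4://127.0.0.1:1234?foo=bar#frag", 0);
  if (uri == NULL) return 1; /* parse errors return NULL (and log unless suppressed) */
  printf("scheme=%s authority=%s path=%s query=%s fragment=%s\n", uri->scheme,
         uri->authority, uri->path, uri->query, uri->fragment);
  grpc_uri_destroy(uri);
  return 0;
}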
diff --git a/src/core/compression/algorithm_metadata.h b/src/core/compression/algorithm_metadata.h
new file mode 100644
index 0000000000..34abf1dba2
--- /dev/null
+++ b/src/core/compression/algorithm_metadata.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_COMPRESSION_ALGORITHM_METADATA_H
+#define GRPC_CORE_COMPRESSION_ALGORITHM_METADATA_H
+
+#include <grpc/compression.h>
+#include "src/core/transport/metadata.h"
+
+/** Return compression algorithm based metadata value */
+grpc_mdstr *grpc_compression_algorithm_mdstr(
+ grpc_compression_algorithm algorithm);
+
+/** Return compression algorithm based metadata element (grpc-encoding: xxx) */
+grpc_mdelem *grpc_compression_encoding_mdelem(
+ grpc_compression_algorithm algorithm);
+
+/** Find compression algorithm based on passed in mdstr - returns
+ * GRPC_COMPRESS_ALGORITHM_COUNT on failure */
+grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
+ grpc_mdstr *str);
+
+#endif /* GRPC_CORE_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/compression/compression_algorithm.c b/src/core/compression/compression_algorithm.c
new file mode 100644
index 0000000000..2810a38b68
--- /dev/null
+++ b/src/core/compression/compression_algorithm.c
@@ -0,0 +1,203 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/compression.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/compression/algorithm_metadata.h"
+#include "src/core/surface/api_trace.h"
+#include "src/core/transport/static_metadata.h"
+
+int grpc_compression_algorithm_parse(const char *name, size_t name_length,
+ grpc_compression_algorithm *algorithm) {
+  /* we use strncmp not only because it's safer (even though in this case it
+   * doesn't matter, given that we are comparing against string literals), but
+   * because this way we needn't have "name" null-terminated (useful for slice
+   * data, for example) */
+ GRPC_API_TRACE(
+ "grpc_compression_algorithm_parse("
+ "name=%*.*s, name_length=%lu, algorithm=%p)",
+ 5, ((int)name_length, (int)name_length, name, (unsigned long)name_length,
+ algorithm));
+ if (name_length == 0) {
+ return 0;
+ }
+ if (strncmp(name, "identity", name_length) == 0) {
+ *algorithm = GRPC_COMPRESS_NONE;
+ } else if (strncmp(name, "gzip", name_length) == 0) {
+ *algorithm = GRPC_COMPRESS_GZIP;
+ } else if (strncmp(name, "deflate", name_length) == 0) {
+ *algorithm = GRPC_COMPRESS_DEFLATE;
+ } else {
+ return 0;
+ }
+ return 1;
+}
+
+int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
+ char **name) {
+ GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
+ ((int)algorithm, name));
+ switch (algorithm) {
+ case GRPC_COMPRESS_NONE:
+ *name = "identity";
+ return 1;
+ case GRPC_COMPRESS_DEFLATE:
+ *name = "deflate";
+ return 1;
+ case GRPC_COMPRESS_GZIP:
+ *name = "gzip";
+ return 1;
+ case GRPC_COMPRESS_ALGORITHMS_COUNT:
+ return 0;
+ }
+ return 0;
+}
+
+grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
+ grpc_mdstr *str) {
+ if (str == GRPC_MDSTR_IDENTITY) return GRPC_COMPRESS_NONE;
+ if (str == GRPC_MDSTR_DEFLATE) return GRPC_COMPRESS_DEFLATE;
+ if (str == GRPC_MDSTR_GZIP) return GRPC_COMPRESS_GZIP;
+ return GRPC_COMPRESS_ALGORITHMS_COUNT;
+}
+
+grpc_mdstr *grpc_compression_algorithm_mdstr(
+ grpc_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_COMPRESS_NONE:
+ return GRPC_MDSTR_IDENTITY;
+ case GRPC_COMPRESS_DEFLATE:
+ return GRPC_MDSTR_DEFLATE;
+ case GRPC_COMPRESS_GZIP:
+ return GRPC_MDSTR_GZIP;
+ case GRPC_COMPRESS_ALGORITHMS_COUNT:
+ return NULL;
+ }
+ return NULL;
+}
+
+grpc_mdelem *grpc_compression_encoding_mdelem(
+ grpc_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_COMPRESS_NONE:
+ return GRPC_MDELEM_GRPC_ENCODING_IDENTITY;
+ case GRPC_COMPRESS_DEFLATE:
+ return GRPC_MDELEM_GRPC_ENCODING_DEFLATE;
+ case GRPC_COMPRESS_GZIP:
+ return GRPC_MDELEM_GRPC_ENCODING_GZIP;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+/* TODO(dgq): Add the ability to specify parameters to the individual
+ * compression algorithms */
+grpc_compression_algorithm grpc_compression_algorithm_for_level(
+ grpc_compression_level level, uint32_t accepted_encodings) {
+ GRPC_API_TRACE("grpc_compression_algorithm_for_level(level=%d)", 1,
+ ((int)level));
+ if (level > GRPC_COMPRESS_LEVEL_HIGH) {
+ gpr_log(GPR_ERROR, "Unknown compression level %d.", (int)level);
+ abort();
+ }
+
+ const size_t num_supported =
+ GPR_BITCOUNT(accepted_encodings) - 1; /* discard NONE */
+ if (level == GRPC_COMPRESS_LEVEL_NONE || num_supported == 0) {
+ return GRPC_COMPRESS_NONE;
+ }
+
+ GPR_ASSERT(level > 0);
+
+ /* Establish a "ranking" or compression algorithms in increasing order of
+ * compression.
+ * This is simplistic and we will probably want to introduce other dimensions
+ * in the future (cpu/memory cost, etc). */
+ const grpc_compression_algorithm algos_ranking[] = {GRPC_COMPRESS_GZIP,
+ GRPC_COMPRESS_DEFLATE};
+
+ /* intersect algos_ranking with the supported ones keeping the ranked order */
+ grpc_compression_algorithm
+ sorted_supported_algos[GRPC_COMPRESS_ALGORITHMS_COUNT];
+ size_t algos_supported_idx = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(algos_ranking); i++) {
+ const grpc_compression_algorithm alg = algos_ranking[i];
+ for (size_t j = 0; j < num_supported; j++) {
+ if (GPR_BITGET(accepted_encodings, alg) == 1) {
+ /* if \a alg in supported */
+ sorted_supported_algos[algos_supported_idx++] = alg;
+ break;
+ }
+ }
+ if (algos_supported_idx == num_supported) break;
+ }
+
+ switch (level) {
+ case GRPC_COMPRESS_LEVEL_NONE:
+ abort(); /* should have been handled already */
+ case GRPC_COMPRESS_LEVEL_LOW:
+ return sorted_supported_algos[0];
+ case GRPC_COMPRESS_LEVEL_MED:
+ return sorted_supported_algos[num_supported / 2];
+ case GRPC_COMPRESS_LEVEL_HIGH:
+ return sorted_supported_algos[num_supported - 1];
+ default:
+ abort();
+  }
+}
+
+void grpc_compression_options_init(grpc_compression_options *opts) {
+ opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ opts->default_compression_algorithm = GRPC_COMPRESS_NONE;
+}
+
+void grpc_compression_options_enable_algorithm(
+ grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm);
+}
+
+void grpc_compression_options_disable_algorithm(
+ grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm);
+}
+
+int grpc_compression_options_is_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) {
+ return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
+}
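
grpc_compression_algorithm_for_level above intersects a fixed ranking with the encodings the peer accepts, then maps LOW/MED/HIGH to the first, middle and last entry of that ranked list. Below is a standalone sketch of just that selection step, with plain ints and made-up names (not gRPC code).

/* Standalone sketch (not gRPC code) of the level-to-algorithm selection used
   above: given a list ranked by increasing compression, LOW picks the first
   entry, MED the middle one, HIGH the last. */
#include <stdio.h>

enum { ALGO_GZIP, ALGO_DEFLATE };
enum { LEVEL_LOW, LEVEL_MED, LEVEL_HIGH };

static int pick_for_level(int level, const int *ranked, int num_supported) {
  switch (level) {
    case LEVEL_MED:  return ranked[num_supported / 2];
    case LEVEL_HIGH: return ranked[num_supported - 1];
    case LEVEL_LOW:
    default:         return ranked[0];
  }
}

int main(void) {
  /* peer accepts both algorithms, ranked by increasing compression */
  const int ranked[] = {ALGO_GZIP, ALGO_DEFLATE};
  const char *names[] = {"gzip", "deflate"};
  printf("LOW  -> %s\n", names[pick_for_level(LEVEL_LOW, ranked, 2)]);  /* gzip */
  printf("MED  -> %s\n", names[pick_for_level(LEVEL_MED, ranked, 2)]);  /* deflate */
  printf("HIGH -> %s\n", names[pick_for_level(LEVEL_HIGH, ranked, 2)]); /* deflate */
  return 0;
}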
diff --git a/src/core/compression/message_compress.c b/src/core/compression/message_compress.c
index 7856f40dd1..edc21a9eb7 100644
--- a/src/core/compression/message_compress.c
+++ b/src/core/compression/message_compress.c
@@ -42,31 +42,35 @@
#define OUTPUT_BLOCK_SIZE 1024
-static int zlib_body(z_stream *zs, gpr_slice_buffer *input,
- gpr_slice_buffer *output,
- int (*flate)(z_stream *zs, int flush)) {
+static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
+ gpr_slice_buffer* output,
+ int (*flate)(z_stream* zs, int flush)) {
int r;
int flush;
size_t i;
gpr_slice outbuf = gpr_slice_malloc(OUTPUT_BLOCK_SIZE);
+ const uInt uint_max = ~(uInt)0;
- zs->avail_out = GPR_SLICE_LENGTH(outbuf);
+ GPR_ASSERT(GPR_SLICE_LENGTH(outbuf) <= uint_max);
+ zs->avail_out = (uInt)GPR_SLICE_LENGTH(outbuf);
zs->next_out = GPR_SLICE_START_PTR(outbuf);
flush = Z_NO_FLUSH;
for (i = 0; i < input->count; i++) {
if (i == input->count - 1) flush = Z_FINISH;
- zs->avail_in = GPR_SLICE_LENGTH(input->slices[i]);
+ GPR_ASSERT(GPR_SLICE_LENGTH(input->slices[i]) <= uint_max);
+ zs->avail_in = (uInt)GPR_SLICE_LENGTH(input->slices[i]);
zs->next_in = GPR_SLICE_START_PTR(input->slices[i]);
do {
if (zs->avail_out == 0) {
gpr_slice_buffer_add_indexed(output, outbuf);
outbuf = gpr_slice_malloc(OUTPUT_BLOCK_SIZE);
- zs->avail_out = GPR_SLICE_LENGTH(outbuf);
+ GPR_ASSERT(GPR_SLICE_LENGTH(outbuf) <= uint_max);
+ zs->avail_out = (uInt)GPR_SLICE_LENGTH(outbuf);
zs->next_out = GPR_SLICE_START_PTR(outbuf);
}
r = flate(zs, flush);
- if (r == Z_STREAM_ERROR) {
- gpr_log(GPR_INFO, "zlib: stream error");
+ if (r < 0 && r != Z_BUF_ERROR /* not fatal */) {
+ gpr_log(GPR_INFO, "zlib error (%d)", r);
goto error;
}
} while (zs->avail_out == 0);
@@ -87,7 +91,13 @@ error:
return 0;
}
-static int zlib_compress(gpr_slice_buffer *input, gpr_slice_buffer *output,
+static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) {
+ return gpr_malloc(items * size);
+}
+
+static void zfree_gpr(void* opaque, void* address) { gpr_free(address); }
+
+static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -95,12 +105,11 @@ static int zlib_compress(gpr_slice_buffer *input, gpr_slice_buffer *output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
+ zs.zalloc = zalloc_gpr;
+ zs.zfree = zfree_gpr;
r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0),
8, Z_DEFAULT_STRATEGY);
- if (r != Z_OK) {
- gpr_log(GPR_ERROR, "deflateInit2 returns %d", r);
- return 0;
- }
+ GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, deflate) && output->length < input->length;
if (!r) {
for (i = count_before; i < output->count; i++) {
@@ -113,7 +122,7 @@ static int zlib_compress(gpr_slice_buffer *input, gpr_slice_buffer *output,
return r;
}
-static int zlib_decompress(gpr_slice_buffer *input, gpr_slice_buffer *output,
+static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -121,11 +130,10 @@ static int zlib_decompress(gpr_slice_buffer *input, gpr_slice_buffer *output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
+ zs.zalloc = zalloc_gpr;
+ zs.zfree = zfree_gpr;
r = inflateInit2(&zs, 15 | (gzip ? 16 : 0));
- if (r != Z_OK) {
- gpr_log(GPR_ERROR, "inflateInit2 returns %d", r);
- return 0;
- }
+ GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, inflate);
if (!r) {
for (i = count_before; i < output->count; i++) {
@@ -138,7 +146,7 @@ static int zlib_decompress(gpr_slice_buffer *input, gpr_slice_buffer *output,
return r;
}
-static int copy(gpr_slice_buffer *input, gpr_slice_buffer *output) {
+static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
size_t i;
for (i = 0; i < input->count; i++) {
gpr_slice_buffer_add(output, gpr_slice_ref(input->slices[i]));
@@ -146,8 +154,8 @@ static int copy(gpr_slice_buffer *input, gpr_slice_buffer *output) {
return 1;
}
-int compress_inner(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+static int compress_inner(grpc_compression_algorithm algorithm,
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
/* the fallback path always needs to be send uncompressed: we simply
@@ -165,7 +173,7 @@ int compress_inner(grpc_compression_algorithm algorithm,
}
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
if (!compress_inner(algorithm, input, output)) {
copy(input, output);
return 0;
@@ -174,7 +182,7 @@ int grpc_msg_compress(grpc_compression_algorithm algorithm,
}
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
return copy(input, output);
diff --git a/src/core/compression/message_compress.h b/src/core/compression/message_compress.h
index b3eb8f579f..20b78c063b 100644
--- a/src/core/compression/message_compress.h
+++ b/src/core/compression/message_compress.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H
-#define GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H
+#ifndef GRPC_CORE_COMPRESSION_MESSAGE_COMPRESS_H
+#define GRPC_CORE_COMPRESSION_MESSAGE_COMPRESS_H
#include <grpc/compression.h>
#include <grpc/support/slice_buffer.h>
@@ -41,12 +41,12 @@
On success, appends compressed slices to output and returns 1.
On failure, appends uncompressed slices to output and returns 0. */
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output);
+ gpr_slice_buffer* input, gpr_slice_buffer* output);
/* decompress 'input' to 'output' using 'algorithm'.
On success, appends slices to output and returns 1.
On failure, output is unchanged, and returns 0. */
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output);
+ gpr_slice_buffer* input, gpr_slice_buffer* output);
-#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */
+#endif /* GRPC_CORE_COMPRESSION_MESSAGE_COMPRESS_H */
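
Usage sketch (illustrative, assuming a caller-populated input buffer): per the header comments, grpc_msg_compress falls back to a plain copy and returns 0 when compression would not help, in which case the output must be treated as uncompressed on the way back:

#include <grpc/support/slice_buffer.h>

#include "src/core/compression/message_compress.h"

static void message_compress_sketch(gpr_slice_buffer *input) {
  gpr_slice_buffer compressed, restored;
  gpr_slice_buffer_init(&compressed);
  gpr_slice_buffer_init(&restored);
  int did_compress = grpc_msg_compress(GRPC_COMPRESS_GZIP, input, &compressed);
  /* on failure `compressed` holds plain copies, so decompress with NONE */
  grpc_msg_decompress(did_compress ? GRPC_COMPRESS_GZIP : GRPC_COMPRESS_NONE,
                      &compressed, &restored);
  gpr_slice_buffer_destroy(&compressed);
  gpr_slice_buffer_destroy(&restored);
}
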
diff --git a/src/core/debug/trace.c b/src/core/debug/trace.c
index b53dfe804b..3b35d81cd8 100644
--- a/src/core/debug/trace.c
+++ b/src/core/debug/trace.c
@@ -59,10 +59,14 @@ void grpc_register_tracer(const char *name, int *flag) {
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t n = *ns;
size_t np = n + 1;
- char *s = gpr_malloc(end - beg + 1);
- memcpy(s, beg, end - beg);
- s[end-beg] = 0;
- *ss = gpr_realloc(*ss, sizeof(char**) * np);
+ char *s;
+ size_t len;
+ GPR_ASSERT(end >= beg);
+ len = (size_t)(end - beg);
+ s = gpr_malloc(len + 1);
+ memcpy(s, beg, len);
+ s[len] = 0;
+ *ss = gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
@@ -73,7 +77,7 @@ static void split(const char *s, char ***ss, size_t *ns) {
add(s, s + strlen(s), ss, ns);
} else {
add(s, c, ss, ns);
- split(c+1, ss, ns);
+ split(c + 1, ss, ns);
}
}
@@ -125,7 +129,7 @@ int grpc_tracer_set_enabled(const char *name, int enabled) {
}
if (!found) {
gpr_log(GPR_ERROR, "Unknown trace var: '%s'", name);
- return 0; /* early return */
+ return 0; /* early return */
}
}
return 1;
diff --git a/src/core/debug/trace.h b/src/core/debug/trace.h
index fc8615bc69..91ec14052e 100644
--- a/src/core/debug/trace.h
+++ b/src/core/debug/trace.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_DEBUG_TRACE_H
-#define GRPC_INTERNAL_CORE_DEBUG_TRACE_H
+#ifndef GRPC_CORE_DEBUG_TRACE_H
+#define GRPC_CORE_DEBUG_TRACE_H
#include <grpc/support/port_platform.h>
@@ -40,4 +40,4 @@ void grpc_register_tracer(const char *name, int *flag);
void grpc_tracer_init(const char *env_var_name);
void grpc_tracer_shutdown(void);
-#endif /* GRPC_INTERNAL_CORE_DEBUG_TRACE_H */
+#endif /* GRPC_CORE_DEBUG_TRACE_H */
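
For orientation (illustrative; the tracer name below is hypothetical): the comma-splitting above is what lets several tracers be listed in the environment variable handed to grpc_tracer_init (conventionally GRPC_TRACE), and individual flags can also be flipped programmatically:

#include "src/core/debug/trace.h"

static int my_component_trace = 0; /* hypothetical flag, for illustration */

static void trace_sketch(void) {
  grpc_register_tracer("my_component", &my_component_trace);
  /* same effect as listing the name in the env var read by grpc_tracer_init
     (conventionally GRPC_TRACE), e.g. GRPC_TRACE=my_component,http */
  grpc_tracer_set_enabled("my_component", 1);
}
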
diff --git a/src/core/httpcli/format_request.c b/src/core/httpcli/format_request.c
index e875423e87..04f2a2d99a 100644
--- a/src/core/httpcli/format_request.c
+++ b/src/core/httpcli/format_request.c
@@ -43,7 +43,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *buf) {
+static void fill_common_header(const grpc_httpcli_request *request,
+ gpr_strvec *buf) {
size_t i;
gpr_strvec_add(buf, gpr_strdup(request->path));
gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n"));
@@ -52,7 +53,8 @@ static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *
gpr_strvec_add(buf, gpr_strdup(request->host));
gpr_strvec_add(buf, gpr_strdup("\r\n"));
gpr_strvec_add(buf, gpr_strdup("Connection: close\r\n"));
- gpr_strvec_add(buf, gpr_strdup("User-Agent: "GRPC_HTTPCLI_USER_AGENT"\r\n"));
+ gpr_strvec_add(buf,
+ gpr_strdup("User-Agent: " GRPC_HTTPCLI_USER_AGENT "\r\n"));
/* user supplied headers */
for (i = 0; i < request->hdr_count; i++) {
gpr_strvec_add(buf, gpr_strdup(request->hdrs[i].key));
@@ -91,7 +93,7 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_add(&out, gpr_strdup("POST "));
fill_common_header(request, &out);
if (body_bytes) {
- gpr_uint8 has_content_type = 0;
+ uint8_t has_content_type = 0;
for (i = 0; i < request->hdr_count; i++) {
if (strcmp(request->hdrs[i].key, "Content-Type") == 0) {
has_content_type = 1;
diff --git a/src/core/httpcli/format_request.h b/src/core/httpcli/format_request.h
index 8bfb20bfd0..eb47cc90ca 100644
--- a/src/core/httpcli/format_request.h
+++ b/src/core/httpcli/format_request.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_HTTPCLI_FORMAT_REQUEST_H
-#define GRPC_INTERNAL_CORE_HTTPCLI_FORMAT_REQUEST_H
+#ifndef GRPC_CORE_HTTPCLI_FORMAT_REQUEST_H
+#define GRPC_CORE_HTTPCLI_FORMAT_REQUEST_H
#include "src/core/httpcli/httpcli.h"
#include <grpc/support/slice.h>
@@ -42,4 +42,4 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
const char *body_bytes,
size_t body_size);
-#endif /* GRPC_INTERNAL_CORE_HTTPCLI_FORMAT_REQUEST_H */
+#endif /* GRPC_CORE_HTTPCLI_FORMAT_REQUEST_H */
diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c
index 914355a408..1219c444c7 100644
--- a/src/core/httpcli/httpcli.c
+++ b/src/core/httpcli/httpcli.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,22 +31,22 @@
*
*/
-#include "src/core/iomgr/sockaddr.h"
#include "src/core/httpcli/httpcli.h"
+#include "src/core/iomgr/sockaddr.h"
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/httpcli/format_request.h"
+#include "src/core/httpcli/parser.h"
#include "src/core/iomgr/endpoint.h"
+#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_client.h"
-#include "src/core/httpcli/format_request.h"
-#include "src/core/httpcli/httpcli_security_connector.h"
-#include "src/core/httpcli/parser.h"
-#include "src/core/security/secure_transport_setup.h"
#include "src/core/support/string.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
typedef struct {
gpr_slice request_text;
@@ -55,242 +55,230 @@ typedef struct {
size_t next_address;
grpc_endpoint *ep;
char *host;
+ char *ssl_host_override;
gpr_timespec deadline;
int have_read_byte;
- int use_ssl;
+ const grpc_httpcli_handshaker *handshaker;
grpc_httpcli_response_cb on_response;
void *user_data;
grpc_httpcli_context *context;
grpc_pollset *pollset;
grpc_iomgr_object iomgr_obj;
+ gpr_slice_buffer incoming;
+ gpr_slice_buffer outgoing;
+ grpc_closure on_read;
+ grpc_closure done_write;
+ grpc_closure connected;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
static grpc_httpcli_post_override g_post_override = NULL;
+static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint, const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_endpoint *endpoint)) {
+ on_done(exec_ctx, arg, endpoint);
+}
+
+const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http",
+ plaintext_handshake};
+
void grpc_httpcli_context_init(grpc_httpcli_context *context) {
- grpc_pollset_set_init(&context->pollset_set);
+ context->pollset_set = grpc_pollset_set_create();
}
void grpc_httpcli_context_destroy(grpc_httpcli_context *context) {
- grpc_pollset_set_destroy(&context->pollset_set);
+ grpc_pollset_set_destroy(context->pollset_set);
}
-static void next_address(internal_request *req);
+static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req);
-static void finish(internal_request *req, int success) {
- grpc_pollset_set_del_pollset(&req->context->pollset_set, req->pollset);
- req->on_response(req->user_data, success ? &req->parser.r : NULL);
+static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
+ int success) {
+ grpc_pollset_set_del_pollset(exec_ctx, req->context->pollset_set,
+ req->pollset);
+ req->on_response(exec_ctx, req->user_data, success ? &req->parser.r : NULL);
grpc_httpcli_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);
}
if (req->ep != NULL) {
- grpc_endpoint_destroy(req->ep);
+ grpc_endpoint_destroy(exec_ctx, req->ep);
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
+ gpr_free(req->ssl_host_override);
grpc_iomgr_unregister_object(&req->iomgr_obj);
+ gpr_slice_buffer_destroy(&req->incoming);
+ gpr_slice_buffer_destroy(&req->outgoing);
gpr_free(req);
}
-static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status status) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success);
+
+static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
+ grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read);
+}
+
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
internal_request *req = user_data;
size_t i;
- for (i = 0; i < nslices; i++) {
- if (GPR_SLICE_LENGTH(slices[i])) {
+ for (i = 0; i < req->incoming.count; i++) {
+ if (GPR_SLICE_LENGTH(req->incoming.slices[i])) {
req->have_read_byte = 1;
- if (!grpc_httpcli_parser_parse(&req->parser, slices[i])) {
- finish(req, 0);
- goto done;
+ if (!grpc_httpcli_parser_parse(&req->parser, req->incoming.slices[i])) {
+ finish(exec_ctx, req, 0);
+ return;
}
}
}
- switch (status) {
- case GRPC_ENDPOINT_CB_OK:
- grpc_endpoint_notify_on_read(req->ep, on_read, req);
- break;
- case GRPC_ENDPOINT_CB_EOF:
- case GRPC_ENDPOINT_CB_ERROR:
- case GRPC_ENDPOINT_CB_SHUTDOWN:
- if (!req->have_read_byte) {
- next_address(req);
- } else {
- finish(req, grpc_httpcli_parser_eof(&req->parser));
- }
- break;
- }
-
-done:
- for (i = 0; i < nslices; i++) {
- gpr_slice_unref(slices[i]);
+ if (success) {
+ do_read(exec_ctx, req);
+ } else if (!req->have_read_byte) {
+ next_address(exec_ctx, req);
+ } else {
+ finish(exec_ctx, req, grpc_httpcli_parser_eof(&req->parser));
}
}
-static void on_written(internal_request *req) {
- grpc_endpoint_notify_on_read(req->ep, on_read, req);
+static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
+ do_read(exec_ctx, req);
}
-static void done_write(void *arg, grpc_endpoint_cb_status status) {
+static void done_write(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
internal_request *req = arg;
- switch (status) {
- case GRPC_ENDPOINT_CB_OK:
- on_written(req);
- break;
- case GRPC_ENDPOINT_CB_EOF:
- case GRPC_ENDPOINT_CB_SHUTDOWN:
- case GRPC_ENDPOINT_CB_ERROR:
- next_address(req);
- break;
+ if (success) {
+ on_written(exec_ctx, req);
+ } else {
+ next_address(exec_ctx, req);
}
}
-static void start_write(internal_request *req) {
+static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
gpr_slice_ref(req->request_text);
- switch (
- grpc_endpoint_write(req->ep, &req->request_text, 1, done_write, req)) {
- case GRPC_ENDPOINT_WRITE_DONE:
- on_written(req);
- break;
- case GRPC_ENDPOINT_WRITE_PENDING:
- break;
- case GRPC_ENDPOINT_WRITE_ERROR:
- finish(req, 0);
- break;
- }
+ gpr_slice_buffer_add(&req->outgoing, req->request_text);
+ grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write);
}
-static void on_secure_transport_setup_done(void *rp,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint) {
- internal_request *req = rp;
- if (status != GRPC_SECURITY_OK) {
- gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
- finish(req, 0);
- } else {
- req->ep = secure_endpoint;
- start_write(req);
+static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *ep) {
+ internal_request *req = arg;
+
+ if (!ep) {
+ next_address(exec_ctx, req);
+ return;
}
+
+ req->ep = ep;
+ start_write(exec_ctx, req);
}
-static void on_connected(void *arg, grpc_endpoint *tcp) {
+static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
internal_request *req = arg;
- if (!tcp) {
- next_address(req);
+ if (!req->ep) {
+ next_address(exec_ctx, req);
return;
}
- req->ep = tcp;
- if (req->use_ssl) {
- grpc_channel_security_connector *sc = NULL;
- const unsigned char *pem_root_certs = NULL;
- size_t pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
- if (pem_root_certs == NULL || pem_root_certs_size == 0) {
- gpr_log(GPR_ERROR, "Could not get default pem root certs.");
- finish(req, 0);
- return;
- }
- GPR_ASSERT(grpc_httpcli_ssl_channel_security_connector_create(
- pem_root_certs, pem_root_certs_size, req->host, &sc) ==
- GRPC_SECURITY_OK);
- grpc_setup_secure_transport(&sc->base, tcp, on_secure_transport_setup_done,
- req);
- grpc_security_connector_unref(&sc->base);
- } else {
- start_write(req);
- }
+ req->handshaker->handshake(
+ exec_ctx, req, req->ep,
+ req->ssl_host_override ? req->ssl_host_override : req->host,
+ on_handshake_done);
}
-static void next_address(internal_request *req) {
+static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
grpc_resolved_address *addr;
if (req->next_address == req->addresses->naddrs) {
- finish(req, 0);
+ finish(exec_ctx, req, 0);
return;
}
addr = &req->addresses->addrs[req->next_address++];
- grpc_tcp_client_connect(on_connected, req, &req->context->pollset_set,
- (struct sockaddr *)&addr->addr, addr->len,
- req->deadline);
+ grpc_closure_init(&req->connected, on_connected, req);
+ grpc_tcp_client_connect(
+ exec_ctx, &req->connected, &req->ep, req->context->pollset_set,
+ (struct sockaddr *)&addr->addr, addr->len, req->deadline);
}
-static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
+static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
internal_request *req = arg;
if (!addresses) {
- finish(req, 0);
+ finish(exec_ctx, req, 0);
return;
}
req->addresses = addresses;
req->next_address = 0;
- next_address(req);
+ next_address(exec_ctx, req);
}
-void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
- const grpc_httpcli_request *request,
- gpr_timespec deadline,
- grpc_httpcli_response_cb on_response, void *user_data) {
- internal_request *req;
- char *name;
- if (g_get_override &&
- g_get_override(request, deadline, on_response, user_data)) {
- return;
- }
- req = gpr_malloc(sizeof(internal_request));
+static void internal_request_begin(
+ grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset, const grpc_httpcli_request *request,
+ gpr_timespec deadline, grpc_httpcli_response_cb on_response,
+ void *user_data, const char *name, gpr_slice request_text) {
+ internal_request *req = gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
- req->request_text = grpc_httpcli_format_get_request(request);
+ req->request_text = request_text;
grpc_httpcli_parser_init(&req->parser);
req->on_response = on_response;
req->user_data = user_data;
req->deadline = deadline;
- req->use_ssl = request->use_ssl;
+ req->handshaker =
+ request->handshaker ? request->handshaker : &grpc_httpcli_plaintext;
req->context = context;
req->pollset = pollset;
- gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
+ grpc_closure_init(&req->on_read, on_read, req);
+ grpc_closure_init(&req->done_write, done_write, req);
+ gpr_slice_buffer_init(&req->incoming);
+ gpr_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
- gpr_free(name);
- if (req->use_ssl) {
- req->host = gpr_strdup(request->host);
- }
+ req->host = gpr_strdup(request->host);
+ req->ssl_host_override = gpr_strdup(request->ssl_host_override);
- grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
- grpc_resolve_address(request->host, req->use_ssl ? "https" : "http",
+ grpc_pollset_set_add_pollset(exec_ctx, req->context->pollset_set,
+ req->pollset);
+ grpc_resolve_address(request->host, req->handshaker->default_port,
on_resolved, req);
}
-void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
+ const grpc_httpcli_request *request,
+ gpr_timespec deadline,
+ grpc_httpcli_response_cb on_response, void *user_data) {
+ char *name;
+ if (g_get_override &&
+ g_get_override(exec_ctx, request, deadline, on_response, user_data)) {
+ return;
+ }
+ gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
+ internal_request_begin(exec_ctx, context, pollset, request, deadline,
+ on_response, user_data, name,
+ grpc_httpcli_format_get_request(request));
+ gpr_free(name);
+}
+
+void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data) {
- internal_request *req;
char *name;
- if (g_post_override && g_post_override(request, body_bytes, body_size,
- deadline, on_response, user_data)) {
+ if (g_post_override &&
+ g_post_override(exec_ctx, request, body_bytes, body_size, deadline,
+ on_response, user_data)) {
return;
}
- req = gpr_malloc(sizeof(internal_request));
- memset(req, 0, sizeof(*req));
- req->request_text =
- grpc_httpcli_format_post_request(request, body_bytes, body_size);
- grpc_httpcli_parser_init(&req->parser);
- req->on_response = on_response;
- req->user_data = user_data;
- req->deadline = deadline;
- req->use_ssl = request->use_ssl;
- req->context = context;
- req->pollset = pollset;
- gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
- grpc_iomgr_register_object(&req->iomgr_obj, name);
+ gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->path);
+ internal_request_begin(
+ exec_ctx, context, pollset, request, deadline, on_response, user_data,
+ name, grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
- if (req->use_ssl) {
- req->host = gpr_strdup(request->host);
- }
-
- grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
- grpc_resolve_address(request->host, req->use_ssl ? "https" : "http",
- on_resolved, req);
}
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
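
Aside (a hypothetical sketch, not part of this change): the old use_ssl flag is gone; alternate schemes now just supply a grpc_httpcli_handshaker whose handshake callback has the same shape as plaintext_handshake above. For instance, a pass-through handshaker that only logs before handing the endpoint back might look like:

#include <grpc/support/log.h>

#include "src/core/httpcli/httpcli.h"

static void logging_handshake(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_endpoint *endpoint, const char *host,
                              void (*on_done)(grpc_exec_ctx *exec_ctx,
                                              void *arg,
                                              grpc_endpoint *endpoint)) {
  gpr_log(GPR_INFO, "handshaking with %s", host);
  on_done(exec_ctx, arg, endpoint); /* pass the endpoint through unchanged */
}

static const grpc_httpcli_handshaker logging_handshaker = {"http",
                                                           logging_handshake};
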
diff --git a/src/core/httpcli/httpcli.h b/src/core/httpcli/httpcli.h
index 06699e88c2..1fe5782657 100644
--- a/src/core/httpcli/httpcli.h
+++ b/src/core/httpcli/httpcli.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,13 +31,15 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H
-#define GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H
+#ifndef GRPC_CORE_HTTPCLI_HTTPCLI_H
+#define GRPC_CORE_HTTPCLI_HTTPCLI_H
#include <stddef.h>
#include <grpc/support/time.h>
+#include "src/core/iomgr/endpoint.h"
+#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/pollset_set.h"
/* User agent this library reports */
@@ -55,22 +57,35 @@ typedef struct grpc_httpcli_header {
TODO(ctiller): allow caching and capturing multiple requests for the
same content and combining them */
typedef struct grpc_httpcli_context {
- grpc_pollset_set pollset_set;
+ grpc_pollset_set *pollset_set;
} grpc_httpcli_context;
+typedef struct {
+ const char *default_port;
+ void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
+ const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint));
+} grpc_httpcli_handshaker;
+
+extern const grpc_httpcli_handshaker grpc_httpcli_plaintext;
+extern const grpc_httpcli_handshaker grpc_httpcli_ssl;
+
/* A request */
typedef struct grpc_httpcli_request {
/* The host name to connect to */
char *host;
+ /* The host to verify in the SSL handshake (or NULL) */
+ char *ssl_host_override;
/* The path of the resource to fetch */
char *path;
/* Additional headers: count and key/values; the following are supplied
automatically and MUST NOT be set here:
- Host, Connection, User-Agent */
+ Host, Connection, User-Agent */
size_t hdr_count;
grpc_httpcli_header *hdrs;
- /* whether to use ssl for the request */
- int use_ssl;
+  /* handshaker to use for the request (e.g. ssl); NULL means plaintext */
+ const grpc_httpcli_handshaker *handshaker;
} grpc_httpcli_request;
/* A response */
@@ -85,8 +100,9 @@ typedef struct grpc_httpcli_response {
char *body;
} grpc_httpcli_response;
-/* Callback for grpc_httpcli_get */
-typedef void (*grpc_httpcli_response_cb)(void *user_data,
+/* Callback for grpc_httpcli_get and grpc_httpcli_post. */
+typedef void (*grpc_httpcli_response_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
const grpc_httpcli_response *response);
void grpc_httpcli_context_init(grpc_httpcli_context *context);
@@ -100,11 +116,10 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'request' contains request parameters - these are caller owned and can be
destroyed once the call returns
'deadline' contains a deadline for the request (or gpr_inf_future)
- 'em' points to a caller owned event manager that must be alive for the
- lifetime of the request
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call) */
-void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
@@ -124,25 +139,25 @@ void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call)
Does not support ?var1=val1&var2=val2 in the path. */
-void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
/* override functions return 1 if they handled the request, 0 otherwise */
-typedef int (*grpc_httpcli_get_override)(const grpc_httpcli_request *request,
+typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx,
+ const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response,
void *user_data);
-typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request *request,
- const char *body_bytes,
- size_t body_size,
- gpr_timespec deadline,
- grpc_httpcli_response_cb on_response,
- void *user_data);
+typedef int (*grpc_httpcli_post_override)(
+ grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
+ const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ grpc_httpcli_response_cb on_response, void *user_data);
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
grpc_httpcli_post_override post);
-#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H */
+#endif /* GRPC_CORE_HTTPCLI_HTTPCLI_H */
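
Usage sketch (illustrative; the deadline arithmetic and the externally managed exec_ctx/pollset are assumptions, not shown in this patch): the new entry points thread a grpc_exec_ctx through both the call and the response callback:

#include <string.h>

#include <grpc/support/time.h>

#include "src/core/httpcli/httpcli.h"

static void on_response(grpc_exec_ctx *exec_ctx, void *user_data,
                        const grpc_httpcli_response *response) {
  /* response is NULL on failure; its fields are only valid on success */
}

/* `ctx` and `pollset` are assumed to be initialized and polled elsewhere. */
static void get_sketch(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *ctx,
                       grpc_pollset *pollset) {
  grpc_httpcli_request req;
  memset(&req, 0, sizeof(req));
  req.host = (char *)"example.org";
  req.path = (char *)"/index.html";
  req.handshaker = &grpc_httpcli_plaintext; /* or &grpc_httpcli_ssl */
  grpc_httpcli_get(exec_ctx, ctx, pollset, &req,
                   gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                gpr_time_from_seconds(15, GPR_TIMESPAN)),
                   on_response, NULL);
}
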
diff --git a/src/core/httpcli/httpcli_security_connector.c b/src/core/httpcli/httpcli_security_connector.c
index ce0d3d5a70..156961a377 100644
--- a/src/core/httpcli/httpcli_security_connector.c
+++ b/src/core/httpcli/httpcli_security_connector.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,11 @@
*
*/
-#include "src/core/httpcli/httpcli_security_connector.h"
+#include "src/core/httpcli/httpcli.h"
#include <string.h>
-#include "src/core/security/secure_transport_setup.h"
+#include "src/core/security/handshake.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -58,26 +58,35 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) {
gpr_free(sc);
}
-static grpc_security_status httpcli_ssl_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
+static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
tsi_result result = TSI_OK;
- if (c->handshaker_factory == NULL) return GRPC_SECURITY_ERROR;
+ tsi_handshaker *handshaker;
+ if (c->handshaker_factory == NULL) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
+ return;
+ }
result = tsi_ssl_handshaker_factory_create_handshaker(
- c->handshaker_factory, c->secure_peer_name, handshaker);
+ c->handshaker_factory, c->secure_peer_name, &handshaker);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
- return GRPC_SECURITY_ERROR;
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
+ } else {
+ grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
+ nonsecure_endpoint, cb, user_data);
}
- return GRPC_SECURITY_OK;
}
-static grpc_security_status httpcli_ssl_check_peer(grpc_security_connector *sc,
- tsi_peer peer,
- grpc_security_check_cb cb,
- void *user_data) {
+static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc, tsi_peer peer,
+ grpc_security_peer_check_cb cb,
+ void *user_data) {
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
grpc_security_status status = GRPC_SECURITY_OK;
@@ -89,14 +98,14 @@ static grpc_security_status httpcli_ssl_check_peer(grpc_security_connector *sc,
c->secure_peer_name);
status = GRPC_SECURITY_ERROR;
}
+ cb(exec_ctx, user_data, status, NULL);
tsi_peer_destruct(&peer);
- return status;
}
static grpc_security_connector_vtable httpcli_ssl_vtable = {
- httpcli_ssl_destroy, httpcli_ssl_create_handshaker, httpcli_ssl_check_peer};
+ httpcli_ssl_destroy, httpcli_ssl_check_peer};
-grpc_security_status grpc_httpcli_ssl_channel_security_connector_create(
+static grpc_security_status httpcli_ssl_channel_security_connector_create(
const unsigned char *pem_root_certs, size_t pem_root_certs_size,
const char *secure_peer_name, grpc_channel_security_connector **sc) {
tsi_result result = TSI_OK;
@@ -112,7 +121,6 @@ grpc_security_status grpc_httpcli_ssl_channel_security_connector_create(
memset(c, 0, sizeof(grpc_httpcli_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
- c->base.base.is_client_side = 1;
c->base.base.vtable = &httpcli_ssl_vtable;
if (secure_peer_name != NULL) {
c->secure_peer_name = gpr_strdup(secure_peer_name);
@@ -127,6 +135,54 @@ grpc_security_status grpc_httpcli_ssl_channel_security_connector_create(
*sc = NULL;
return GRPC_SECURITY_ERROR;
}
+ c->base.do_handshake = httpcli_ssl_do_handshake;
*sc = &c->base;
return GRPC_SECURITY_OK;
}
+
+/* handshaker */
+
+typedef struct {
+ void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint);
+ void *arg;
+} on_done_closure;
+
+static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
+ grpc_security_status status,
+ grpc_endpoint *secure_endpoint,
+ grpc_auth_context *auth_context) {
+ on_done_closure *c = rp;
+ if (status != GRPC_SECURITY_OK) {
+ gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
+ c->func(exec_ctx, c->arg, NULL);
+ } else {
+ c->func(exec_ctx, c->arg, secure_endpoint);
+ }
+ gpr_free(c);
+}
+
+static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *tcp, const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint)) {
+ grpc_channel_security_connector *sc = NULL;
+ const unsigned char *pem_root_certs = NULL;
+ on_done_closure *c = gpr_malloc(sizeof(*c));
+ size_t pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
+ if (pem_root_certs == NULL || pem_root_certs_size == 0) {
+ gpr_log(GPR_ERROR, "Could not get default pem root certs.");
+ on_done(exec_ctx, arg, NULL);
+ gpr_free(c);
+ return;
+ }
+ c->func = on_done;
+ c->arg = arg;
+ GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
+ pem_root_certs, pem_root_certs_size, host, &sc) ==
+ GRPC_SECURITY_OK);
+ grpc_channel_security_connector_do_handshake(
+ exec_ctx, sc, tcp, on_secure_transport_setup_done, c);
+ GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli");
+}
+
+const grpc_httpcli_handshaker grpc_httpcli_ssl = {"https", ssl_handshake};
diff --git a/src/core/httpcli/httpcli_security_connector.h b/src/core/httpcli/httpcli_security_connector.h
deleted file mode 100644
index c50f25905e..0000000000
--- a/src/core/httpcli/httpcli_security_connector.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_SECURITY_CONNECTOR_H
-#define GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_SECURITY_CONNECTOR_H
-
-#include "src/core/security/security_connector.h"
-
-grpc_security_status grpc_httpcli_ssl_channel_security_connector_create(
- const unsigned char *pem_root_certs, size_t pem_root_certs_size,
- const char *secure_peer_name, grpc_channel_security_connector **sc);
-
-#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_SECURITY_CONNECTOR_H */
diff --git a/src/core/httpcli/parser.c b/src/core/httpcli/parser.c
index 7b2a62060c..c314f025a0 100644
--- a/src/core/httpcli/parser.c
+++ b/src/core/httpcli/parser.c
@@ -40,9 +40,9 @@
#include <grpc/support/useful.h>
static int handle_response_line(grpc_httpcli_parser *parser) {
- gpr_uint8 *beg = parser->cur_line;
- gpr_uint8 *cur = beg;
- gpr_uint8 *end = beg + parser->cur_line_length;
+ uint8_t *beg = parser->cur_line;
+ uint8_t *cur = beg;
+ uint8_t *end = beg + parser->cur_line_length;
if (cur == end || *cur++ != 'H') goto error;
if (cur == end || *cur++ != 'T') goto error;
@@ -77,9 +77,9 @@ static char *buf2str(void *buffer, size_t length) {
}
static int add_header(grpc_httpcli_parser *parser) {
- gpr_uint8 *beg = parser->cur_line;
- gpr_uint8 *cur = beg;
- gpr_uint8 *end = beg + parser->cur_line_length;
+ uint8_t *beg = parser->cur_line;
+ uint8_t *cur = beg;
+ uint8_t *end = beg + parser->cur_line_length;
grpc_httpcli_header hdr = {NULL, NULL};
GPR_ASSERT(cur != end);
@@ -96,13 +96,15 @@ static int add_header(grpc_httpcli_parser *parser) {
gpr_log(GPR_ERROR, "Didn't find ':' in header string");
goto error;
}
- hdr.key = buf2str(beg, cur - beg);
+ GPR_ASSERT(cur >= beg);
+ hdr.key = buf2str(beg, (size_t)(cur - beg));
cur++; /* skip : */
while (cur != end && (*cur == ' ' || *cur == '\t')) {
cur++;
}
- hdr.value = buf2str(cur, end - cur - 2);
+ GPR_ASSERT(end - cur >= 2);
+ hdr.value = buf2str(cur, (size_t)(end - cur) - 2);
if (parser->r.hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
@@ -137,15 +139,14 @@ static int finish_line(grpc_httpcli_parser *parser) {
}
break;
case GRPC_HTTPCLI_BODY:
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
parser->cur_line_length = 0;
return 1;
}
-static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
+static int addbyte(grpc_httpcli_parser *parser, uint8_t byte) {
switch (parser->state) {
case GRPC_HTTPCLI_INITIAL_RESPONSE:
case GRPC_HTTPCLI_HEADERS:
@@ -163,22 +164,18 @@ static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
} else {
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
case GRPC_HTTPCLI_BODY:
if (parser->r.body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
parser->r.body =
gpr_realloc((void *)parser->r.body, parser->body_capacity);
}
- ((char *)parser->r.body)[parser->r.body_length] = byte;
+ parser->r.body[parser->r.body_length] = (char)byte;
parser->r.body_length++;
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
-
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
void grpc_httpcli_parser_init(grpc_httpcli_parser *parser) {
diff --git a/src/core/httpcli/parser.h b/src/core/httpcli/parser.h
index 71280e7479..cd4a737245 100644
--- a/src/core/httpcli/parser.h
+++ b/src/core/httpcli/parser.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H
-#define GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H
+#ifndef GRPC_CORE_HTTPCLI_PARSER_H
+#define GRPC_CORE_HTTPCLI_PARSER_H
#include "src/core/httpcli/httpcli.h"
#include <grpc/support/port_platform.h>
@@ -51,14 +51,14 @@ typedef struct {
size_t body_capacity;
size_t hdr_capacity;
- gpr_uint8 cur_line[GRPC_HTTPCLI_MAX_HEADER_LENGTH];
+ uint8_t cur_line[GRPC_HTTPCLI_MAX_HEADER_LENGTH];
size_t cur_line_length;
} grpc_httpcli_parser;
-void grpc_httpcli_parser_init(grpc_httpcli_parser *parser);
-void grpc_httpcli_parser_destroy(grpc_httpcli_parser *parser);
+void grpc_httpcli_parser_init(grpc_httpcli_parser* parser);
+void grpc_httpcli_parser_destroy(grpc_httpcli_parser* parser);
-int grpc_httpcli_parser_parse(grpc_httpcli_parser *parser, gpr_slice slice);
-int grpc_httpcli_parser_eof(grpc_httpcli_parser *parser);
+int grpc_httpcli_parser_parse(grpc_httpcli_parser* parser, gpr_slice slice);
+int grpc_httpcli_parser_eof(grpc_httpcli_parser* parser);
-#endif /* GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H */
+#endif /* GRPC_CORE_HTTPCLI_PARSER_H */
diff --git a/src/core/iomgr/closure.c b/src/core/iomgr/closure.c
new file mode 100644
index 0000000000..3a96f7385f
--- /dev/null
+++ b/src/core/iomgr/closure.c
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/closure.h"
+
+#include <grpc/support/alloc.h>
+
+void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg) {
+ closure->cb = cb;
+ closure->cb_arg = cb_arg;
+ closure->final_data = 0;
+}
+
+void grpc_closure_list_add(grpc_closure_list *closure_list,
+ grpc_closure *closure, bool success) {
+ if (closure == NULL) return;
+ closure->final_data = (success != 0);
+ if (closure_list->head == NULL) {
+ closure_list->head = closure;
+ } else {
+ closure_list->tail->final_data |= (uintptr_t)closure;
+ }
+ closure_list->tail = closure;
+}
+
+bool grpc_closure_list_empty(grpc_closure_list closure_list) {
+ return closure_list.head == NULL;
+}
+
+void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
+ if (src->head == NULL) {
+ return;
+ }
+ if (dst->head == NULL) {
+ *dst = *src;
+ } else {
+ dst->tail->final_data |= (uintptr_t)src->head;
+ dst->tail = src->tail;
+ }
+ src->head = src->tail = NULL;
+}
+
+typedef struct {
+ grpc_iomgr_cb_func cb;
+ void *cb_arg;
+ grpc_closure wrapper;
+} wrapped_closure;
+
+static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ wrapped_closure *wc = arg;
+ grpc_iomgr_cb_func cb = wc->cb;
+ void *cb_arg = wc->cb_arg;
+ gpr_free(wc);
+ cb(exec_ctx, cb_arg, success);
+}
+
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
+ wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wc->cb = cb;
+ wc->cb_arg = cb_arg;
+ grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
+ return &wc->wrapper;
+}
+
+grpc_closure *grpc_closure_next(grpc_closure *closure) {
+ return (grpc_closure *)(closure->final_data & ~(uintptr_t)1);
+}
diff --git a/src/core/iomgr/closure.h b/src/core/iomgr/closure.h
new file mode 100644
index 0000000000..d5e1f455b9
--- /dev/null
+++ b/src/core/iomgr/closure.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_CLOSURE_H
+#define GRPC_CORE_IOMGR_CLOSURE_H
+
+#include <grpc/support/port_platform.h>
+#include <stdbool.h>
+
+struct grpc_closure;
+typedef struct grpc_closure grpc_closure;
+
+/* forward declaration for exec_ctx.h */
+struct grpc_exec_ctx;
+typedef struct grpc_exec_ctx grpc_exec_ctx;
+
+typedef struct grpc_closure_list {
+ grpc_closure *head;
+ grpc_closure *tail;
+} grpc_closure_list;
+
+/** gRPC Callback definition.
+ *
+ * \param arg Arbitrary input.
+ * \param success An indication of the state of the iomgr. On false, cleanup
+ *                actions should be taken (e.g., shutdown). */
+typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success);
+
+/** A closure over a grpc_iomgr_cb_func. */
+struct grpc_closure {
+ /** Bound callback. */
+ grpc_iomgr_cb_func cb;
+
+ /** Arguments to be passed to "cb". */
+ void *cb_arg;
+
+ /** Once enqueued, contains in the lower bit the success of the closure,
+ and in the upper bits the pointer to the next closure in the list.
+      Before enqueuing for execution, this is usable for scratch data. */
+ uintptr_t final_data;
+};
+
+/** Initializes \a closure with \a cb and \a cb_arg. */
+void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg);
+
+/* Creates a heap-allocated closure: avoid this except for very rare events */
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
+
+#define GRPC_CLOSURE_LIST_INIT \
+ { NULL, NULL }
+
+/** add \a closure to the end of \a list and set \a closure's success to \a
+ * success */
+void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
+ bool success);
+
+/** append all closures from \a src to \a dst and empty \a src. */
+void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
+
+/** return whether \a list is empty. */
+bool grpc_closure_list_empty(grpc_closure_list list);
+
+/** return the next pointer for a queued closure list */
+grpc_closure *grpc_closure_next(grpc_closure *closure);
+
+#endif /* GRPC_CORE_IOMGR_CLOSURE_H */
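
Illustrative sketch (not part of the diff) of how the final_data packing above is consumed: bit 0 carries the success flag set by grpc_closure_list_add, the remaining bits carry the next pointer, and grpc_closure_next masks the flag off. Draining a list therefore looks roughly like:

#include "src/core/iomgr/closure.h"

static void drain_closure_list(grpc_exec_ctx *exec_ctx,
                               grpc_closure_list *list) {
  grpc_closure *c = list->head;
  while (c != NULL) {
    grpc_closure *next = grpc_closure_next(c); /* strip the success bit */
    bool success = (c->final_data & 1) != 0;   /* bit 0 set by list_add */
    c->cb(exec_ctx, c->cb_arg, success);
    c = next;
  }
  list->head = list->tail = NULL;
}
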
diff --git a/src/core/iomgr/endpoint.c b/src/core/iomgr/endpoint.c
index 96487958a7..bd64707669 100644
--- a/src/core/iomgr/endpoint.c
+++ b/src/core/iomgr/endpoint.c
@@ -33,23 +33,35 @@
#include "src/core/iomgr/endpoint.h"
-void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
- void *user_data) {
- ep->vtable->notify_on_read(ep, cb, user_data);
+void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ gpr_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->read(exec_ctx, ep, slices, cb);
}
-grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep,
- gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_write_cb cb,
- void *user_data) {
- return ep->vtable->write(ep, slices, nslices, cb, user_data);
+void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ gpr_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->write(exec_ctx, ep, slices, cb);
}
-void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
- ep->vtable->add_to_pollset(ep, pollset);
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
+ ep->vtable->add_to_pollset(exec_ctx, ep, pollset);
}
-void grpc_endpoint_shutdown(grpc_endpoint *ep) { ep->vtable->shutdown(ep); }
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
+}
+
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ ep->vtable->shutdown(exec_ctx, ep);
+}
-void grpc_endpoint_destroy(grpc_endpoint *ep) { ep->vtable->destroy(ep); }
+void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ ep->vtable->destroy(exec_ctx, ep);
+}
+
+char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
+ return ep->vtable->get_peer(ep);
+}
diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h
index 881e851800..788f3ac5bc 100644
--- a/src/core/iomgr/endpoint.h
+++ b/src/core/iomgr/endpoint.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,13 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_H
-#define GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_H
+#ifndef GRPC_CORE_IOMGR_ENDPOINT_H
+#define GRPC_CORE_IOMGR_ENDPOINT_H
#include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/pollset_set.h"
#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
#include <grpc/support/time.h>
/* An endpoint caps a streaming channel between two communicating processes.
@@ -44,63 +46,57 @@
typedef struct grpc_endpoint grpc_endpoint;
typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
-typedef enum grpc_endpoint_cb_status {
- GRPC_ENDPOINT_CB_OK = 0, /* Call completed successfully */
- GRPC_ENDPOINT_CB_EOF, /* Call completed successfully, end of file reached */
- GRPC_ENDPOINT_CB_SHUTDOWN, /* Call interrupted by shutdown */
- GRPC_ENDPOINT_CB_ERROR /* Call interrupted by socket error */
-} grpc_endpoint_cb_status;
-
-typedef enum grpc_endpoint_write_status {
- GRPC_ENDPOINT_WRITE_DONE, /* completed immediately, cb won't be called */
- GRPC_ENDPOINT_WRITE_PENDING, /* cb will be called when completed */
- GRPC_ENDPOINT_WRITE_ERROR /* write errored out, cb won't be called */
-} grpc_endpoint_write_status;
-
-typedef void (*grpc_endpoint_read_cb)(void *user_data, gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_cb_status error);
-typedef void (*grpc_endpoint_write_cb)(void *user_data,
- grpc_endpoint_cb_status error);
-
struct grpc_endpoint_vtable {
- void (*notify_on_read)(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
- void *user_data);
- grpc_endpoint_write_status (*write)(grpc_endpoint *ep, gpr_slice *slices,
- size_t nslices, grpc_endpoint_write_cb cb,
- void *user_data);
- void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset);
- void (*shutdown)(grpc_endpoint *ep);
- void (*destroy)(grpc_endpoint *ep);
+ void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
+ void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
+ void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset);
+ void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ char *(*get_peer)(grpc_endpoint *ep);
};
-/* When data is available on the connection, calls the callback with slices. */
-void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
- void *user_data);
+/* When data is available on the connection, calls the callback with slices.
+ Callback success indicates that the endpoint can accept more reads, failure
+ indicates the endpoint is closed.
+ Valid slices may be placed into \a slices even on callback success == 0. */
+void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
+
+char *grpc_endpoint_get_peer(grpc_endpoint *ep);
/* Write slices out to the socket.
If the connection is ready for more data after the end of the call, it
- returns GRPC_ENDPOINT_WRITE_DONE.
- Otherwise it returns GRPC_ENDPOINT_WRITE_PENDING and calls cb when the
- connection is ready for more data. */
-grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep,
- gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_write_cb cb,
- void *user_data);
+      completed immediately and \a cb is scheduled with success == 1.
+      Otherwise \a cb is called later, either once the connection is ready
+      for more data or with success == 0 on error.
+ \a slices may be mutated at will by the endpoint until cb is called.
+ No guarantee is made to the content of slices after a write EXCEPT that
+ it is a valid slice buffer.
+ */
+void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
/* Causes any pending read/write callbacks to run immediately with
- GRPC_ENDPOINT_CB_SHUTDOWN status */
-void grpc_endpoint_shutdown(grpc_endpoint *ep);
-void grpc_endpoint_destroy(grpc_endpoint *ep);
+ success==0 */
+void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
/* Add an endpoint to a pollset, so that when the pollset is polled, events from
this endpoint are considered */
-void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset);
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset);
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
+ grpc_pollset_set *pollset_set);
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
-#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_H */
+#endif /* GRPC_CORE_IOMGR_ENDPOINT_H */
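
Skeleton sketch (all names hypothetical): an endpoint implementation now fills the vtable with exec_ctx-aware operations and reports completion by scheduling the supplied closures rather than returning status enums:

#include <grpc/support/string_util.h>

#include "src/core/iomgr/endpoint.h"

static void my_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                    gpr_slice_buffer *slices, grpc_closure *cb) {
  /* fill `slices` with incoming data, then schedule `cb` with the result */
}
static void my_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     gpr_slice_buffer *slices, grpc_closure *cb) {
  /* consume `slices`, then schedule `cb` once the write has completed */
}
static void my_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                              grpc_pollset *pollset) {}
static void my_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                  grpc_pollset_set *pollset_set) {}
static void my_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {}
static void my_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {}
static char *my_get_peer(grpc_endpoint *ep) { return gpr_strdup("sketch:0"); }

static const grpc_endpoint_vtable my_vtable = {
    my_read,     my_write,   my_add_to_pollset, my_add_to_pollset_set,
    my_shutdown, my_destroy, my_get_peer};

static grpc_endpoint my_endpoint = {&my_vtable};
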
diff --git a/src/core/iomgr/endpoint_pair.h b/src/core/iomgr/endpoint_pair.h
index 25087be0c7..59015d8ffb 100644
--- a/src/core/iomgr/endpoint_pair.h
+++ b/src/core/iomgr/endpoint_pair.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_PAIR_H
-#define GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_PAIR_H
+#ifndef GRPC_CORE_IOMGR_ENDPOINT_PAIR_H
+#define GRPC_CORE_IOMGR_ENDPOINT_PAIR_H
#include "src/core/iomgr/endpoint.h"
@@ -44,4 +44,4 @@ typedef struct {
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
size_t read_slice_size);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_PAIR_H */
+#endif /* GRPC_CORE_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/iomgr/endpoint_pair_posix.c b/src/core/iomgr/endpoint_pair_posix.c
index fa2d2555d6..56f6f146fd 100644
--- a/src/core/iomgr/endpoint_pair_posix.c
+++ b/src/core/iomgr/endpoint_pair_posix.c
@@ -36,6 +36,7 @@
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/endpoint_pair.h"
+#include "src/core/iomgr/socket_utils_posix.h"
#include <errno.h>
#include <fcntl.h>
@@ -56,6 +57,8 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
flags = fcntl(sv[1], F_GETFL, 0);
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[0]));
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]));
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
@@ -66,12 +69,12 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
create_sockets(sv);
gpr_asprintf(&final_name, "%s:client", name);
- p.client =
- grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size);
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
+ "socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server =
- grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size);
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
+ "socketpair-client");
gpr_free(final_name);
return p;
}
diff --git a/src/core/iomgr/endpoint_pair_windows.c b/src/core/iomgr/endpoint_pair_windows.c
index c6790b2937..db9d092dca 100644
--- a/src/core/iomgr/endpoint_pair_windows.c
+++ b/src/core/iomgr/endpoint_pair_windows.c
@@ -52,21 +52,26 @@ static void create_sockets(SOCKET sv[2]) {
SOCKADDR_IN addr;
int addr_len = sizeof(addr);
- lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
+ lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
+ WSA_FLAG_OVERLAPPED);
GPR_ASSERT(lst_sock != INVALID_SOCKET);
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_family = AF_INET;
- GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR);
+ GPR_ASSERT(bind(lst_sock, (struct sockaddr *)&addr, sizeof(addr)) !=
+ SOCKET_ERROR);
GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
- GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) != SOCKET_ERROR);
+ GPR_ASSERT(getsockname(lst_sock, (struct sockaddr *)&addr, &addr_len) !=
+ SOCKET_ERROR);
- cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
+ cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
+ WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
- GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL, NULL, NULL) == 0);
- svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
+ GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
+ NULL, NULL, NULL) == 0);
+ svr_sock = accept(lst_sock, (struct sockaddr *)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);
closesocket(lst_sock);
@@ -77,12 +82,15 @@ static void create_sockets(SOCKET sv[2]) {
sv[0] = svr_sock;
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+ size_t read_slice_size) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
- p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"));
- p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"));
+ p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
+ "endpoint:server");
+ p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
+ "endpoint:client");
return p;
}
diff --git a/src/core/iomgr/exec_ctx.c b/src/core/iomgr/exec_ctx.c
new file mode 100644
index 0000000000..893fe4515c
--- /dev/null
+++ b/src/core/iomgr/exec_ctx.c
@@ -0,0 +1,151 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/exec_ctx.h"
+
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+
+#include "src/core/profiling/timers.h"
+
+#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
+bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
+ bool did_something = 0;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
+ while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_closure *c = exec_ctx->closure_list.head;
+ exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
+ while (c != NULL) {
+ bool success = (bool)(c->final_data & 1);
+ grpc_closure *next = (grpc_closure *)(c->final_data & ~(uintptr_t)1);
+ did_something = true;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
+ c->cb(exec_ctx, c->cb_arg, success);
+ GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
+ c = next;
+ }
+ }
+ GPR_TIMER_END("grpc_exec_ctx_flush", 0);
+ return did_something;
+}
+
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
+ grpc_exec_ctx_flush(exec_ctx);
+}
+
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ bool success,
+ grpc_workqueue *offload_target_or_null) {
+ GPR_ASSERT(offload_target_or_null == NULL);
+ grpc_closure_list_add(&exec_ctx->closure_list, closure, success);
+}
+
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *list,
+ grpc_workqueue *offload_target_or_null) {
+ GPR_ASSERT(offload_target_or_null == NULL);
+ grpc_closure_list_move(list, &exec_ctx->closure_list);
+}
+
+void grpc_exec_ctx_global_init(void) {}
+void grpc_exec_ctx_global_shutdown(void) {}
+#else
+static gpr_mu g_mu;
+static gpr_cv g_cv;
+static int g_threads = 0;
+
+static void run_closure(void *arg) {
+ grpc_closure *closure = arg;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0);
+ grpc_exec_ctx_finish(&exec_ctx);
+ gpr_mu_lock(&g_mu);
+ if (--g_threads == 0) {
+ gpr_cv_signal(&g_cv);
+ }
+ gpr_mu_unlock(&g_mu);
+}
+
+static void start_closure(grpc_closure *closure) {
+ gpr_thd_id id;
+ gpr_mu_lock(&g_mu);
+ g_threads++;
+ gpr_mu_unlock(&g_mu);
+ gpr_thd_new(&id, run_closure, closure, NULL);
+}
+
+bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; }
+
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {}
+
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ bool success,
+ grpc_workqueue *offload_target_or_null) {
+ GPR_ASSERT(offload_target_or_null == NULL);
+ if (closure == NULL) return;
+ closure->final_data = success;
+ start_closure(closure);
+}
+
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *list,
+ grpc_workqueue *offload_target_or_null) {
+ GPR_ASSERT(offload_target_or_null == NULL);
+ if (list == NULL) return;
+ grpc_closure *p = list->head;
+ while (p) {
+ grpc_closure *start = p;
+ p = grpc_closure_next(start);
+ start_closure(start);
+ }
+ grpc_closure_list r = GRPC_CLOSURE_LIST_INIT;
+ *list = r;
+}
+
+void grpc_exec_ctx_global_init(void) {
+ gpr_mu_init(&g_mu);
+ gpr_cv_init(&g_cv);
+}
+
+void grpc_exec_ctx_global_shutdown(void) {
+ gpr_mu_lock(&g_mu);
+ while (g_threads != 0) {
+ gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+ }
+ gpr_mu_unlock(&g_mu);
+
+ gpr_mu_destroy(&g_mu);
+ gpr_cv_destroy(&g_cv);
+}
+#endif
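
The flush loop above walks a singly linked closure list in which final_data packs the next pointer and the success flag into a single word: the low bit holds the flag and the remaining bits hold the pointer, which relies on closures being at least two-byte aligned. A standalone sketch of just that packing, using a hypothetical node type rather than grpc_closure:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for grpc_closure: only the packed field matters. */
    typedef struct node {
      uintptr_t final_data; /* next pointer in the upper bits, flag in bit 0 */
    } node;

    static void pack(node *n, node *next, bool success) {
      n->final_data = (uintptr_t)next | (uintptr_t)success;
    }

    static node *unpack_next(const node *n) {
      return (node *)(n->final_data & ~(uintptr_t)1);
    }

    static bool unpack_success(const node *n) { return (n->final_data & 1) != 0; }

    int main(void) {
      node a, b;
      pack(&b, NULL, false);
      pack(&a, &b, true);
      printf("a: success=%d next=%p\n", unpack_success(&a), (void *)unpack_next(&a));
      printf("b: success=%d next=%p\n", unpack_success(&b), (void *)unpack_next(&b));
      return 0;
    }
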
diff --git a/src/core/iomgr/exec_ctx.h b/src/core/iomgr/exec_ctx.h
new file mode 100644
index 0000000000..07b54a0ab8
--- /dev/null
+++ b/src/core/iomgr/exec_ctx.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_EXEC_CTX_H
+#define GRPC_CORE_IOMGR_EXEC_CTX_H
+
+#include "src/core/iomgr/closure.h"
+
+/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */
+
+/** A workqueue represents a list of work to be executed asynchronously.
+ Forward declared here to avoid a circular dependency with workqueue.h. */
+struct grpc_workqueue;
+typedef struct grpc_workqueue grpc_workqueue;
+
+#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
+/** Execution context.
+ * A bag of data that collects information along a callstack.
+ * Generally created at public API entry points, and passed down as
+ * pointer to child functions that manipulate it.
+ *
+ * Specific responsibilities (this may grow in the future):
+ * - track a list of work that needs to be delayed until the top of the
+ * call stack (this provides a convenient mechanism to run callbacks
+ * without worrying about locking issues)
+ *
+ * CONVENTIONS:
+ * Instances of this must ALWAYS be constructed on the stack, never
+ * heap allocated. Instances and pointers to them must always be called
+ * exec_ctx. Instances are always passed as the first argument
+ * to a function that takes it, and always as a pointer (grpc_exec_ctx
+ * is never copied).
+ */
+struct grpc_exec_ctx {
+ grpc_closure_list closure_list;
+};
+
+#define GRPC_EXEC_CTX_INIT \
+ { GRPC_CLOSURE_LIST_INIT }
+#else
+struct grpc_exec_ctx {
+ int unused;
+};
+#define GRPC_EXEC_CTX_INIT \
+ { 0 }
+#endif
+
+/** Flush any work that has been enqueued onto this grpc_exec_ctx.
+ * Caller must guarantee that no interfering locks are held.
+ * Returns true if work was performed, false otherwise. */
+bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
+/** Finish any pending work for a grpc_exec_ctx. Must be called before
+ * the instance is destroyed, or work may be lost. */
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
+/** Add a closure to be executed at the next flush/finish point */
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ bool success,
+ grpc_workqueue *offload_target_or_null);
+/** Add a list of closures to be executed at the next flush/finish point.
+ * Leaves \a list empty. */
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *list,
+ grpc_workqueue *offload_target_or_null);
+
+void grpc_exec_ctx_global_init(void);
+void grpc_exec_ctx_global_shutdown(void);
+
+#endif /* GRPC_CORE_IOMGR_EXEC_CTX_H */
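
A minimal sketch of the conventions documented above, assuming only this header plus a hypothetical callee named do_work:

    #include "src/core/iomgr/exec_ctx.h"

    /* Hypothetical callee: by convention it takes the exec_ctx as its first
       argument and enqueues any deferred work onto it. */
    void do_work(grpc_exec_ctx *exec_ctx);

    void public_api_entry_point(void) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; /* always stack-allocated */
      do_work(&exec_ctx);   /* callees enqueue closures rather than run them */
      grpc_exec_ctx_finish(&exec_ctx); /* drain everything before returning */
    }
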
diff --git a/src/core/iomgr/executor.c b/src/core/iomgr/executor.c
new file mode 100644
index 0000000000..f22d8f30ac
--- /dev/null
+++ b/src/core/iomgr/executor.c
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/executor.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include "src/core/iomgr/exec_ctx.h"
+
+typedef struct grpc_executor_data {
+ int busy; /**< is the thread currently running? */
+ int shutting_down; /**< has \a grpc_shutdown() been invoked? */
+ int pending_join; /**< has the thread finished but not been joined? */
+ grpc_closure_list closures; /**< collection of pending work */
+ gpr_thd_id tid; /**< thread id of the thread, only valid if \a busy or \a
+ pending_join are true */
+ gpr_thd_options options;
+ gpr_mu mu;
+} grpc_executor;
+
+static grpc_executor g_executor;
+
+void grpc_executor_init() {
+ memset(&g_executor, 0, sizeof(grpc_executor));
+ gpr_mu_init(&g_executor.mu);
+ g_executor.options = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&g_executor.options);
+}
+
+/* thread body */
+static void closure_exec_thread_func(void *ignored) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (1) {
+ gpr_mu_lock(&g_executor.mu);
+ if (g_executor.shutting_down != 0) {
+ gpr_mu_unlock(&g_executor.mu);
+ break;
+ }
+ if (grpc_closure_list_empty(g_executor.closures)) {
+ /* no more work, time to die */
+ GPR_ASSERT(g_executor.busy == 1);
+ g_executor.busy = 0;
+ gpr_mu_unlock(&g_executor.mu);
+ break;
+ } else {
+ grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+ }
+ gpr_mu_unlock(&g_executor.mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+/* Spawn the thread if new work has arrived and no thread is up */
+static void maybe_spawn_locked() {
+ if (grpc_closure_list_empty(g_executor.closures) == 1) {
+ return;
+ }
+ if (g_executor.shutting_down == 1) {
+ return;
+ }
+
+ if (g_executor.busy != 0) {
+ /* Thread still working. New work will be picked up by already running
+ * thread. Not spawning anything. */
+ return;
+ } else if (g_executor.pending_join != 0) {
+ /* Pickup the remains of the previous incarnations of the thread. */
+ gpr_thd_join(g_executor.tid);
+ g_executor.pending_join = 0;
+ }
+
+ /* All previous instances of the thread should have been joined at this point.
+ * Spawn time! */
+ g_executor.busy = 1;
+ gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
+ &g_executor.options);
+ g_executor.pending_join = 1;
+}
+
+void grpc_executor_enqueue(grpc_closure *closure, bool success) {
+ gpr_mu_lock(&g_executor.mu);
+ if (g_executor.shutting_down == 0) {
+ grpc_closure_list_add(&g_executor.closures, closure, success);
+ maybe_spawn_locked();
+ }
+ gpr_mu_unlock(&g_executor.mu);
+}
+
+void grpc_executor_shutdown() {
+ int pending_join;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ gpr_mu_lock(&g_executor.mu);
+ pending_join = g_executor.pending_join;
+ g_executor.shutting_down = 1;
+ gpr_mu_unlock(&g_executor.mu);
+ /* we can release the lock at this point despite the access to the closure
+ * list below because we aren't accepting new work */
+
+ /* Execute pending callbacks, some may be performing cleanups */
+ grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
+ if (pending_join) {
+ gpr_thd_join(g_executor.tid);
+ }
+ gpr_mu_destroy(&g_executor.mu);
+}
diff --git a/src/core/iomgr/executor.h b/src/core/iomgr/executor.h
new file mode 100644
index 0000000000..f66b3560e3
--- /dev/null
+++ b/src/core/iomgr/executor.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_EXECUTOR_H
+#define GRPC_CORE_IOMGR_EXECUTOR_H
+
+#include "src/core/iomgr/closure.h"
+
+/** Initialize the global executor.
+ *
+ * This mechanism is meant to outsource work (grpc_closure instances) to a
+ * thread, for those cases where blocking isn't an option but there isn't a
+ * non-blocking solution available. */
+void grpc_executor_init();
+
+/** Enqueue \a closure so that its callback is eventually executed on a
+ * separate thread */
+void grpc_executor_enqueue(grpc_closure *closure, bool success);
+
+/** Shutdown the executor, running all pending work as part of the call */
+void grpc_executor_shutdown();
+
+#endif /* GRPC_CORE_IOMGR_EXECUTOR_H */
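
A usage sketch, assuming grpc_closure_init from closure.h with its (closure, cb, cb_arg) signature; the callback name and the wrapper are illustrative:

    #include <grpc/support/log.h>
    #include "src/core/iomgr/closure.h"
    #include "src/core/iomgr/executor.h"

    /* Illustrative callback: runs on the executor's background thread. */
    static void flush_cache(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
      gpr_log(GPR_INFO, "flush_cache ran (success=%d)", success);
    }

    static grpc_closure flush_closure;

    void executor_example(void) {
      grpc_executor_init();
      grpc_closure_init(&flush_closure, flush_cache, NULL);
      grpc_executor_enqueue(&flush_closure, true);
      grpc_executor_shutdown(); /* runs anything still queued before returning */
    }
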
diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c
index 1297145d1a..3edafa0b07 100644
--- a/src/core/iomgr/fd_posix.c
+++ b/src/core/iomgr/fd_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,12 +43,13 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-enum descriptor_state {
- NOT_READY = 0,
- READY = 1
-}; /* or a pointer to a closure to call */
+#include "src/core/iomgr/pollset_posix.h"
+
+#define CLOSURE_NOT_READY ((grpc_closure *)0)
+#define CLOSURE_READY ((grpc_closure *)1)
/* We need to keep a freelist not because of any concerns of malloc performance
* but instead so that implementations with multiple threads in (for example)
@@ -71,6 +72,9 @@ static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
static void freelist_fd(grpc_fd *fd) {
+ // Note that this function must be called after a release store (or
+ // full-barrier operation) on refst so that prior actions on the fd are
+ // ordered before the fd becomes visible to the freelist
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
fd_freelist = fd;
@@ -88,26 +92,30 @@ static grpc_fd *alloc_fd(int fd) {
gpr_mu_unlock(&fd_freelist_mu);
if (r == NULL) {
r = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&r->set_state_mu);
- gpr_mu_init(&r->watcher_mu);
+ gpr_mu_init(&r->mu);
}
- gpr_atm_rel_store(&r->refst, 1);
- gpr_atm_rel_store(&r->readst, NOT_READY);
- gpr_atm_rel_store(&r->writest, NOT_READY);
- gpr_atm_rel_store(&r->shutdown, 0);
+ r->shutdown = 0;
+ r->read_closure = CLOSURE_NOT_READY;
+ r->write_closure = CLOSURE_NOT_READY;
r->fd = fd;
r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
&r->inactive_watcher_root;
r->freelist_next = NULL;
r->read_watcher = r->write_watcher = NULL;
r->on_done_closure = NULL;
+ r->closed = 0;
+ r->released = 0;
+ // The last operation on r before returning it should be a release-store
+ // so that all the above fields are globally visible before the value of
+ // r could escape to another thread. Our refcount itself needs a release-store
+ // so use this
+ gpr_atm_rel_store(&r->refst, 1);
return r;
}
static void destroy(grpc_fd *fd) {
- gpr_mu_destroy(&fd->set_state_mu);
- gpr_mu_destroy(&fd->watcher_mu);
+ gpr_mu_destroy(&fd->mu);
gpr_free(fd);
}
@@ -116,7 +124,7 @@ static void destroy(grpc_fd *fd) {
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
+ gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
@@ -149,6 +157,8 @@ static void unref_by(grpc_fd *fd, int n) {
void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
void grpc_fd_global_shutdown(void) {
+ gpr_mu_lock(&fd_freelist_mu);
+ gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
grpc_fd *fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
@@ -159,7 +169,13 @@ void grpc_fd_global_shutdown(void) {
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
- grpc_iomgr_register_object(&r->iomgr_object, name);
+ char *name2;
+ gpr_asprintf(&name2, "%s fd=%d", name, fd);
+ grpc_iomgr_register_object(&r->iomgr_object, name2);
+ gpr_free(name2);
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
+#endif
return r;
}
@@ -167,55 +183,78 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
+static void pollset_kick_locked(grpc_fd_watcher *watcher) {
+ gpr_mu_lock(&watcher->pollset->mu);
+ GPR_ASSERT(watcher->worker);
+ grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ gpr_mu_unlock(&watcher->pollset->mu);
+}
+
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
+ pollset_kick_locked(fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- grpc_pollset_force_kick(fd->read_watcher->pollset);
+ pollset_kick_locked(fd->read_watcher);
} else if (fd->write_watcher) {
- grpc_pollset_force_kick(fd->write_watcher->pollset);
+ pollset_kick_locked(fd->write_watcher);
}
}
-static void maybe_wake_one_watcher(grpc_fd *fd) {
- gpr_mu_lock(&fd->watcher_mu);
- maybe_wake_one_watcher_locked(fd);
- gpr_mu_unlock(&fd->watcher_mu);
-}
-
static void wake_all_watchers_locked(grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- grpc_pollset_force_kick(watcher->pollset);
+ pollset_kick_locked(watcher);
}
if (fd->read_watcher) {
- grpc_pollset_force_kick(fd->read_watcher->pollset);
+ pollset_kick_locked(fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- grpc_pollset_force_kick(fd->write_watcher->pollset);
+ pollset_kick_locked(fd->write_watcher);
}
}
static int has_watchers(grpc_fd *fd) {
- return fd->read_watcher != NULL || fd->write_watcher != NULL || fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
+ return fd->read_watcher != NULL || fd->write_watcher != NULL ||
+ fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}
-void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
- const char *reason) {
+static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ fd->closed = 1;
+ if (!fd->released) {
+ close(fd->fd);
+ } else {
+ grpc_remove_fd_from_all_epoll_sets(fd->fd);
+ }
+ grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, true, NULL);
+}
+
+int grpc_fd_wrapped_fd(grpc_fd *fd) {
+ if (fd->released || fd->closed) {
+ return -1;
+ } else {
+ return fd->fd;
+ }
+}
+
+void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
+ int *release_fd, const char *reason) {
fd->on_done_closure = on_done;
- shutdown(fd->fd, SHUT_RDWR);
+ fd->released = release_fd != NULL;
+ if (!fd->released) {
+ shutdown(fd->fd, SHUT_RDWR);
+ } else {
+ *release_fd = fd->fd;
+ }
+ gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
- gpr_mu_lock(&fd->watcher_mu);
if (!has_watchers(fd)) {
- close(fd->fd);
- if (fd->on_done_closure) {
- grpc_iomgr_add_callback(fd->on_done_closure);
- }
+ close_fd_locked(exec_ctx, fd);
} else {
wake_all_watchers_locked(fd);
}
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
}
@@ -235,166 +274,127 @@ void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
-static void process_callback(grpc_iomgr_closure *closure, int success,
- int allow_synchronous_callback) {
- if (allow_synchronous_callback) {
- closure->cb(closure->cb_arg, success);
+static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st, grpc_closure *closure) {
+ if (*st == CLOSURE_NOT_READY) {
+ /* not ready ==> switch to a waiting state by setting the closure */
+ *st = closure;
+ } else if (*st == CLOSURE_READY) {
+ /* already ready ==> queue the closure to run immediately */
+ *st = CLOSURE_NOT_READY;
+ grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown, NULL);
+ maybe_wake_one_watcher_locked(fd);
} else {
- grpc_iomgr_add_delayed_callback(closure, success);
- }
-}
-
-static void process_callbacks(grpc_iomgr_closure *callbacks, size_t n,
- int success, int allow_synchronous_callback) {
- size_t i;
- for (i = 0; i < n; i++) {
- process_callback(callbacks + i, success, allow_synchronous_callback);
- }
-}
-
-static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
- int allow_synchronous_callback) {
- switch (gpr_atm_acq_load(st)) {
- case NOT_READY:
- /* There is no race if the descriptor is already ready, so we skip
- the interlocked op in that case. As long as the app doesn't
- try to set the same upcall twice (which it shouldn't) then
- oldval should never be anything other than READY or NOT_READY. We
- don't
- check for user error on the fast path. */
- if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
- /* swap was successful -- the closure will run after the next
- set_ready call. NOTE: we don't have an ABA problem here,
- since we should never have concurrent calls to the same
- notify_on function. */
- maybe_wake_one_watcher(fd);
- return;
- }
- /* swap was unsuccessful due to an intervening set_ready call.
- Fall through to the READY code below */
- case READY:
- GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
- gpr_atm_rel_store(st, NOT_READY);
- process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
- allow_synchronous_callback);
- return;
- default: /* WAITING */
- /* upcallptr was set to a different closure. This is an error! */
- gpr_log(GPR_ERROR,
- "User called a notify_on function with a previous callback still "
- "pending");
- abort();
+ /* upcallptr was set to a different closure. This is an error! */
+ gpr_log(GPR_ERROR,
+ "User called a notify_on function with a previous callback still "
+ "pending");
+ abort();
}
- gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
- abort();
}
-static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
- size_t *ncallbacks) {
- gpr_intptr state = gpr_atm_acq_load(st);
-
- switch (state) {
- case READY:
- /* duplicate ready, ignore */
- return;
- case NOT_READY:
- if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
- /* swap was successful -- the closure will run after the next
- notify_on call. */
- return;
- }
- /* swap was unsuccessful due to an intervening set_ready call.
- Fall through to the WAITING code below */
- state = gpr_atm_acq_load(st);
- default: /* waiting */
- GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
- gpr_atm_no_barrier_load(st) != NOT_READY);
- callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
- gpr_atm_rel_store(st, NOT_READY);
- return;
+/* returns 1 if state becomes not ready */
+static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st) {
+ if (*st == CLOSURE_READY) {
+ /* duplicate ready ==> ignore */
+ return 0;
+ } else if (*st == CLOSURE_NOT_READY) {
+ /* not ready, and not waiting ==> flag ready */
+ *st = CLOSURE_READY;
+ return 0;
+ } else {
+ /* waiting ==> queue closure */
+ grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown, NULL);
+ *st = CLOSURE_NOT_READY;
+ return 1;
}
}
-static void set_ready(grpc_fd *fd, gpr_atm *st,
- int allow_synchronous_callback) {
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
- int success;
- grpc_iomgr_closure *closure;
- size_t ncb = 0;
-
- gpr_mu_lock(&fd->set_state_mu);
- set_ready_locked(st, &closure, &ncb);
- gpr_mu_unlock(&fd->set_state_mu);
- success = !gpr_atm_acq_load(&fd->shutdown);
- GPR_ASSERT(ncb <= 1);
- if (ncb > 0) {
- process_callbacks(closure, ncb, success, allow_synchronous_callback);
- }
+ gpr_mu_lock(&fd->mu);
+ set_ready_locked(exec_ctx, fd, st);
+ gpr_mu_unlock(&fd->mu);
}
-void grpc_fd_shutdown(grpc_fd *fd) {
- size_t ncb = 0;
- gpr_mu_lock(&fd->set_state_mu);
- GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
- gpr_atm_rel_store(&fd->shutdown, 1);
- set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
- set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
- gpr_mu_unlock(&fd->set_state_mu);
- GPR_ASSERT(ncb <= 2);
- process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
- 0 /* GPR_FALSE */);
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ GPR_ASSERT(!fd->shutdown);
+ fd->shutdown = 1;
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ gpr_mu_unlock(&fd->mu);
}
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
- notify_on(fd, &fd->readst, closure, 0);
+void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
+ gpr_mu_unlock(&fd->mu);
}
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
- notify_on(fd, &fd->writest, closure, 0);
+void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
+ gpr_mu_unlock(&fd->mu);
}
-gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- gpr_uint32 read_mask, gpr_uint32 write_mask,
- grpc_fd_watcher *watcher) {
- gpr_uint32 mask = 0;
+uint32_t grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher *watcher) {
+ uint32_t mask = 0;
+ grpc_closure *cur;
+ int requested;
/* keep track of pollers that have requested our events, in case they change
*/
GRPC_FD_REF(fd, "poll");
- gpr_mu_lock(&fd->watcher_mu);
+ gpr_mu_lock(&fd->mu);
+
/* if we are shutdown, then don't add to the watcher set */
- if (gpr_atm_no_barrier_load(&fd->shutdown)) {
+ if (fd->shutdown) {
watcher->fd = NULL;
watcher->pollset = NULL;
- gpr_mu_unlock(&fd->watcher_mu);
+ watcher->worker = NULL;
+ gpr_mu_unlock(&fd->mu);
+ GRPC_FD_UNREF(fd, "poll");
return 0;
}
+
/* if there is nobody polling for read, but we need to, then start doing so */
- if (read_mask && !fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
+ cur = fd->read_closure;
+ requested = cur != CLOSURE_READY;
+ if (read_mask && fd->read_watcher == NULL && requested) {
fd->read_watcher = watcher;
mask |= read_mask;
}
/* if there is nobody polling for write, but we need to, then start doing so
*/
- if (write_mask && !fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
+ cur = fd->write_closure;
+ requested = cur != CLOSURE_READY;
+ if (write_mask && fd->write_watcher == NULL && requested) {
fd->write_watcher = watcher;
mask |= write_mask;
}
/* if not polling, remember this watcher in case we need someone to later */
- if (mask == 0) {
+ if (mask == 0 && worker != NULL) {
watcher->next = &fd->inactive_watcher_root;
watcher->prev = watcher->next->prev;
watcher->next->prev = watcher->prev->next = watcher;
}
watcher->pollset = pollset;
+ watcher->worker = worker;
watcher->fd = fd;
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
return mask;
}
-void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
+void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
+ int got_read, int got_write) {
int was_polling = 0;
int kick = 0;
grpc_fd *fd = watcher->fd;
@@ -403,44 +403,56 @@ void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
return;
}
- gpr_mu_lock(&fd->watcher_mu);
+ gpr_mu_lock(&fd->mu);
+
if (watcher == fd->read_watcher) {
/* remove read watcher, kick if we still need a read */
was_polling = 1;
- kick = kick || !got_read;
+ if (!got_read) {
+ kick = 1;
+ }
fd->read_watcher = NULL;
}
if (watcher == fd->write_watcher) {
/* remove write watcher, kick if we still need a write */
was_polling = 1;
- kick = kick || !got_write;
+ if (!got_write) {
+ kick = 1;
+ }
fd->write_watcher = NULL;
}
- if (!was_polling) {
+ if (!was_polling && watcher->worker != NULL) {
/* remove from inactive list */
watcher->next->prev = watcher->prev;
watcher->prev->next = watcher->next;
}
+ if (got_read) {
+ if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
+ kick = 1;
+ }
+ }
+ if (got_write) {
+ if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
+ kick = 1;
+ }
+ }
if (kick) {
maybe_wake_one_watcher_locked(fd);
}
- if (grpc_fd_is_orphaned(fd) && !has_watchers(fd)) {
- close(fd->fd);
- if (fd->on_done_closure != NULL) {
- grpc_iomgr_add_callback(fd->on_done_closure);
- }
+ if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
+ close_fd_locked(exec_ctx, fd);
}
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
GRPC_FD_UNREF(fd, "poll");
}
-void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
- set_ready(fd, &fd->readst, allow_synchronous_callback);
+void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ set_ready(exec_ctx, fd, &fd->read_closure);
}
-void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
- set_ready(fd, &fd->writest, allow_synchronous_callback);
+void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ set_ready(exec_ctx, fd, &fd->write_closure);
}
#endif
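
The read_closure/write_closure fields above implement a small three-state protocol: a slot is CLOSURE_NOT_READY, CLOSURE_READY, or a pointer to the closure that is waiting. A standalone model of just that protocol, with the locking, shutdown handling and exec_ctx enqueueing stripped out (the closure type here is a stand-in, not grpc_closure):

    #include <stdio.h>

    typedef struct closure { void (*cb)(void *arg); void *arg; } closure;

    #define CLOSURE_NOT_READY ((closure *)0)
    #define CLOSURE_READY ((closure *)1)

    static void notify_on(closure **st, closure *c) {
      if (*st == CLOSURE_NOT_READY) {
        *st = c;                 /* no event yet: park the closure */
      } else if (*st == CLOSURE_READY) {
        *st = CLOSURE_NOT_READY; /* event already latched: run immediately */
        c->cb(c->arg);
      } /* else: a second notify_on while one is pending is an API misuse */
    }

    static void set_ready(closure **st) {
      if (*st == CLOSURE_READY) {
        /* duplicate ready: ignore */
      } else if (*st == CLOSURE_NOT_READY) {
        *st = CLOSURE_READY;     /* nobody waiting: latch the event */
      } else {
        closure *c = *st;        /* someone waiting: run them */
        *st = CLOSURE_NOT_READY;
        c->cb(c->arg);
      }
    }

    static void on_readable(void *arg) { printf("readable: %s\n", (char *)arg); }

    int main(void) {
      closure *st = CLOSURE_NOT_READY;
      closure c = {on_readable, "hello"};
      set_ready(&st);     /* event arrives before anyone is listening */
      notify_on(&st, &c); /* fires immediately */
      notify_on(&st, &c); /* parks */
      set_ready(&st);     /* fires now */
      return 0;
    }
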
diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h
index 5ca291b1ab..1993ada79f 100644
--- a/src/core/iomgr/fd_posix.h
+++ b/src/core/iomgr/fd_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,14 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H
+#ifndef GRPC_CORE_IOMGR_FD_POSIX_H
+#define GRPC_CORE_IOMGR_FD_POSIX_H
-#include "src/core/iomgr/iomgr_internal.h"
-#include "src/core/iomgr/pollset.h"
#include <grpc/support/atm.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/pollset.h"
typedef struct grpc_fd grpc_fd;
@@ -46,20 +46,23 @@ typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
grpc_pollset *pollset;
+ grpc_pollset_worker *worker;
grpc_fd *fd;
} grpc_fd_watcher;
struct grpc_fd {
int fd;
/* refst format:
- bit0: 1=active/0=orphaned
- bit1-n: refcount
+ bit0: 1=active/0=orphaned
+ bit1-n: refcount
meaning that mostly we ref by two to avoid altering the orphaned bit,
and just unref by 1 when we're ready to flag the object as orphaned */
gpr_atm refst;
- gpr_mu set_state_mu;
- gpr_atm shutdown;
+ gpr_mu mu;
+ int shutdown;
+ int closed;
+ int released;
/* The watcher list.
@@ -83,18 +86,16 @@ struct grpc_fd {
If at a later time there becomes need of a poller to poll, one of
the inactive pollers may be kicked out of their poll loops to take
that responsibility. */
- gpr_mu watcher_mu;
grpc_fd_watcher inactive_watcher_root;
grpc_fd_watcher *read_watcher;
grpc_fd_watcher *write_watcher;
- gpr_atm readst;
- gpr_atm writest;
+ grpc_closure *read_closure;
+ grpc_closure *write_closure;
struct grpc_fd *freelist_next;
- grpc_iomgr_closure *on_done_closure;
- grpc_iomgr_closure *shutdown_closures[2];
+ grpc_closure *on_done_closure;
grpc_iomgr_object iomgr_object;
};
@@ -104,13 +105,18 @@ struct grpc_fd {
This takes ownership of closing fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
+/* Return the wrapped fd, or -1 if it has been released or closed. */
+int grpc_fd_wrapped_fd(grpc_fd *fd);
+
/* Releases fd to be asynchronously destroyed.
on_done is called when the underlying file descriptor is definitely close()d.
If on_done is NULL, no callback will be made.
+ If release_fd is not NULL, it's set to fd and fd will not be closed.
Requires: *fd initialized; no outstanding notify_on_read or
- notify_on_write. */
-void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
- const char *reason);
+ notify_on_write.
+ MUST NOT be called with a pollset lock taken */
+void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
+ int *release_fd, const char *reason);
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
@@ -121,18 +127,23 @@ void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
i.e. a combination of read_mask and write_mask determined by the fd's current
interest in said events.
Polling strategies that do not need to alter their behavior depending on the
- fd's current interest (such as epoll) do not need to call this function. */
-gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- gpr_uint32 read_mask, gpr_uint32 write_mask,
- grpc_fd_watcher *rec);
-/* Complete polling previously started with grpc_fd_begin_poll */
-void grpc_fd_end_poll(grpc_fd_watcher *rec, int got_read, int got_write);
+ fd's current interest (such as epoll) do not need to call this function.
+ MUST NOT be called with a pollset lock taken */
+uint32_t grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher *rec);
+/* Complete polling previously started with grpc_fd_begin_poll
+ MUST NOT be called with a pollset lock taken
+ if got_read or got_write are 1, also does the become_{readable,writable} as
+ appropriate. */
+void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
+ int got_read, int got_write);
/* Return 1 if this fd is orphaned, 0 otherwise */
int grpc_fd_is_orphaned(grpc_fd *fd);
/* Cause any current callbacks to error out with GRPC_CALLBACK_CANCELLED. */
-void grpc_fd_shutdown(grpc_fd *fd);
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Register read interest, causing read_cb to be called once when fd becomes
readable, on deadline specified by deadline, or on shutdown triggered by
@@ -147,20 +158,22 @@ void grpc_fd_shutdown(grpc_fd *fd);
underlying platform. This means that users must drain fd in read_cb before
calling notify_on_read again. Users are also expected to handle spurious
events, i.e read_cb is called while nothing can be readable from fd */
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure);
+void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure);
/* Exactly the same semantics as above, except based on writable events. */
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure);
+void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure);
/* Notification from the poller to an fd that it has become readable or
writable.
If allow_synchronous_callback is 1, allow running the fd callback inline
in this callstack, otherwise register an asynchronous callback and return */
-void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback);
-void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback);
+void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
+void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Reference counting for fds */
-#define GRPC_FD_REF_COUNT_DEBUG
+/*#define GRPC_FD_REF_COUNT_DEBUG*/
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line);
@@ -176,4 +189,4 @@ void grpc_fd_unref(grpc_fd *fd);
void grpc_fd_global_init(void);
void grpc_fd_global_shutdown(void);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H */
+#endif /* GRPC_CORE_IOMGR_FD_POSIX_H */
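
The refst comment above describes a packed counter: bit 0 is the active flag and the remaining bits hold the reference count, which is why normal ref/unref moves the value by two. A standalone illustration of that arithmetic (a plain intptr_t here, no atomics):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      intptr_t refst = 1; /* freshly created: one reference, active bit set */
      refst += 2;         /* GRPC_FD_REF: one more reference, bit 0 untouched */
      refst -= 2;         /* GRPC_FD_UNREF */
      printf("orphaned? %d\n", (refst & 1) == 0); /* 0: still active */
      refst -= 1;         /* grpc_fd_orphan's net effect: ref by 1, unref by 2 */
      printf("orphaned? %d\n", (refst & 1) == 0); /* 1: orphaned, count now 0 */
      return 0;
    }
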
diff --git a/src/core/iomgr/iocp_windows.c b/src/core/iomgr/iocp_windows.c
index 0c62bfccd5..fa87e5246b 100644
--- a/src/core/iomgr/iocp_windows.c
+++ b/src/core/iomgr/iocp_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/thd.h>
-#include "src/core/iomgr/alarm_internal.h"
+#include "src/core/iomgr/timer.h"
#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/socket_windows.h"
@@ -50,14 +50,29 @@
static ULONG g_iocp_kick_token;
static OVERLAPPED g_iocp_custom_overlap;
-static gpr_event g_shutdown_iocp;
-static gpr_event g_iocp_done;
-static gpr_atm g_orphans = 0;
static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static void do_iocp_work() {
+static DWORD deadline_to_millis_timeout(gpr_timespec deadline,
+ gpr_timespec now) {
+ gpr_timespec timeout;
+ static const int64_t max_spin_polling_us = 10;
+ if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
+ return INFINITE;
+ }
+ if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
+ max_spin_polling_us,
+ GPR_TIMESPAN))) <= 0) {
+ return 0;
+ }
+ timeout = gpr_time_sub(deadline, now);
+ return (DWORD)gpr_time_to_millis(gpr_time_add(
+ timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+}
+
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
+ gpr_timespec deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@@ -65,26 +80,25 @@ static void do_iocp_work() {
LPOVERLAPPED overlapped;
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
- void(*f)(void *, int) = NULL;
- void *opaque = NULL;
- success = GetQueuedCompletionStatus(g_iocp, &bytes,
- &completion_key, &overlapped,
- INFINITE);
- /* success = 0 and overlapped = NULL means the deadline got attained.
- Which is impossible. since our wait time is +inf */
- GPR_ASSERT(success || overlapped);
+ grpc_closure *closure = NULL;
+ success = GetQueuedCompletionStatus(
+ g_iocp, &bytes, &completion_key, &overlapped,
+ deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type)));
+ if (success == 0 && overlapped == NULL) {
+ return GRPC_IOCP_WORK_TIMEOUT;
+ }
GPR_ASSERT(completion_key && overlapped);
if (overlapped == &g_iocp_custom_overlap) {
gpr_atm_full_fetch_add(&g_custom_events, -1);
- if (completion_key == (ULONG_PTR) &g_iocp_kick_token) {
+ if (completion_key == (ULONG_PTR)&g_iocp_kick_token) {
/* We were awoken from a kick. */
- return;
+ return GRPC_IOCP_WORK_KICK;
}
gpr_log(GPR_ERROR, "Unknown custom completion key.");
abort();
}
- socket = (grpc_winsocket*) completion_key;
+ socket = (grpc_winsocket *)completion_key;
if (overlapped == &socket->write_info.overlapped) {
info = &socket->write_info;
} else if (overlapped == &socket->read_info.overlapped) {
@@ -93,81 +107,64 @@ static void do_iocp_work() {
gpr_log(GPR_ERROR, "Unknown IOCP operation");
abort();
}
- GPR_ASSERT(info->outstanding);
- if (socket->orphan) {
- info->outstanding = 0;
- if (!socket->read_info.outstanding && !socket->write_info.outstanding) {
- grpc_winsocket_destroy(socket);
- gpr_atm_full_fetch_add(&g_orphans, -1);
- }
- return;
- }
success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
FALSE, &flags);
info->bytes_transfered = bytes;
info->wsa_error = success ? 0 : WSAGetLastError();
GPR_ASSERT(overlapped == &info->overlapped);
- gpr_mu_lock(&socket->state_mu);
GPR_ASSERT(!info->has_pending_iocp);
- if (info->cb) {
- f = info->cb;
- opaque = info->opaque;
- info->cb = NULL;
+ gpr_mu_lock(&socket->state_mu);
+ if (info->closure) {
+ closure = info->closure;
+ info->closure = NULL;
} else {
info->has_pending_iocp = 1;
}
gpr_mu_unlock(&socket->state_mu);
- if (f) f(opaque, 1);
-}
-
-static void iocp_loop(void *p) {
- while (gpr_atm_acq_load(&g_orphans) ||
- gpr_atm_acq_load(&g_custom_events) ||
- !gpr_event_get(&g_shutdown_iocp)) {
- grpc_maybe_call_delayed_callbacks(NULL, 1);
- do_iocp_work();
- }
- gpr_log(GPR_DEBUG, "iocp_loop is done");
-
- gpr_event_set(&g_iocp_done, (void *)1);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
+ return GRPC_IOCP_WORK_WORK;
}
void grpc_iocp_init(void) {
- gpr_thd_id id;
-
- g_iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
- NULL, (ULONG_PTR)NULL, 0);
+ g_iocp =
+ CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR)NULL, 0);
GPR_ASSERT(g_iocp);
-
- gpr_event_init(&g_iocp_done);
- gpr_event_init(&g_shutdown_iocp);
- gpr_thd_new(&id, iocp_loop, NULL, NULL);
}
void grpc_iocp_kick(void) {
BOOL success;
gpr_atm_full_fetch_add(&g_custom_events, 1);
- success = PostQueuedCompletionStatus(g_iocp, 0,
- (ULONG_PTR) &g_iocp_kick_token,
+ success = PostQueuedCompletionStatus(g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token,
&g_iocp_custom_overlap);
GPR_ASSERT(success);
}
+void grpc_iocp_flush(void) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_iocp_work_status work_status;
+
+ do {
+ work_status = grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC));
+ } while (work_status == GRPC_IOCP_WORK_KICK ||
+ grpc_exec_ctx_flush(&exec_ctx));
+}
+
void grpc_iocp_shutdown(void) {
- BOOL success;
- gpr_event_set(&g_shutdown_iocp, (void *)1);
- grpc_iocp_kick();
- gpr_event_wait(&g_iocp_done, gpr_inf_future);
- success = CloseHandle(g_iocp);
- GPR_ASSERT(success);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (gpr_atm_acq_load(&g_custom_events)) {
+ grpc_iocp_work(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(CloseHandle(g_iocp));
}
void grpc_iocp_add_socket(grpc_winsocket *socket) {
HANDLE ret;
if (socket->added_to_iocp) return;
- ret = CreateIoCompletionPort((HANDLE)socket->socket,
- g_iocp, (gpr_uintptr) socket, 0);
+ ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp,
+ (uintptr_t)socket, 0);
if (!ret) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message);
@@ -179,41 +176,33 @@ void grpc_iocp_add_socket(grpc_winsocket *socket) {
GPR_ASSERT(ret == g_iocp);
}
-void grpc_iocp_socket_orphan(grpc_winsocket *socket) {
- GPR_ASSERT(!socket->orphan);
- gpr_atm_full_fetch_add(&g_orphans, 1);
- socket->orphan = 1;
-}
-
/* Calling notify_on_read or write means either of two things:
-) The IOCP already completed in the background, and we need to call
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
-static void socket_notify_on_iocp(grpc_winsocket *socket,
- void(*cb)(void *, int), void *opaque,
+static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *socket, grpc_closure *closure,
grpc_winsocket_callback_info *info) {
- int run_now = 0;
- GPR_ASSERT(!info->cb);
+ GPR_ASSERT(info->closure == NULL);
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
- run_now = 1;
info->has_pending_iocp = 0;
+ grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
} else {
- info->cb = cb;
- info->opaque = opaque;
+ info->closure = closure;
}
gpr_mu_unlock(&socket->state_mu);
- if (run_now) cb(opaque, 1);
}
-void grpc_socket_notify_on_write(grpc_winsocket *socket,
- void(*cb)(void *, int), void *opaque) {
- socket_notify_on_iocp(socket, cb, opaque, &socket->write_info);
+void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *socket,
+ grpc_closure *closure) {
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
}
-void grpc_socket_notify_on_read(grpc_winsocket *socket,
- void(*cb)(void *, int), void *opaque) {
- socket_notify_on_iocp(socket, cb, opaque, &socket->read_info);
+void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
+ grpc_closure *closure) {
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/iocp_windows.h b/src/core/iomgr/iocp_windows.h
index 4503fdd55e..570b8925aa 100644
--- a/src/core/iomgr/iocp_windows.h
+++ b/src/core/iomgr/iocp_windows.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,24 +31,33 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H
-#define GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H
+#ifndef GRPC_CORE_IOMGR_IOCP_WINDOWS_H
+#define GRPC_CORE_IOMGR_IOCP_WINDOWS_H
-#include <windows.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/socket_windows.h"
+typedef enum {
+ GRPC_IOCP_WORK_WORK,
+ GRPC_IOCP_WORK_TIMEOUT,
+ GRPC_IOCP_WORK_KICK
+} grpc_iocp_work_status;
+
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
+ gpr_timespec deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
+void grpc_iocp_flush(void);
void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);
-void grpc_iocp_socket_orphan(grpc_winsocket *);
-void grpc_socket_notify_on_write(grpc_winsocket *, void(*cb)(void *, int success),
- void *opaque);
+void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *winsocket,
+ grpc_closure *closure);
-void grpc_socket_notify_on_read(grpc_winsocket *, void(*cb)(void *, int success),
- void *opaque);
+void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *winsocket,
+ grpc_closure *closure);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_IOCP_WINDOWS_H */
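
A sketch of how the three grpc_iocp_work_status values might be consumed in a deadline-driven loop, in the spirit of grpc_iocp_flush above; poll_iocp_until is an illustrative helper, not part of the API:

    #include <grpc/support/time.h>

    #include "src/core/iomgr/exec_ctx.h"
    #include "src/core/iomgr/iocp_windows.h"

    /* Illustrative helper: drive the IOCP until the (absolute) deadline. */
    static void poll_iocp_until(gpr_timespec deadline) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      for (;;) {
        grpc_iocp_work_status st = grpc_iocp_work(&exec_ctx, deadline);
        if (st == GRPC_IOCP_WORK_TIMEOUT) break; /* deadline reached */
        /* KICK or WORK: run whatever completions were queued, then continue */
        grpc_exec_ctx_flush(&exec_ctx);
      }
      grpc_exec_ctx_finish(&exec_ctx);
    }
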
diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c
index c47528aa94..3ab4430668 100644
--- a/src/core/iomgr/iomgr.c
+++ b/src/core/iomgr/iomgr.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,67 +34,36 @@
#include "src/core/iomgr/iomgr.h"
#include <stdlib.h>
+#include <string.h>
-#include "src/core/iomgr/iomgr_internal.h"
-#include "src/core/iomgr/alarm_internal.h"
-#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/iomgr/exec_ctx.h"
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/support/env.h"
+#include "src/core/support/string.h"
static gpr_mu g_mu;
static gpr_cv g_rcv;
-static grpc_iomgr_closure *g_cbs_head = NULL;
-static grpc_iomgr_closure *g_cbs_tail = NULL;
static int g_shutdown;
-static gpr_event g_background_callback_executor_done;
static grpc_iomgr_object g_root_object;
-/* Execute followup callbacks continuously.
- Other threads may check in and help during pollset_work() */
-static void background_callback_executor(void *ignored) {
- gpr_mu_lock(&g_mu);
- while (!g_shutdown) {
- gpr_timespec deadline = gpr_inf_future;
- gpr_timespec short_deadline =
- gpr_time_add(gpr_now(), gpr_time_from_millis(100));
- if (g_cbs_head) {
- grpc_iomgr_closure *closure = g_cbs_head;
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
- closure->cb(closure->cb_arg, closure->success);
- gpr_mu_lock(&g_mu);
- } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
- } else {
- gpr_mu_unlock(&g_mu);
- gpr_sleep_until(gpr_time_min(short_deadline, deadline));
- gpr_mu_lock(&g_mu);
- }
- }
- gpr_mu_unlock(&g_mu);
- gpr_event_set(&g_background_callback_executor_done, (void *)1);
-}
-
-void grpc_kick_poller(void) {
- /* Empty. The background callback executor polls periodically. The activity
- * the kicker is trying to draw the executor's attention to will be picked up
- * either by one of the periodic wakeups or by one of the polling application
- * threads. */
-}
-
void grpc_iomgr_init(void) {
- gpr_thd_id id;
+ g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
- grpc_alarm_list_init(gpr_now());
+ grpc_exec_ctx_global_init();
+ grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
grpc_iomgr_platform_init();
- gpr_event_init(&g_background_callback_executor_done);
- gpr_thd_new(&id, background_callback_executor, NULL, NULL);
+ grpc_pollset_global_init();
}
static size_t count_objects(void) {
@@ -106,81 +75,81 @@ static size_t count_objects(void) {
return n;
}
-void grpc_iomgr_shutdown(void) {
+static void dump_objects(const char *kind) {
grpc_iomgr_object *obj;
- grpc_iomgr_closure *closure;
- gpr_timespec shutdown_deadline =
- gpr_time_add(gpr_now(), gpr_time_from_seconds(10));
+ for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
+ gpr_log(GPR_DEBUG, "%s OBJECT: %s %p", kind, obj->name, obj);
+ }
+}
+
+void grpc_iomgr_shutdown(void) {
+ gpr_timespec shutdown_deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
+ gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ grpc_iomgr_platform_flush();
gpr_mu_lock(&g_mu);
g_shutdown = 1;
- while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
- if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG,
- "Waiting for %d iomgr objects to be destroyed and executing "
- "final callbacks",
- count_objects());
- } else if (g_cbs_head != NULL) {
- gpr_log(GPR_DEBUG, "Executing final iomgr callbacks");
- } else {
- gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
- count_objects());
- }
- if (g_cbs_head) {
- do {
- closure = g_cbs_head;
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
-
- closure->cb(closure->cb_arg, 0);
- gpr_mu_lock(&g_mu);
- } while (g_cbs_head);
- continue;
+ while (g_root_object.next != &g_root_object) {
+ if (gpr_time_cmp(
+ gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+ if (g_root_object.next != &g_root_object) {
+ gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
+ count_objects());
+ }
+ last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
}
- if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) {
- gpr_log(GPR_DEBUG, "got late alarm");
+ if (grpc_timer_check(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC),
+ NULL)) {
+ gpr_mu_unlock(&g_mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(&g_mu);
continue;
}
if (g_root_object.next != &g_root_object) {
- int timeout = 0;
- gpr_timespec short_deadline =
- gpr_time_add(gpr_now(), gpr_time_from_millis(100));
- while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
- if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
- timeout = 1;
+ gpr_timespec short_deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
+ if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
+ if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
+ if (g_root_object.next != &g_root_object) {
+ gpr_log(GPR_DEBUG,
+ "Failed to free %d iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
+ if (grpc_iomgr_abort_on_leaks()) {
+ abort();
+ }
+ }
break;
}
}
- if (timeout) {
- gpr_log(GPR_DEBUG,
- "Failed to free %d iomgr objects before shutdown deadline: "
- "memory leaks are likely",
- count_objects());
- for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
- gpr_log(GPR_DEBUG, "LEAKED OBJECT: %s", obj->name);
- }
- break;
- }
}
}
gpr_mu_unlock(&g_mu);
- grpc_kick_poller();
- gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
+ grpc_timer_list_shutdown(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
- grpc_alarm_list_shutdown();
+ /* ensure all threads have left g_mu */
+ gpr_mu_lock(&g_mu);
+ gpr_mu_unlock(&g_mu);
+ grpc_pollset_global_shutdown();
grpc_iomgr_platform_shutdown();
+ grpc_exec_ctx_global_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_rcv);
}
void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) {
- gpr_mu_lock(&g_mu);
obj->name = gpr_strdup(name);
+ gpr_mu_lock(&g_mu);
obj->next = &g_root_object;
- obj->prev = obj->next->prev;
+ obj->prev = g_root_object.prev;
obj->next->prev = obj->prev->next = obj;
gpr_mu_unlock(&g_mu);
}
@@ -189,66 +158,18 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
gpr_mu_lock(&g_mu);
obj->next->prev = obj->prev;
obj->prev->next = obj->next;
- gpr_free(obj->name);
gpr_cv_signal(&g_rcv);
gpr_mu_unlock(&g_mu);
+ gpr_free(obj->name);
}
-void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg) {
- closure->cb = cb;
- closure->cb_arg = cb_arg;
- closure->next = NULL;
-}
-
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *closure, int success) {
- closure->success = success;
- gpr_mu_lock(&g_mu);
- closure->next = NULL;
- if (!g_cbs_tail) {
- g_cbs_head = g_cbs_tail = closure;
- } else {
- g_cbs_tail->next = closure;
- g_cbs_tail = closure;
- }
- if (g_shutdown) {
- gpr_cv_signal(&g_rcv);
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void grpc_iomgr_add_callback(grpc_iomgr_closure *closure) {
- grpc_iomgr_add_delayed_callback(closure, 1 /* GPR_TRUE */);
-}
-
-int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
- int n = 0;
- gpr_mu *retake_mu = NULL;
- grpc_iomgr_closure *closure;
- for (;;) {
- /* check for new work */
- if (!gpr_mu_trylock(&g_mu)) {
- break;
- }
- closure = g_cbs_head;
- if (!closure) {
- gpr_mu_unlock(&g_mu);
- break;
- }
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
- /* if we have a mutex to drop, do so before executing work */
- if (drop_mu) {
- gpr_mu_unlock(drop_mu);
- retake_mu = drop_mu;
- drop_mu = NULL;
- }
- closure->cb(closure->cb_arg, success && closure->success);
- n++;
- }
- if (retake_mu) {
- gpr_mu_lock(retake_mu);
+bool grpc_iomgr_abort_on_leaks(void) {
+ char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
+ if (env == NULL) return false;
+ static const char *truthy[] = {"yes", "Yes", "YES", "true",
+ "True", "TRUE", "1"};
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
+ if (0 == strcmp(env, truthy[i])) return true;
}
- return n;
+ return false;
}
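
The rewritten shutdown path above replaces the old callback executor with exec_ctx flushing and gains an opt-in hard-failure mode: grpc_iomgr_abort_on_leaks() consults the GRPC_ABORT_ON_LEAKS environment variable. A standalone sketch of the same truthy check, using plain libc in place of the gpr_ helpers (abort_on_leaks_from_env is an illustrative name, not part of the tree):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for grpc_iomgr_abort_on_leaks(): true only when
   GRPC_ABORT_ON_LEAKS is set to one of the accepted truthy spellings. */
static bool abort_on_leaks_from_env(void) {
  static const char *truthy[] = {"yes", "Yes", "YES", "true",
                                 "True", "TRUE", "1"};
  const char *env = getenv("GRPC_ABORT_ON_LEAKS");
  if (env == NULL) return false;
  for (size_t i = 0; i < sizeof(truthy) / sizeof(truthy[0]); i++) {
    if (strcmp(env, truthy[i]) == 0) return true;
  }
  return false;
}

Setting GRPC_ABORT_ON_LEAKS=1 in a test environment turns the "LEAKED" object dump above into an abort(), which is useful under a leak-checking CI job.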
diff --git a/src/core/iomgr/iomgr.h b/src/core/iomgr/iomgr.h
index 6d4a82917b..e1237a4533 100644
--- a/src/core/iomgr/iomgr.h
+++ b/src/core/iomgr/iomgr.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,36 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_IOMGR_H
-#define GRPC_INTERNAL_CORE_IOMGR_IOMGR_H
-
-/** gRPC Callback definition.
- *
- * \param arg Arbitrary input.
- * \param success An indication on the state of the iomgr. On false, cleanup
- * actions should be taken (eg, shutdown). */
-typedef void (*grpc_iomgr_cb_func)(void *arg, int success);
-
-/** A closure over a grpc_iomgr_cb_func. */
-typedef struct grpc_iomgr_closure {
- /** Bound callback. */
- grpc_iomgr_cb_func cb;
-
- /** Arguments to be passed to "cb". */
- void *cb_arg;
-
- /** Internal. A boolean indication to "cb" on the state of the iomgr.
- * For instance, closures created during a shutdown would have this field set
- * to false. */
- int success;
-
- /**< Internal. Do not touch */
- struct grpc_iomgr_closure *next;
-} grpc_iomgr_closure;
-
-/** Initializes \a closure with \a cb and \a cb_arg. */
-void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg);
+#ifndef GRPC_CORE_IOMGR_IOMGR_H
+#define GRPC_CORE_IOMGR_IOMGR_H
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
@@ -68,13 +40,4 @@ void grpc_iomgr_init(void);
/** Signals the intention to shutdown the iomgr. */
void grpc_iomgr_shutdown(void);
-/** Registers a closure to be invoked at some point in the future.
- *
- * Can be called from within a callback or from anywhere else */
-void grpc_iomgr_add_callback(grpc_iomgr_closure *closure);
-
-/** As per grpc_iomgr_add_callback, with the ability to set the success
- argument. */
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_H */
+#endif /* GRPC_CORE_IOMGR_IOMGR_H */
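
With the closure plumbing removed, the iomgr's surface within core is reduced to an init/shutdown pair. A minimal bracketing sketch; in the real library these are driven by the surface-level grpc_init()/grpc_shutdown(), not called by applications directly:

#include "src/core/iomgr/iomgr.h"

int main(void) {
  grpc_iomgr_init();     /* timers, pollset globals, exec_ctx globals */
  /* ... create pollsets / endpoints, run the event loop ... */
  grpc_iomgr_shutdown(); /* blocks until registered objects are destroyed,
                            warning (or aborting) past the 10s deadline */
  return 0;
}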
diff --git a/src/core/iomgr/iomgr_internal.h b/src/core/iomgr/iomgr_internal.h
index 6c1e0e1799..d06b068b1c 100644
--- a/src/core/iomgr/iomgr_internal.h
+++ b/src/core/iomgr/iomgr_internal.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,10 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H
-#define GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H
+#ifndef GRPC_CORE_IOMGR_IOMGR_INTERNAL_H
+#define GRPC_CORE_IOMGR_IOMGR_INTERNAL_H
+
+#include <stdbool.h>
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/sync.h>
@@ -43,13 +45,18 @@ typedef struct grpc_iomgr_object {
struct grpc_iomgr_object *prev;
} grpc_iomgr_object;
-int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success);
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success);
+void grpc_pollset_global_init(void);
+void grpc_pollset_global_shutdown(void);
void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name);
void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
void grpc_iomgr_platform_init(void);
+/** flush any globally queued work from iomgr */
+void grpc_iomgr_platform_flush(void);
+/** tear down all platform specific global iomgr structures */
void grpc_iomgr_platform_shutdown(void);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H */
+bool grpc_iomgr_abort_on_leaks(void);
+
+#endif /* GRPC_CORE_IOMGR_IOMGR_INTERNAL_H */
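
iomgr_internal.h keeps the object-tracking hooks that feed the leak report in grpc_iomgr_shutdown(): anything long-lived registers itself by name and unregisters on destruction (unregistering also signals the shutdown condition variable). A hedged sketch of the pattern; my_resource and its helpers are illustrative names only:

#include <grpc/support/alloc.h>
#include "src/core/iomgr/iomgr_internal.h"

typedef struct my_resource {
  grpc_iomgr_object iomgr_obj; /* linked into the global leak-tracking list */
  /* ... real state ... */
} my_resource;

static my_resource *my_resource_create(void) {
  my_resource *r = gpr_malloc(sizeof(*r));
  grpc_iomgr_register_object(&r->iomgr_obj, "my_resource");
  return r;
}

static void my_resource_destroy(my_resource *r) {
  grpc_iomgr_unregister_object(&r->iomgr_obj); /* wakes a waiting shutdown */
  gpr_free(r);
}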
diff --git a/src/core/iomgr/iomgr_posix.c b/src/core/iomgr/iomgr_posix.c
index 758ae77b86..fecb7b9760 100644
--- a/src/core/iomgr/iomgr_posix.c
+++ b/src/core/iomgr/iomgr_posix.c
@@ -42,13 +42,11 @@
void grpc_iomgr_platform_init(void) {
grpc_fd_global_init();
- grpc_pollset_global_init();
grpc_register_tracer("tcp", &grpc_tcp_trace);
}
-void grpc_iomgr_platform_shutdown(void) {
- grpc_pollset_global_shutdown();
- grpc_fd_global_shutdown();
-}
+void grpc_iomgr_platform_flush(void) {}
+
+void grpc_iomgr_platform_shutdown(void) { grpc_fd_global_shutdown(); }
-#endif /* GRPC_POSIX_SOCKET */
+#endif /* GRPC_POSIX_SOCKET */
diff --git a/src/core/iomgr/iomgr_posix.h b/src/core/iomgr/iomgr_posix.h
index a404f6433e..698fb6aee7 100644
--- a/src/core/iomgr/iomgr_posix.h
+++ b/src/core/iomgr/iomgr_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,9 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H
+#ifndef GRPC_CORE_IOMGR_IOMGR_POSIX_H
+#define GRPC_CORE_IOMGR_IOMGR_POSIX_H
#include "src/core/iomgr/iomgr_internal.h"
-void grpc_pollset_global_init(void);
-void grpc_pollset_global_shutdown(void);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H */
+#endif /* GRPC_CORE_IOMGR_IOMGR_POSIX_H */
diff --git a/src/core/iomgr/iomgr_windows.c b/src/core/iomgr/iomgr_windows.c
index 74cd5a829b..14775516bb 100644
--- a/src/core/iomgr/iomgr_windows.c
+++ b/src/core/iomgr/iomgr_windows.c
@@ -63,9 +63,11 @@ void grpc_iomgr_platform_init(void) {
grpc_iocp_init();
}
+void grpc_iomgr_platform_flush(void) { grpc_iocp_flush(); }
+
void grpc_iomgr_platform_shutdown(void) {
grpc_iocp_shutdown();
winsock_shutdown();
}
-#endif /* GRPC_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
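
Each platform backend now supplies three hooks: init, flush (new in this change, used by grpc_iomgr_shutdown() to drain globally queued work), and shutdown. The posix and windows versions are in the hunks above; a stub backend would look roughly like this (purely illustrative, not a real port):

#include "src/core/iomgr/iomgr_internal.h"

/* Minimal no-op backend showing the contract every platform must meet. */
void grpc_iomgr_platform_init(void) { /* set up fds / iocp, tracers, ... */ }
void grpc_iomgr_platform_flush(void) { /* drain globally queued work */ }
void grpc_iomgr_platform_shutdown(void) { /* tear down platform state */ }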
diff --git a/src/core/iomgr/pollset.h b/src/core/iomgr/pollset.h
index c40188b3c9..9500b1a73a 100644
--- a/src/core/iomgr/pollset.h
+++ b/src/core/iomgr/pollset.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,17 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_H
+#define GRPC_CORE_IOMGR_POLLSET_H
#include <grpc/support/port_platform.h>
+#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/iomgr/exec_ctx.h"
+
+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+
/* A grpc_pollset is a set of file descriptors that a higher level item is
interested in. For example:
- a server will typically keep a pollset containing all connected channels,
@@ -44,32 +49,46 @@
- a completion queue might keep a pollset with an entry for each transport
that is servicing a call that it's tracking */
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/iomgr/pollset_posix.h"
-#endif
-
-#ifdef GPR_WIN32
-#include "src/core/iomgr/pollset_windows.h"
-#endif
+typedef struct grpc_pollset grpc_pollset;
+typedef struct grpc_pollset_worker grpc_pollset_worker;
-void grpc_pollset_init(grpc_pollset *pollset);
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg);
+size_t grpc_pollset_size(void);
+void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
+/* Begin shutting down the pollset, and call closure when done.
+ * pollset's mutex must be held */
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure);
+/** Reset the pollset to its initial state (perhaps with some cached objects);
+ * must have been previously shut down */
+void grpc_pollset_reset(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.
May involve invoking asynchronous callbacks, or actually polling file
descriptors.
- Requires GRPC_POLLSET_MU(pollset) locked.
- May unlock GRPC_POLLSET_MU(pollset) during its execution.
-
- Returns true if some work has been done, and false if the deadline
- got attained. */
-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline);
+ Requires pollset's mutex locked.
+ May unlock its mutex during its execution.
+
+ worker is a (platform-specific) handle that can be used to wake up
+ from grpc_pollset_work before any events are received and before the timeout
+ has expired. It is both initialized and destroyed by grpc_pollset_work.
+ Initialization of worker is guaranteed to occur BEFORE the
+ pollset's mutex is released for the first time by grpc_pollset_work
+ and it is guaranteed that it will not be released by grpc_pollset_work
+ AFTER worker has been destroyed.
+
+ Tries not to block past deadline.
+   May call grpc_closure_list_run on grpc_closure_list, without holding the
+   pollset lock */
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker, gpr_timespec now,
+ gpr_timespec deadline);
/* Break one polling thread out of polling work for this pollset.
- Requires GRPC_POLLSET_MU(pollset) locked. */
-void grpc_pollset_kick(grpc_pollset *pollset);
+ If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
+ Otherwise, if specific_worker is non-NULL, then kick that worker. */
+void grpc_pollset_kick(grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
+#endif /* GRPC_CORE_IOMGR_POLLSET_H */
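
The pollset interface above is the heart of this refactor: callers own the mutex handed back by grpc_pollset_init, grpc_pollset_work runs one polling pass against an exec_ctx, and grpc_pollset_kick (optionally with GRPC_POLLSET_KICK_BROADCAST) wakes pollers from other threads. A rough sketch of the calling pattern implied by the declarations above (surrounding setup is assumed; shutdown and destroy are elided):

#include <grpc/support/alloc.h>
#include "src/core/iomgr/pollset.h"

/* Sketch: create a pollset, run one polling pass, flush resulting callbacks. */
static void poll_once(gpr_timespec deadline) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu *mu;
  grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(pollset, &mu);

  grpc_pollset_worker *worker = NULL;
  gpr_mu_lock(mu);
  /* May drop and re-take mu internally; work it finds is queued on exec_ctx. */
  grpc_pollset_work(&exec_ctx, pollset, &worker,
                    gpr_now(GPR_CLOCK_MONOTONIC), deadline);
  gpr_mu_unlock(mu);
  grpc_exec_ctx_finish(&exec_ctx); /* run whatever the pass produced */

  /* From another thread, holding mu, all pollers can be woken with:
     grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); */
}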
diff --git a/src/core/iomgr/pollset_kick_posix.c b/src/core/iomgr/pollset_kick_posix.c
deleted file mode 100644
index 51021784f2..0000000000
--- a/src/core/iomgr/pollset_kick_posix.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/iomgr/pollset_kick_posix.h"
-
-#include <errno.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "src/core/iomgr/socket_utils_posix.h"
-#include "src/core/iomgr/wakeup_fd_posix.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-/* This implementation is based on a freelist of wakeup fds, with extra logic to
- * handle kicks while there is no attached fd. */
-
-/* TODO(klempner): Autosize this, and consider providing a way to disable the
- * cap entirely on systems with large fd limits */
-#define GRPC_MAX_CACHED_WFDS 50
-
-static grpc_kick_fd_info *fd_freelist = NULL;
-static int fd_freelist_count = 0;
-static gpr_mu fd_freelist_mu;
-
-static grpc_kick_fd_info *allocate_wfd(void) {
- grpc_kick_fd_info *info = NULL;
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- info = fd_freelist;
- fd_freelist = fd_freelist->next;
- --fd_freelist_count;
- }
- gpr_mu_unlock(&fd_freelist_mu);
- if (info == NULL) {
- info = gpr_malloc(sizeof(*info));
- grpc_wakeup_fd_create(&info->wakeup_fd);
- info->next = NULL;
- }
- return info;
-}
-
-static void destroy_wfd(grpc_kick_fd_info *wfd) {
- grpc_wakeup_fd_destroy(&wfd->wakeup_fd);
- gpr_free(wfd);
-}
-
-static void free_wfd(grpc_kick_fd_info *fd_info) {
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist_count < GRPC_MAX_CACHED_WFDS) {
- fd_info->next = fd_freelist;
- fd_freelist = fd_info;
- fd_freelist_count++;
- fd_info = NULL;
- }
- gpr_mu_unlock(&fd_freelist_mu);
-
- if (fd_info) {
- destroy_wfd(fd_info);
- }
-}
-
-void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state) {
- gpr_mu_init(&kick_state->mu);
- kick_state->kicked = 0;
- kick_state->fd_list.next = kick_state->fd_list.prev = &kick_state->fd_list;
-}
-
-void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state) {
- gpr_mu_destroy(&kick_state->mu);
- GPR_ASSERT(kick_state->fd_list.next == &kick_state->fd_list);
-}
-
-grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
- grpc_pollset_kick_state *kick_state) {
- grpc_kick_fd_info *fd_info;
- gpr_mu_lock(&kick_state->mu);
- if (kick_state->kicked) {
- kick_state->kicked = 0;
- gpr_mu_unlock(&kick_state->mu);
- return NULL;
- }
- fd_info = allocate_wfd();
- fd_info->next = &kick_state->fd_list;
- fd_info->prev = fd_info->next->prev;
- fd_info->next->prev = fd_info->prev->next = fd_info;
- gpr_mu_unlock(&kick_state->mu);
- return fd_info;
-}
-
-void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
- grpc_kick_fd_info *fd_info) {
- grpc_wakeup_fd_consume_wakeup(&fd_info->wakeup_fd);
-}
-
-void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
- grpc_kick_fd_info *fd_info) {
- gpr_mu_lock(&kick_state->mu);
- fd_info->next->prev = fd_info->prev;
- fd_info->prev->next = fd_info->next;
- free_wfd(fd_info);
- gpr_mu_unlock(&kick_state->mu);
-}
-
-void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state) {
- gpr_mu_lock(&kick_state->mu);
- if (kick_state->fd_list.next != &kick_state->fd_list) {
- grpc_wakeup_fd_wakeup(&kick_state->fd_list.next->wakeup_fd);
- } else {
- kick_state->kicked = 1;
- }
- gpr_mu_unlock(&kick_state->mu);
-}
-
-void grpc_pollset_kick_global_init_fallback_fd(void) {
- gpr_mu_init(&fd_freelist_mu);
- grpc_wakeup_fd_global_init_force_fallback();
-}
-
-void grpc_pollset_kick_global_init(void) {
- gpr_mu_init(&fd_freelist_mu);
- grpc_wakeup_fd_global_init();
-}
-
-void grpc_pollset_kick_global_destroy(void) {
- while (fd_freelist != NULL) {
- grpc_kick_fd_info *current = fd_freelist;
- fd_freelist = fd_freelist->next;
- destroy_wfd(current);
- }
- grpc_wakeup_fd_global_destroy();
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-#endif /* GPR_POSIX_SOCKET */
diff --git a/src/core/iomgr/pollset_kick_posix.h b/src/core/iomgr/pollset_kick_posix.h
deleted file mode 100644
index 77e32a8d51..0000000000
--- a/src/core/iomgr/pollset_kick_posix.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H
-
-#include "src/core/iomgr/wakeup_fd_posix.h"
-#include <grpc/support/sync.h>
-
-/* pollset kicking allows breaking a thread out of polling work for
- a given pollset.
- writing a byte to a pipe is used as a posix-ly portable base
- mechanism, and eventfds are utilized on Linux for better performance. */
-
-typedef struct grpc_kick_fd_info {
- grpc_wakeup_fd_info wakeup_fd;
- /* used for polling list and free list */
- struct grpc_kick_fd_info *next;
- /* only used when polling */
- struct grpc_kick_fd_info *prev;
-} grpc_kick_fd_info;
-
-typedef struct grpc_pollset_kick_state {
- gpr_mu mu;
- int kicked;
- struct grpc_kick_fd_info fd_list;
-} grpc_pollset_kick_state;
-
-#define GRPC_POLLSET_KICK_GET_FD(kick_fd_info) \
- GRPC_WAKEUP_FD_GET_READ_FD(&(kick_fd_info)->wakeup_fd)
-
-/* This is an abstraction around the typical pipe mechanism for waking up a
- thread sitting in a poll() style call. */
-
-void grpc_pollset_kick_global_init(void);
-void grpc_pollset_kick_global_destroy(void);
-
-void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state);
-void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state);
-
-/* Guarantees a pure posix implementation rather than a specialized one, if
- * applicable. Intended for testing. */
-void grpc_pollset_kick_global_init_fallback_fd(void);
-
-/* Must be called before entering poll(). If return value is NULL, this consumed
- an existing kick. Otherwise the return value is an FD to add to the poll set.
- */
-grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
- grpc_pollset_kick_state *kick_state);
-
-/* Consume an existing kick. Must be called after poll returns that the fd was
- readable, and before calling kick_post_poll. */
-void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
- grpc_kick_fd_info *fd_info);
-
-/* Must be called after pre_poll, and after consume if applicable */
-void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
- grpc_kick_fd_info *fd_info);
-
-/* Actually kick */
-void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index 1900bbf9e1..2e0f27fab8 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,21 +36,87 @@
#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
#include <errno.h>
+#include <poll.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>
-#include "src/core/iomgr/fd_posix.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/block_annotate.h"
+
+struct epoll_fd_list {
+ int *epoll_fds;
+ size_t count;
+ size_t capacity;
+};
+
+static struct epoll_fd_list epoll_fd_global_list;
+static gpr_once init_epoll_fd_list_mu = GPR_ONCE_INIT;
+static gpr_mu epoll_fd_list_mu;
+
+static void init_mu(void) { gpr_mu_init(&epoll_fd_list_mu); }
+
+static void add_epoll_fd_to_global_list(int epoll_fd) {
+ gpr_once_init(&init_epoll_fd_list_mu, init_mu);
+
+ gpr_mu_lock(&epoll_fd_list_mu);
+ if (epoll_fd_global_list.count == epoll_fd_global_list.capacity) {
+ epoll_fd_global_list.capacity =
+ GPR_MAX((size_t)8, epoll_fd_global_list.capacity * 2);
+ epoll_fd_global_list.epoll_fds =
+ gpr_realloc(epoll_fd_global_list.epoll_fds,
+ epoll_fd_global_list.capacity * sizeof(int));
+ }
+ epoll_fd_global_list.epoll_fds[epoll_fd_global_list.count++] = epoll_fd;
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
+
+static void remove_epoll_fd_from_global_list(int epoll_fd) {
+ gpr_mu_lock(&epoll_fd_list_mu);
+ GPR_ASSERT(epoll_fd_global_list.count > 0);
+ for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
+ if (epoll_fd == epoll_fd_global_list.epoll_fds[i]) {
+ epoll_fd_global_list.epoll_fds[i] =
+ epoll_fd_global_list.epoll_fds[--(epoll_fd_global_list.count)];
+ break;
+ }
+ }
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
+
+void grpc_remove_fd_from_all_epoll_sets(int fd) {
+ int err;
+ gpr_once_init(&init_epoll_fd_list_mu, init_mu);
+ gpr_mu_lock(&epoll_fd_list_mu);
+ if (epoll_fd_global_list.count == 0) {
+ gpr_mu_unlock(&epoll_fd_list_mu);
+ return;
+ }
+ for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
+ err = epoll_ctl(epoll_fd_global_list.epoll_fds[i], EPOLL_CTL_DEL, fd, NULL);
+ if (err < 0 && errno != ENOENT) {
+ gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd,
+ strerror(errno));
+ }
+ }
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
typedef struct {
- int epoll_fd;
- grpc_wakeup_fd_info wakeup_fd;
-} pollset_hdr;
+ grpc_pollset *pollset;
+ grpc_fd *fd;
+ grpc_closure closure;
+} delayed_add;
-static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
- grpc_fd *fd) {
+typedef struct { int epoll_fd; } pollset_hdr;
+
+static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
pollset_hdr *h = pollset->data.ptr;
struct epoll_event ev;
int err;
@@ -59,9 +125,9 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
/* We pretend to be polling whilst adding an fd to keep the fd from being
closed during the add. This may result in a spurious wakeup being assigned
to this pollset whilst adding, but that should be benign. */
- GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, 0, 0, &watcher) == 0);
+ GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0);
if (watcher.fd != NULL) {
- ev.events = EPOLLIN | EPOLLOUT | EPOLLET;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = fd;
err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
if (err < 0) {
@@ -72,32 +138,63 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
}
}
}
- grpc_fd_end_poll(&watcher, 0, 0);
+ grpc_fd_end_poll(exec_ctx, &watcher, 0, 0);
}
-static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
- grpc_fd *fd) {
- pollset_hdr *h = pollset->data.ptr;
- int err;
- /* Note that this can race with concurrent poll, but that should be fine since
- * at worst it creates a spurious read event on a reused grpc_fd object. */
- err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0) {
- gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd->fd,
- strerror(errno));
+static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_status) {
+ delayed_add *da = arg;
+
+ if (!grpc_fd_is_orphaned(da->fd)) {
+ finally_add_fd(exec_ctx, da->pollset, da->fd);
+ }
+
+ gpr_mu_lock(&da->pollset->mu);
+ da->pollset->in_flight_cbs--;
+ if (da->pollset->shutting_down) {
+ /* We don't care about this pollset anymore. */
+ if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
+ da->pollset->called_shutdown = 1;
+ grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, true, NULL);
+ }
+ }
+ gpr_mu_unlock(&da->pollset->mu);
+
+ GRPC_FD_UNREF(da->fd, "delayed_add");
+
+ gpr_free(da);
+}
+
+static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_fd *fd,
+ int and_unlock_pollset) {
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
+ finally_add_fd(exec_ctx, pollset, fd);
+ } else {
+ delayed_add *da = gpr_malloc(sizeof(*da));
+ da->pollset = pollset;
+ da->fd = fd;
+ GRPC_FD_REF(fd, "delayed_add");
+ grpc_closure_init(&da->closure, perform_delayed_add, da);
+ pollset->in_flight_cbs++;
+ grpc_exec_ctx_enqueue(exec_ctx, &da->closure, true, NULL);
}
}
/* TODO(klempner): We probably want to turn this down a bit */
#define GRPC_EPOLL_MAX_EVENTS 1000
-static void multipoll_with_epoll_pollset_maybe_work(
- grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
- int allow_synchronous_callback) {
+static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
int ep_rv;
+ int poll_rv;
pollset_hdr *h = pollset->data.ptr;
int timeout_ms;
+ struct pollfd pfds[2];
/* If you want to ignore epoll's ability to sanely handle parallel pollers,
* for a more apples-to-apples performance comparison with poll, add a
@@ -105,42 +202,67 @@ static void multipoll_with_epoll_pollset_maybe_work(
* here.
*/
- timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
- pollset->counter += 1;
gpr_mu_unlock(&pollset->mu);
- do {
- ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
- if (ep_rv < 0) {
- if (errno != EINTR) {
- gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
- }
- } else {
- int i;
- for (i = 0; i < ep_rv; ++i) {
- if (ep_ev[i].data.ptr == 0) {
- grpc_wakeup_fd_consume_wakeup(&h->wakeup_fd);
- } else {
- grpc_fd *fd = ep_ev[i].data.ptr;
- /* TODO(klempner): We might want to consider making err and pri
- * separate events */
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write = ep_ev[i].events & EPOLLOUT;
- if (read || cancel) {
- grpc_fd_become_readable(fd, allow_synchronous_callback);
+ timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
+
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfds[0].events = POLLIN;
+ pfds[0].revents = 0;
+ pfds[1].fd = h->epoll_fd;
+ pfds[1].events = POLLIN;
+ pfds[1].revents = 0;
+
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
+ GPR_TIMER_BEGIN("poll", 0);
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GPR_TIMER_END("poll", 0);
+
+ if (poll_rv < 0) {
+ if (errno != EINTR) {
+ gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ }
+ } else if (poll_rv == 0) {
+ /* do nothing */
+ } else {
+ if (pfds[0].revents) {
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ }
+ if (pfds[1].revents) {
+ do {
+ /* The following epoll_wait never blocks; it has a timeout of 0 */
+ ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
+ if (ep_rv < 0) {
+ if (errno != EINTR) {
+ gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
}
- if (write || cancel) {
- grpc_fd_become_writable(fd, allow_synchronous_callback);
+ } else {
+ int i;
+ for (i = 0; i < ep_rv; ++i) {
+ grpc_fd *fd = ep_ev[i].data.ptr;
+ /* TODO(klempner): We might want to consider making err and pri
+ * separate events */
+ int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
+ int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
+ int write_ev = ep_ev[i].events & EPOLLOUT;
+ if (fd == NULL) {
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
+ } else {
+ if (read_ev || cancel) {
+ grpc_fd_become_readable(exec_ctx, fd);
+ }
+ if (write_ev || cancel) {
+ grpc_fd_become_writable(exec_ctx, fd);
+ }
+ }
}
}
- }
+ } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
}
- timeout_ms = 0;
- } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
-
- gpr_mu_lock(&pollset->mu);
- pollset->counter -= 1;
+ }
}
static void multipoll_with_epoll_pollset_finish_shutdown(
@@ -148,25 +270,19 @@ static void multipoll_with_epoll_pollset_finish_shutdown(
static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
pollset_hdr *h = pollset->data.ptr;
- grpc_wakeup_fd_destroy(&h->wakeup_fd);
close(h->epoll_fd);
+ remove_epoll_fd_from_global_list(h->epoll_fd);
gpr_free(h);
}
-static void epoll_kick(grpc_pollset *pollset) {
- pollset_hdr *h = pollset->data.ptr;
- grpc_wakeup_fd_wakeup(&h->wakeup_fd);
-}
-
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_add_fd,
- multipoll_with_epoll_pollset_del_fd,
- multipoll_with_epoll_pollset_maybe_work,
- epoll_kick,
+ multipoll_with_epoll_pollset_maybe_work_and_unlock,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};
-static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
+static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
@@ -181,22 +297,28 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno));
abort();
}
- for (i = 0; i < nfds; i++) {
- multipoll_with_epoll_pollset_add_fd(pollset, fds[i]);
- }
+ add_epoll_fd_to_global_list(h->epoll_fd);
- grpc_wakeup_fd_create(&h->wakeup_fd);
- ev.events = EPOLLIN;
- ev.data.ptr = 0;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = NULL;
err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(&h->wakeup_fd), &ev);
+ GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), &ev);
if (err < 0) {
- gpr_log(GPR_ERROR, "Wakeup fd epoll_ctl failed: %s", strerror(errno));
- abort();
+ gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s",
+ GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd),
+ strerror(errno));
+ }
+
+ for (i = 0; i < nfds; i++) {
+ multipoll_with_epoll_pollset_add_fd(exec_ctx, pollset, fds[i], 0);
}
}
grpc_platform_become_multipoller_type grpc_platform_become_multipoller =
epoll_become_multipoller;
+#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+
+void grpc_remove_fd_from_all_epoll_sets(int fd) {}
+
#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
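
The epoll multipoller no longer owns a per-pollset wakeup fd; instead each pass poll()s the worker's wakeup fd next to the epoll fd, then drains epoll with a zero timeout so the drain loop itself never blocks. Stripped of the gRPC types, the control flow is roughly this (a sketch, not the code above; fd setup is assumed):

#include <poll.h>
#include <sys/epoll.h>

#define MAX_EVENTS 1000

/* Wait on (wakeup_fd, epoll_fd) with poll(), then drain epoll non-blockingly.
   Returns the number of epoll events handled, 0 on timeout, -1 on error. */
static int poll_then_drain(int wakeup_fd, int epoll_fd, int timeout_ms,
                           struct epoll_event *events) {
  struct pollfd pfds[2] = {
      {.fd = wakeup_fd, .events = POLLIN},
      {.fd = epoll_fd, .events = POLLIN},
  };
  int n = poll(pfds, 2, timeout_ms);
  if (n <= 0) return n;                 /* timeout or error: nothing to drain */
  if (!(pfds[1].revents & POLLIN)) return 0;
  int handled = 0, got;
  do {
    /* timeout 0: epoll_wait never blocks here, readiness is already known */
    got = epoll_wait(epoll_fd, events, MAX_EVENTS, 0);
    if (got > 0) handled += got;
  } while (got == MAX_EVENTS);          /* keep draining while the buffer fills */
  return handled;
}

Keeping the epoll fds in a global list (add/remove_epoll_fd_to/from_global_list above) is what lets grpc_remove_fd_from_all_epoll_sets() issue EPOLL_CTL_DEL everywhere when an fd is released.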
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index 7b717bd159..92d6fb7241 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,36 +42,35 @@
#include <stdlib.h>
#include <string.h>
-#include "src/core/iomgr/fd_posix.h"
-#include "src/core/iomgr/iomgr_internal.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/support/block_annotate.h"
+
typedef struct {
/* all polled fds */
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;
- /* fds being polled by the current poller: parallel arrays of pollfd, and
- a grpc_fd_watcher */
- size_t pfd_count;
- size_t pfd_capacity;
- grpc_fd_watcher *watchers;
- struct pollfd *pfds;
/* fds that have been removed from the pollset explicitly */
size_t del_count;
size_t del_capacity;
grpc_fd **dels;
} pollset_hdr;
-static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
- grpc_fd *fd) {
+static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_fd *fd,
+ int and_unlock_pollset) {
size_t i;
pollset_hdr *h = pollset->data.ptr;
/* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
for (i = 0; i < h->fd_count; i++) {
- if (h->fds[i] == fd) return;
+ if (h->fds[i] == fd) goto exit;
}
if (h->fd_count == h->fd_capacity) {
h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2);
@@ -79,131 +78,111 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
}
h->fds[h->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
-}
-
-static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
- grpc_fd *fd) {
- /* will get removed next poll cycle */
- pollset_hdr *h = pollset->data.ptr;
- if (h->del_count == h->del_capacity) {
- h->del_capacity = GPR_MAX(h->del_capacity + 8, h->del_count * 3 / 2);
- h->dels = gpr_realloc(h->dels, sizeof(grpc_fd *) * h->del_capacity);
+exit:
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
}
- h->dels[h->del_count++] = fd;
- GRPC_FD_REF(fd, "multipoller_del");
}
-static void end_polling(grpc_pollset *pollset) {
- size_t i;
- pollset_hdr *h;
- h = pollset->data.ptr;
- for (i = 1; i < h->pfd_count; i++) {
- grpc_fd_end_poll(&h->watchers[i], h->pfds[i].revents & POLLIN,
- h->pfds[i].revents & POLLOUT);
- }
-}
+static void multipoll_with_poll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
-static void multipoll_with_poll_pollset_maybe_work(
- grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
- int allow_synchronous_callback) {
int timeout;
int r;
- size_t i, np, nf, nd;
+ size_t i, j, fd_count;
+ nfds_t pfd_count;
pollset_hdr *h;
- grpc_kick_fd_info *kfd;
+ /* TODO(ctiller): inline some elements to avoid an allocation */
+ grpc_fd_watcher *watchers;
+ struct pollfd *pfds;
h = pollset->data.ptr;
timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
- if (h->pfd_capacity < h->fd_count + 1) {
- h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
- gpr_free(h->pfds);
- gpr_free(h->watchers);
- h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
- h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
- }
- nf = 0;
- np = 1;
- kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
- if (kfd == NULL) {
- /* Already kicked */
- return;
- }
- h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
- h->pfds[0].events = POLLIN;
- h->pfds[0].revents = POLLOUT;
+ /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
+ pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
+ watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
+ fd_count = 0;
+ pfd_count = 2;
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
+ pfds[0].events = POLLIN;
+ pfds[0].revents = 0;
+ pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfds[1].events = POLLIN;
+ pfds[1].revents = 0;
for (i = 0; i < h->fd_count; i++) {
int remove = grpc_fd_is_orphaned(h->fds[i]);
- for (nd = 0; nd < h->del_count; nd++) {
- if (h->fds[i] == h->dels[nd]) remove = 1;
+ for (j = 0; !remove && j < h->del_count; j++) {
+ if (h->fds[i] == h->dels[j]) remove = 1;
}
if (remove) {
GRPC_FD_UNREF(h->fds[i], "multipoller");
} else {
- h->fds[nf++] = h->fds[i];
- h->watchers[np].fd = h->fds[i];
- h->pfds[np].fd = h->fds[i]->fd;
- h->pfds[np].revents = 0;
- np++;
+ h->fds[fd_count++] = h->fds[i];
+ watchers[pfd_count].fd = h->fds[i];
+ GRPC_FD_REF(watchers[pfd_count].fd, "multipoller_start");
+ pfds[pfd_count].fd = h->fds[i]->fd;
+ pfds[pfd_count].revents = 0;
+ pfd_count++;
}
}
- h->pfd_count = np;
- h->fd_count = nf;
- for (nd = 0; nd < h->del_count; nd++) {
- GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
+ for (j = 0; j < h->del_count; j++) {
+ GRPC_FD_UNREF(h->dels[j], "multipoller_del");
}
h->del_count = 0;
- if (h->pfd_count == 0) {
- end_polling(pollset);
- return;
- }
- pollset->counter++;
+ h->fd_count = fd_count;
gpr_mu_unlock(&pollset->mu);
- for (i = 1; i < np; i++) {
- h->pfds[i].events = grpc_fd_begin_poll(h->watchers[i].fd, pollset, POLLIN,
- POLLOUT, &h->watchers[i]);
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd *fd = watchers[i].fd;
+ pfds[i].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
+ POLLOUT, &watchers[i]);
+ GRPC_FD_UNREF(fd, "multipoller_start");
}
- r = poll(h->pfds, h->pfd_count, timeout);
-
- end_polling(pollset);
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ r = grpc_poll_function(pfds, pfd_count, timeout);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
if (r < 0) {
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ }
} else if (r == 0) {
- /* do nothing */
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ }
} else {
- if (h->pfds[0].revents & POLLIN) {
- grpc_pollset_kick_consume(&pollset->kick_state, kfd);
+ if (pfds[0].revents & POLLIN_CHECK) {
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
- for (i = 1; i < np; i++) {
- if (h->watchers[i].fd == NULL) {
+ if (pfds[1].revents & POLLIN_CHECK) {
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ }
+ for (i = 2; i < pfd_count; i++) {
+ if (watchers[i].fd == NULL) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
continue;
}
- if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
- }
- if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
- }
+ grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
+ pfds[i].revents & POLLOUT_CHECK);
}
}
- grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);
-
- gpr_mu_lock(&pollset->mu);
- pollset->counter--;
-}
-static void multipoll_with_poll_pollset_kick(grpc_pollset *p) {
- grpc_pollset_force_kick(p);
+ gpr_free(pfds);
+ gpr_free(watchers);
}
static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
size_t i;
pollset_hdr *h = pollset->data.ptr;
- GPR_ASSERT(pollset->counter == 0);
for (i = 0; i < h->fd_count; i++) {
GRPC_FD_UNREF(h->fds[i], "multipoller");
}
@@ -217,8 +196,6 @@ static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
pollset_hdr *h = pollset->data.ptr;
multipoll_with_poll_pollset_finish_shutdown(pollset);
- gpr_free(h->pfds);
- gpr_free(h->watchers);
gpr_free(h->fds);
gpr_free(h->dels);
gpr_free(h);
@@ -226,13 +203,12 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_add_fd,
- multipoll_with_poll_pollset_del_fd,
- multipoll_with_poll_pollset_maybe_work,
- multipoll_with_poll_pollset_kick,
+ multipoll_with_poll_pollset_maybe_work_and_unlock,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};
-void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
+void grpc_poll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
@@ -241,10 +217,6 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
h->fd_count = nfds;
h->fd_capacity = nfds;
h->fds = gpr_malloc(nfds * sizeof(grpc_fd *));
- h->pfd_count = 0;
- h->pfd_capacity = 0;
- h->pfds = NULL;
- h->watchers = NULL;
h->del_count = 0;
h->del_capacity = 0;
h->dels = NULL;
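
The poll()-based multipoller now rebuilds its pollfd array on every pass instead of caching it in the pollset: slots 0 and 1 hold the global and per-worker wakeup fds, orphaned or deleted fds are compacted out, and the temporary arrays are freed after the pass. A simplified sketch of just the compaction step (plain C; is_deleted stands in for the orphan/del checks, and the fd-watcher bookkeeping is omitted):

#include <poll.h>
#include <stdbool.h>
#include <stddef.h>

/* Compact `fds` in place, dropping deleted entries, and fill `pfds` starting
   at slot 2 (slots 0 and 1 are reserved for the wakeup fds). Returns the new
   fd count; *pfd_count is the number of pollfd slots now in use. */
static size_t rebuild_pollfds(int *fds, size_t fd_count,
                              bool (*is_deleted)(int), struct pollfd *pfds,
                              size_t *pfd_count) {
  size_t kept = 0, np = 2;
  for (size_t i = 0; i < fd_count; i++) {
    if (is_deleted(fds[i])) continue;   /* dropped: no longer polled */
    fds[kept++] = fds[i];
    pfds[np].fd = fds[i];
    pfds[np].events = POLLIN | POLLOUT;
    pfds[np].revents = 0;
    np++;
  }
  *pfd_count = np;
  return kept;
}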
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index 15ed8e75e6..e895a77884 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,169 +38,368 @@
#include "src/core/iomgr/pollset_posix.h"
#include <errno.h>
-#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#include "src/core/iomgr/alarm_internal.h"
-#include "src/core/iomgr/fd_posix.h"
-#include "src/core/iomgr/iomgr_internal.h"
-#include "src/core/iomgr/socket_utils_posix.h"
-#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/block_annotate.h"
GPR_TLS_DECL(g_current_thread_poller);
+GPR_TLS_DECL(g_current_thread_worker);
-void grpc_pollset_kick(grpc_pollset *p) {
- if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p && p->counter) {
- p->vtable->kick(p);
- }
+/** Default poll() function - a pointer so that it can be overridden by some
+ * tests */
+grpc_poll_function_type grpc_poll_function = poll;
+
+/** The alarm system needs to be able to wakeup 'some poller' sometimes
+ * (specifically when a new alarm needs to be triggered earlier than the next
+ * alarm 'epoch').
+ * This wakeup_fd gives us something to alert on when such a case occurs. */
+grpc_wakeup_fd grpc_global_wakeup_fd;
+
+static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->prev->next = worker->next;
+ worker->next->prev = worker->prev;
}
-void grpc_pollset_force_kick(grpc_pollset *p) {
- if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
- grpc_pollset_kick_kick(&p->kick_state);
+int grpc_pollset_has_workers(grpc_pollset *p) {
+ return p->root_worker.next != &p->root_worker;
+}
+
+static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+ if (grpc_pollset_has_workers(p)) {
+ grpc_pollset_worker *w = p->root_worker.next;
+ remove_worker(p, w);
+ return w;
+ } else {
+ return NULL;
}
}
-static void kick_using_pollset_kick(grpc_pollset *p) {
- if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
- grpc_pollset_kick_kick(&p->kick_state);
+static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->next = &p->root_worker;
+ worker->prev = worker->next->prev;
+ worker->prev->next = worker->next->prev = worker;
+}
+
+static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->prev = &p->root_worker;
+ worker->next = worker->prev->next;
+ worker->prev->next = worker->next->prev = worker;
+}
+
+size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
+
+void grpc_pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags) {
+ GPR_TIMER_BEGIN("grpc_pollset_kick_ext", 0);
+
+ /* pollset->mu already held */
+ if (specific_worker != NULL) {
+ if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+ GPR_TIMER_BEGIN("grpc_pollset_kick_ext.broadcast", 0);
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
+ for (specific_worker = p->root_worker.next;
+ specific_worker != &p->root_worker;
+ specific_worker = specific_worker->next) {
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ }
+ p->kicked_without_pollers = 1;
+ GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
+ } else if (gpr_tls_get(&g_current_thread_worker) !=
+ (intptr_t)specific_worker) {
+ GPR_TIMER_MARK("different_thread_worker", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = 1;
+ }
+ specific_worker->kicked_specifically = 1;
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
+ GPR_TIMER_MARK("kick_yoself", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = 1;
+ }
+ specific_worker->kicked_specifically = 1;
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ }
+ } else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
+ GPR_TIMER_MARK("kick_anonymous", 0);
+ specific_worker = pop_front_worker(p);
+ if (specific_worker != NULL) {
+ if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
+ GPR_TIMER_MARK("kick_anonymous_not_self", 0);
+ push_back_worker(p, specific_worker);
+ specific_worker = pop_front_worker(p);
+ if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
+ gpr_tls_get(&g_current_thread_worker) ==
+ (intptr_t)specific_worker) {
+ push_back_worker(p, specific_worker);
+ specific_worker = NULL;
+ }
+ }
+ if (specific_worker != NULL) {
+ GPR_TIMER_MARK("finally_kick", 0);
+ push_back_worker(p, specific_worker);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ }
+ } else {
+ GPR_TIMER_MARK("kicked_no_pollers", 0);
+ p->kicked_without_pollers = 1;
+ }
}
+
+ GPR_TIMER_END("grpc_pollset_kick_ext", 0);
+}
+
+void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+ grpc_pollset_kick_ext(p, specific_worker, 0);
}
/* global state management */
void grpc_pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
-
- /* Initialize kick fd state */
- grpc_pollset_kick_global_init();
+ gpr_tls_init(&g_current_thread_worker);
+ grpc_wakeup_fd_global_init();
+ grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
}
void grpc_pollset_global_shutdown(void) {
- /* destroy the kick pipes */
- grpc_pollset_kick_global_destroy();
-
+ grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_poller);
+ gpr_tls_destroy(&g_current_thread_worker);
+ grpc_wakeup_fd_global_destroy();
}
+void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
+
/* main interface */
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
-void grpc_pollset_init(grpc_pollset *pollset) {
+void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
- grpc_pollset_kick_init(&pollset->kick_state);
+ *mu = &pollset->mu;
+ pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
+ pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
+ pollset->local_wakeup_cache = NULL;
+ pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
-void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
- gpr_mu_lock(&pollset->mu);
- pollset->vtable->add_fd(pollset, fd);
- gpr_mu_unlock(&pollset->mu);
+void grpc_pollset_destroy(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ while (pollset->local_wakeup_cache) {
+ grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
+ grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
+ gpr_free(pollset->local_wakeup_cache);
+ pollset->local_wakeup_cache = next;
+ }
}
-void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ pollset->shutting_down = 0;
+ pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
+ become_basic_pollset(pollset, NULL);
+}
+
+void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
+ gpr_mu_lock(&pollset->mu);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
+/* the following (enabled only in debug) will reacquire and then release
+ our lock - meaning that if the unlocking flag passed to add_fd above is
+ not respected, the code will deadlock (in a way that we have a chance of
+ debugging) */
+#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
- pollset->vtable->del_fd(pollset, fd);
gpr_mu_unlock(&pollset->mu);
+#endif
}
-static void finish_shutdown(grpc_pollset *pollset) {
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+ GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
pollset->vtable->finish_shutdown(pollset);
- pollset->shutdown_done_cb(pollset->shutdown_done_arg);
+ grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, true, NULL);
}
-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl, gpr_timespec now,
+ gpr_timespec deadline) {
+ grpc_pollset_worker worker;
+ *worker_hdl = &worker;
+
/* pollset->mu already held */
- gpr_timespec now = gpr_now();
- if (gpr_time_cmp(now, deadline) > 0) {
- return 0;
- }
- if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1)) {
- return 1;
+ int added_worker = 0;
+ int locked = 1;
+ int queued_work = 0;
+ int keep_polling = 0;
+ GPR_TIMER_BEGIN("grpc_pollset_work", 0);
+ /* this must happen before we (potentially) drop pollset->mu */
+ worker.next = worker.prev = NULL;
+ worker.reevaluate_polling_on_wakeup = 0;
+ if (pollset->local_wakeup_cache != NULL) {
+ worker.wakeup_fd = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker.wakeup_fd->next;
+ } else {
+ worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
+ grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
}
- if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
- return 1;
+ worker.kicked_specifically = 0;
+ /* If there's work waiting for the pollset to be idle, and the
+ pollset is idle, then do that work */
+ if (!grpc_pollset_has_workers(pollset) &&
+ !grpc_closure_list_empty(pollset->idle_jobs)) {
+ GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ goto done;
}
+ /* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
- return 1;
+ GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
+ goto done;
}
- gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
- pollset->vtable->maybe_work(pollset, deadline, now, 1);
- gpr_tls_set(&g_current_thread_poller, 0);
+ /* Give do_promote priority so we don't starve it out */
+ if (pollset->in_flight_cbs) {
+ GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
+ gpr_mu_unlock(&pollset->mu);
+ locked = 0;
+ goto done;
+ }
+ /* Start polling, and keep doing so while we're being asked to
+ re-evaluate our pollers (this allows poll() based pollers to
+ ensure they don't miss wakeups) */
+ keep_polling = 1;
+ while (keep_polling) {
+ keep_polling = 0;
+ if (!pollset->kicked_without_pollers) {
+ if (!added_worker) {
+ push_front_worker(pollset, &worker);
+ added_worker = 1;
+ gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
+ }
+ gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
+ GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
+ pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
+ deadline, now);
+ GPR_TIMER_END("maybe_work_and_unlock", 0);
+ locked = 0;
+ gpr_tls_set(&g_current_thread_poller, 0);
+ } else {
+ GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
+ pollset->kicked_without_pollers = 0;
+ }
+ /* Finished execution - start cleaning up.
+ Note that we may arrive here from outside the enclosing while() loop.
+       In that case we won't loop, though, as we haven't added worker to the
+       worker list, which means nobody could ask us to re-evaluate polling. */
+ done:
+ if (!locked) {
+ queued_work |= grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ locked = 1;
+ }
+ /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
+ a loop */
+ if (worker.reevaluate_polling_on_wakeup) {
+ worker.reevaluate_polling_on_wakeup = 0;
+ pollset->kicked_without_pollers = 0;
+ if (queued_work || worker.kicked_specifically) {
+ /* If there's queued work on the list, then set the deadline to be
+ immediate so we get back out of the polling loop quickly */
+ deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ }
+ keep_polling = 1;
+ }
+ }
+ if (added_worker) {
+ remove_worker(pollset, &worker);
+ gpr_tls_set(&g_current_thread_worker, 0);
+ }
+ /* release wakeup fd to the local pool */
+ worker.wakeup_fd->next = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker.wakeup_fd;
+ /* check shutdown conditions */
if (pollset->shutting_down) {
- if (pollset->counter > 0) {
- grpc_pollset_kick(pollset);
+ if (grpc_pollset_has_workers(pollset)) {
+ grpc_pollset_kick(pollset, NULL);
} else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
+ grpc_exec_ctx_flush(exec_ctx);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* grpc_pollset_work.
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
+ } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ gpr_mu_unlock(&pollset->mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
}
}
- return 1;
+ *worker_hdl = NULL;
+ GPR_TIMER_END("grpc_pollset_work", 0);
}
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg) {
- int call_shutdown = 0;
- gpr_mu_lock(&pollset->mu);
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
+ pollset->shutdown_done = closure;
+ grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ if (!grpc_pollset_has_workers(pollset)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ }
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
- pollset->counter == 0) {
+ !grpc_pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
- call_shutdown = 1;
- }
- pollset->shutdown_done_cb = shutdown_done;
- pollset->shutdown_done_arg = shutdown_done_arg;
- if (pollset->counter > 0) {
- grpc_pollset_kick(pollset);
- }
- gpr_mu_unlock(&pollset->mu);
-
- if (call_shutdown) {
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
}
}
-void grpc_pollset_destroy(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(pollset->in_flight_cbs == 0);
- pollset->vtable->destroy(pollset);
- grpc_pollset_kick_destroy(&pollset->kick_state);
- gpr_mu_destroy(&pollset->mu);
-}
-
-int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now) {
+int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
+ gpr_timespec now) {
gpr_timespec timeout;
- static const int max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
+ static const int64_t max_spin_polling_us = 10;
+ if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
return -1;
}
- if (gpr_time_cmp(
- deadline,
- gpr_time_add(now, gpr_time_from_micros(max_spin_polling_us))) <= 0) {
+ if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
+ max_spin_polling_us,
+ GPR_TIMESPAN))) <= 0) {
return 0;
}
timeout = gpr_time_sub(deadline, now);
- return gpr_time_to_millis(
- gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1)));
+ return gpr_time_to_millis(gpr_time_add(
+ timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
}
/*
@@ -212,15 +411,15 @@ typedef struct grpc_unary_promote_args {
const grpc_pollset_vtable *original_vtable;
grpc_pollset *pollset;
grpc_fd *fd;
- grpc_iomgr_closure promotion_closure;
+ grpc_closure promotion_closure;
} grpc_unary_promote_args;
-static void basic_do_promote(void *args, int success) {
+static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
+ bool success) {
grpc_unary_promote_args *up_args = args;
const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
grpc_pollset *pollset = up_args->pollset;
grpc_fd *fd = up_args->fd;
- int do_shutdown_cb = 0;
/*
* This is quite tricky. There are a number of cases to keep in mind here:
@@ -233,12 +432,7 @@ static void basic_do_promote(void *args, int success) {
gpr_mu_lock(&pollset->mu);
/* First we need to ensure that nobody is polling concurrently */
- if (pollset->counter != 0) {
- grpc_pollset_kick(pollset);
- grpc_iomgr_add_callback(&up_args->promotion_closure);
- gpr_mu_unlock(&pollset->mu);
- return;
- }
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
gpr_free(up_args);
/* At this point the pollset may no longer be a unary poller. In that case
@@ -249,20 +443,22 @@ static void basic_do_promote(void *args, int success) {
pollset->in_flight_cbs--;
if (pollset->shutting_down) {
/* We don't care about this pollset anymore. */
- if (pollset->in_flight_cbs == 0 && pollset->counter == 0) {
- do_shutdown_cb = 1;
+ if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
+ pollset->called_shutdown = 1;
+ finish_shutdown(exec_ctx, pollset);
}
} else if (grpc_fd_is_orphaned(fd)) {
/* Don't try to add it to anything, we'll drop our ref on it below */
} else if (pollset->vtable != original_vtable) {
- pollset->vtable->add_fd(pollset, fd);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
} else if (fd != pollset->data.ptr) {
grpc_fd *fds[2];
fds[0] = pollset->data.ptr;
fds[1] = fd;
if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
- grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
+ grpc_platform_become_multipoller(exec_ctx, pollset, fds,
+ GPR_ARRAY_SIZE(fds));
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
@@ -277,20 +473,17 @@ static void basic_do_promote(void *args, int success) {
gpr_mu_unlock(&pollset->mu);
- if (do_shutdown_cb) {
- pollset->shutdown_done_cb(pollset->shutdown_done_arg);
- }
-
/* Matching ref in basic_pollset_add_fd */
GRPC_FD_UNREF(fd, "basicpoll_add");
}
-static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
+static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd, int and_unlock_pollset) {
grpc_unary_promote_args *up_args;
GPR_ASSERT(fd);
- if (fd == pollset->data.ptr) return;
+ if (fd == pollset->data.ptr) goto exit;
- if (!pollset->counter) {
+ if (!grpc_pollset_has_workers(pollset)) {
/* Fast path -- no in flight cbs */
/* TODO(klempner): Comment this out and fix any test failures or establish
* they are due to timing issues */
@@ -302,7 +495,8 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
} else if (!grpc_fd_is_orphaned(fds[0])) {
- grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
+ grpc_platform_become_multipoller(exec_ctx, pollset, fds,
+ GPR_ARRAY_SIZE(fds));
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
@@ -311,7 +505,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
}
- return;
+ goto exit;
}
/* Now we need to promote. This needs to happen when we're not polling. Since
@@ -319,108 +513,105 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
GRPC_FD_REF(fd, "basicpoll_add");
pollset->in_flight_cbs++;
up_args = gpr_malloc(sizeof(*up_args));
- up_args->pollset = pollset;
up_args->fd = fd;
up_args->original_vtable = pollset->vtable;
+ up_args->pollset = pollset;
up_args->promotion_closure.cb = basic_do_promote;
up_args->promotion_closure.cb_arg = up_args;
- grpc_iomgr_add_callback(&up_args->promotion_closure);
- grpc_pollset_kick(pollset);
-}
+ grpc_closure_list_add(&pollset->idle_jobs, &up_args->promotion_closure, 1);
+ grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
- GPR_ASSERT(fd);
- if (fd == pollset->data.ptr) {
- GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
- pollset->data.ptr = NULL;
+exit:
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
}
}
-static void basic_pollset_maybe_work(grpc_pollset *pollset,
- gpr_timespec deadline, gpr_timespec now,
- int allow_synchronous_callback) {
- struct pollfd pfd[2];
+static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline,
+ gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
+
+ struct pollfd pfd[3];
grpc_fd *fd;
grpc_fd_watcher fd_watcher;
- grpc_kick_fd_info *kfd;
int timeout;
int r;
- int nfds;
+ nfds_t nfds;
- if (pollset->in_flight_cbs) {
- /* Give do_promote priority so we don't starve it out */
- gpr_mu_unlock(&pollset->mu);
- gpr_mu_lock(&pollset->mu);
- return;
- }
fd = pollset->data.ptr;
if (fd && grpc_fd_is_orphaned(fd)) {
GRPC_FD_UNREF(fd, "basicpoll");
fd = pollset->data.ptr = NULL;
}
timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
- kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
- if (kfd == NULL) {
- /* Already kicked */
- return;
- }
- pfd[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
+ pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfd[0].events = POLLIN;
pfd[0].revents = 0;
- nfds = 1;
- pollset->counter++;
+ pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfd[1].events = POLLIN;
+ pfd[1].revents = 0;
+ nfds = 2;
if (fd) {
- pfd[1].fd = fd->fd;
- pfd[1].revents = 0;
+ pfd[2].fd = fd->fd;
+ pfd[2].revents = 0;
+ GRPC_FD_REF(fd, "basicpoll_begin");
gpr_mu_unlock(&pollset->mu);
- pfd[1].events =
- grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
- if (pfd[1].events != 0) {
+ pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
+ POLLOUT, &fd_watcher);
+ if (pfd[2].events != 0) {
nfds++;
}
} else {
gpr_mu_unlock(&pollset->mu);
}
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
- r = poll(pfd, nfds, timeout);
- GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
-
- if (fd) {
- grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
- pfd[1].revents & POLLOUT);
- }
+ GPR_TIMER_BEGIN("poll", 0);
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ r = grpc_poll_function(pfd, nfds, timeout);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GPR_TIMER_END("poll", 0);
if (r < 0) {
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
+ if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ }
} else if (r == 0) {
- /* do nothing */
+ if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ }
} else {
- if (pfd[0].revents & POLLIN) {
- grpc_pollset_kick_consume(&pollset->kick_state, kfd);
+ if (pfd[0].revents & POLLIN_CHECK) {
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
- if (nfds > 1) {
- if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(fd, allow_synchronous_callback);
- }
- if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(fd, allow_synchronous_callback);
- }
+ if (pfd[1].revents & POLLIN_CHECK) {
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ }
+ if (nfds > 2) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
+ pfd[2].revents & POLLOUT_CHECK);
+ } else if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
}
- grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);
-
- gpr_mu_lock(&pollset->mu);
- pollset->counter--;
+ if (fd) {
+ GRPC_FD_UNREF(fd, "basicpoll_begin");
+ }
}
static void basic_pollset_destroy(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->counter == 0);
if (pollset->data.ptr != NULL) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
@@ -428,14 +619,13 @@ static void basic_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable basic_pollset = {
- basic_pollset_add_fd, basic_pollset_del_fd, basic_pollset_maybe_work,
- kick_using_pollset_kick, basic_pollset_destroy, basic_pollset_destroy};
+ basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
+ basic_pollset_destroy, basic_pollset_destroy};
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
pollset->vtable = &basic_pollset;
- pollset->counter = 0;
pollset->data.ptr = fd_or_null;
- if (fd_or_null) {
+ if (fd_or_null != NULL) {
GRPC_FD_REF(fd_or_null, "basicpoll");
}
}
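
The rewritten basic_pollset_maybe_work_and_unlock() above polls up to three descriptors: the global wakeup fd, the calling worker's cached wakeup fd, and (if present) the pollset's single tracked fd, so that grpc_pollset_kick() can interrupt a thread blocked in poll(). A minimal, self-contained sketch of that self-pipe wakeup technique follows; every name in it is illustrative and none of it is gRPC code.

    /* Self-pipe wakeup sketch: a poller blocks in poll() on a data fd plus the
       read end of a pipe; another thread "kicks" it by writing one byte. */
    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>

    typedef struct { int read_fd; int write_fd; } wakeup_fd;

    static int wakeup_fd_init(wakeup_fd *w) {
      int fds[2];
      if (pipe(fds) != 0) return -1;
      fcntl(fds[0], F_SETFL, O_NONBLOCK); /* so draining never blocks */
      w->read_fd = fds[0];
      w->write_fd = fds[1];
      return 0;
    }

    static void wakeup_fd_kick(wakeup_fd *w) {
      char c = 0;
      (void)write(w->write_fd, &c, 1); /* wakes any poll() watching read_fd */
    }

    static void wakeup_fd_consume(wakeup_fd *w) {
      char buf[64];
      while (read(w->read_fd, buf, sizeof(buf)) > 0) {
        /* drain all pending kicks */
      }
    }

    /* One polling pass; returns nonzero if data_fd became readable. */
    static int poll_once(wakeup_fd *w, int data_fd, int timeout_ms) {
      struct pollfd pfd[2];
      pfd[0].fd = w->read_fd; pfd[0].events = POLLIN; pfd[0].revents = 0;
      pfd[1].fd = data_fd;    pfd[1].events = POLLIN; pfd[1].revents = 0;
      if (poll(pfd, 2, timeout_ms) <= 0) return 0;
      if (pfd[0].revents & POLLIN) wakeup_fd_consume(w);
      return (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) != 0;
    }

The per-worker wakeup fd in the real code serves the same purpose, but it is recycled through pollset->local_wakeup_cache so that each grpc_pollset_work() call can reuse an fd instead of creating a fresh one.
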
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index 53585a2886..e0cfc44395 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,17 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_POSIX_H
+#define GRPC_CORE_IOMGR_POLLSET_POSIX_H
+
+#include <poll.h>
#include <grpc/support/sync.h>
-#include "src/core/iomgr/pollset_kick_posix.h"
+#include "src/core/iomgr/exec_ctx.h"
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@@ -45,71 +50,104 @@ typedef struct grpc_pollset_vtable grpc_pollset_vtable;
use the struct tag */
struct grpc_fd;
-typedef struct grpc_pollset {
+typedef struct grpc_cached_wakeup_fd {
+ grpc_wakeup_fd fd;
+ struct grpc_cached_wakeup_fd *next;
+} grpc_cached_wakeup_fd;
+
+struct grpc_pollset_worker {
+ grpc_cached_wakeup_fd *wakeup_fd;
+ int reevaluate_polling_on_wakeup;
+ int kicked_specifically;
+ struct grpc_pollset_worker *next;
+ struct grpc_pollset_worker *prev;
+};
+
+struct grpc_pollset {
/* pollsets under posix can mutate representation as fds are added and
removed.
For example, we may choose a poll() based implementation on linux for
few fds, and an epoll() based implementation for many fds */
const grpc_pollset_vtable *vtable;
gpr_mu mu;
- grpc_pollset_kick_state kick_state;
- int counter;
+ grpc_pollset_worker root_worker;
int in_flight_cbs;
int shutting_down;
int called_shutdown;
- void (*shutdown_done_cb)(void *arg);
- void *shutdown_done_arg;
+ int kicked_without_pollers;
+ grpc_closure *shutdown_done;
+ grpc_closure_list idle_jobs;
union {
int fd;
void *ptr;
} data;
-} grpc_pollset;
+ /* Local cache of eventfds for workers */
+ grpc_cached_wakeup_fd *local_wakeup_cache;
+};
struct grpc_pollset_vtable {
- void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
- void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
- void (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline,
- gpr_timespec now, int allow_synchronous_callback);
- void (*kick)(grpc_pollset *pollset);
+ void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd, int and_unlock_pollset);
+ void (*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now);
void (*finish_shutdown)(grpc_pollset *pollset);
void (*destroy)(grpc_pollset *pollset);
};
-#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
-
/* Add an fd to a pollset */
-void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd);
-/* Force remove an fd from a pollset (normally they are removed on the next
- poll after an fd is orphaned) */
-void grpc_pollset_del_fd(grpc_pollset *pollset, struct grpc_fd *fd);
-
-/* Force any current pollers to break polling: it's the callers responsibility
- to ensure that the pollset indeed needs to be kicked - no verification that
- the pollset is actually performing polling work is done. At worst this will
- result in spurious wakeups if performed at the wrong moment.
- Does not touch pollset->mu. */
-void grpc_pollset_force_kick(grpc_pollset *pollset);
+void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd);
+
/* Returns the fd to listen on for kicks */
int grpc_kick_read_fd(grpc_pollset *p);
/* Call after polling has been kicked to leave the kicked state */
void grpc_kick_drain(grpc_pollset *p);
/* Convert a timespec to milliseconds:
- - very small or negative poll times are clamped to zero to do a
+ - very small or negative poll times are clamped to zero to do a
non-blocking poll (which becomes spin polling)
- other small values are rounded up to one millisecond
- - longer than a millisecond polls are rounded up to the next nearest
+ - longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
-int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now);
+int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
+ gpr_timespec now);
+
+/* Allow kick to wakeup the currently polling worker */
+#define GRPC_POLLSET_CAN_KICK_SELF 1
+/* Force the wakee to repoll when awoken */
+#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
+/* As per grpc_pollset_kick, with an extended set of flags (defined above)
+ -- mostly for fd_posix's use. */
+void grpc_pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags);
/* turn a pollset into a multipoller: platform specific */
-typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,
+typedef void (*grpc_platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
struct grpc_fd **fds,
size_t fd_count);
extern grpc_platform_become_multipoller_type grpc_platform_become_multipoller;
-void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
+void grpc_poll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, struct grpc_fd **fds,
size_t fd_count);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
+/* Return 1 if the pollset has active threads in grpc_pollset_work (pollset must
+ * be locked) */
+int grpc_pollset_has_workers(grpc_pollset *pollset);
+
+void grpc_remove_fd_from_all_epoll_sets(int fd);
+
+/* override to allow tests to hook poll() usage */
+/* NOTE: Any changes to grpc_poll_function must take place when gRPC
+ is certainly not doing any polling anywhere.
+ Otherwise, there might be a race between changing the variable and actually
+ doing a polling operation */
+typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
+extern grpc_poll_function_type grpc_poll_function;
+extern grpc_wakeup_fd grpc_global_wakeup_fd;
+
+#endif /* GRPC_CORE_IOMGR_POLLSET_POSIX_H */
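
grpc_pollset now tracks its waiting threads on an intrusive doubly-linked ring: root_worker is a sentinel embedded in the pollset, and each grpc_pollset_work() call links a stack-allocated grpc_pollset_worker into it, making insertion and removal O(1) and allocation-free, while emptiness is a single pointer comparison (what grpc_pollset_has_workers() reports). A simplified standalone sketch of that structure is below; the names are illustrative, not gRPC's.

    /* Intrusive worker ring with a sentinel root (illustrative sketch). */
    #include <stddef.h>

    typedef struct worker {
      struct worker *next;
      struct worker *prev;
    } worker;

    static void ring_init(worker *root) { root->next = root->prev = root; }

    static int ring_has_workers(const worker *root) { return root->next != root; }

    static void ring_push_front(worker *root, worker *w) {
      w->next = root->next;
      w->prev = root;
      w->next->prev = w;
      root->next = w;
    }

    static void ring_remove(worker *w) {
      w->prev->next = w->next;
      w->next->prev = w->prev;
      w->next = w->prev = w;
    }

    static worker *ring_pop_front(worker *root) {
      worker *w;
      if (!ring_has_workers(root)) return NULL;
      w = root->next;
      ring_remove(w);
      return w;
    }
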
diff --git a/src/core/iomgr/pollset_set.h b/src/core/iomgr/pollset_set.h
index 98e3b552a7..204c625933 100644
--- a/src/core/iomgr/pollset_set.h
+++ b/src/core/iomgr/pollset_set.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,29 +31,31 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_SET_H
+#define GRPC_CORE_IOMGR_POLLSET_SET_H
#include "src/core/iomgr/pollset.h"
/* A grpc_pollset_set is a set of pollsets that are interested in an
action. Adding a pollset to a pollset_set automatically adds any
- fd's (etc) that have been registered with the set_set with that pollset.
+ fd's (etc) that have been registered with the pollset_set to that pollset.
Registering fd's automatically adds them to all current pollsets. */
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/iomgr/pollset_set_posix.h"
-#endif
+typedef struct grpc_pollset_set grpc_pollset_set;
-#ifdef GPR_WIN32
-#include "src/core/iomgr/pollset_set_windows.h"
-#endif
-
-void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
+grpc_pollset_set *grpc_pollset_set_create(void);
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item);
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
+#endif /* GRPC_CORE_IOMGR_POLLSET_SET_H */
diff --git a/src/core/iomgr/pollset_set_posix.c b/src/core/iomgr/pollset_set_posix.c
index 005e938398..9dc9aff4a8 100644
--- a/src/core/iomgr/pollset_set_posix.c
+++ b/src/core/iomgr/pollset_set_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,26 +41,48 @@
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
-#include "src/core/iomgr/pollset_set.h"
+#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/pollset_set_posix.h"
-void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {
+struct grpc_pollset_set {
+ gpr_mu mu;
+
+ size_t pollset_count;
+ size_t pollset_capacity;
+ grpc_pollset **pollsets;
+
+ size_t pollset_set_count;
+ size_t pollset_set_capacity;
+ struct grpc_pollset_set **pollset_sets;
+
+ size_t fd_count;
+ size_t fd_capacity;
+ grpc_fd **fds;
+};
+
+grpc_pollset_set *grpc_pollset_set_create(void) {
+ grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
memset(pollset_set, 0, sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
+ return pollset_set;
}
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
gpr_free(pollset_set->pollsets);
+ gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
+ gpr_free(pollset_set);
}
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
- size_t i;
+ size_t i, j;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
@@ -70,13 +92,20 @@ void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
- for (i = 0; i < pollset_set->fd_count; i++) {
- grpc_pollset_add_fd(pollset, pollset_set->fds[i]);
+ for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
+ if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+ } else {
+ grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
+ pollset_set->fds[j++] = pollset_set->fds[i];
+ }
}
+ pollset_set->fd_count = j;
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
@@ -91,7 +120,48 @@ void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i, j;
+ gpr_mu_lock(&bag->mu);
+ if (bag->pollset_set_count == bag->pollset_set_capacity) {
+ bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
+ bag->pollset_sets =
+ gpr_realloc(bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ }
+ bag->pollset_sets[bag->pollset_set_count++] = item;
+ for (i = 0, j = 0; i < bag->fd_count; i++) {
+ if (grpc_fd_is_orphaned(bag->fds[i])) {
+ GRPC_FD_UNREF(bag->fds[i], "pollset_set");
+ } else {
+ grpc_pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
+ bag->fds[j++] = bag->fds[i];
+ }
+ }
+ bag->fd_count = j;
+ gpr_mu_unlock(&bag->mu);
+}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i;
+ gpr_mu_lock(&bag->mu);
+ for (i = 0; i < bag->pollset_set_count; i++) {
+ if (bag->pollset_sets[i] == item) {
+ bag->pollset_set_count--;
+ GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
+ bag->pollset_sets[bag->pollset_set_count]);
+ break;
+ }
+ }
+ gpr_mu_unlock(&bag->mu);
+}
+
+void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
@@ -102,23 +172,30 @@ void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
GRPC_FD_REF(fd, "pollset_set");
pollset_set->fds[pollset_set->fd_count++] = fd;
for (i = 0; i < pollset_set->pollset_count; i++) {
- grpc_pollset_add_fd(pollset_set->pollsets[i], fd);
+ grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
+ }
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ grpc_pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
if (pollset_set->fds[i] == fd) {
pollset_set->fd_count--;
GPR_SWAP(grpc_fd *, pollset_set->fds[i],
- pollset_set->fds[pollset_set->pollset_count]);
+ pollset_set->fds[pollset_set->fd_count]);
GRPC_FD_UNREF(fd, "pollset_set");
break;
}
}
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ grpc_pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+ }
gpr_mu_unlock(&pollset_set->mu);
}
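
Two small array idioms recur throughout this file: append with capacity doubling (GPR_MAX(8, 2 * capacity) followed by gpr_realloc) and unordered removal by swapping the victim with the last element, which is also what the hunk above fixes in grpc_pollset_set_del_fd (swap with fds[fd_count], not fds[pollset_count]). A standalone sketch of both idioms, with illustrative names and standard-library calls rather than gpr_ helpers:

    /* Grow-by-doubling append and swap-with-last removal (illustrative). */
    #include <stdlib.h>

    typedef struct {
      void **items;
      size_t count;
      size_t capacity;
    } ptr_array;

    static void ptr_array_append(ptr_array *a, void *item) {
      if (a->count == a->capacity) {
        a->capacity = a->capacity < 8 ? 8 : 2 * a->capacity;
        a->items = realloc(a->items, a->capacity * sizeof(*a->items));
      }
      a->items[a->count++] = item;
    }

    /* Order is not preserved, which is fine for a set-like container. */
    static void ptr_array_remove(ptr_array *a, void *item) {
      size_t i;
      for (i = 0; i < a->count; i++) {
        if (a->items[i] == item) {
          a->items[i] = a->items[--a->count];
          return;
        }
      }
    }

The orphan check while re-adding fds (GRPC_FD_UNREF on orphaned entries, compacting with the i/j two-index walk) applies the same compaction lazily: dead entries are dropped the next time the array is traversed.
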
diff --git a/src/core/iomgr/pollset_set_posix.h b/src/core/iomgr/pollset_set_posix.h
index e88740bde1..80f487718e 100644
--- a/src/core/iomgr/pollset_set_posix.h
+++ b/src/core/iomgr/pollset_set_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,25 +31,15 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_SET_POSIX_H
+#define GRPC_CORE_IOMGR_POLLSET_SET_POSIX_H
#include "src/core/iomgr/fd_posix.h"
-#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/pollset_set.h"
-typedef struct grpc_pollset_set {
- gpr_mu mu;
+void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd);
+void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd);
- size_t pollset_count;
- size_t pollset_capacity;
- grpc_pollset **pollsets;
-
- size_t fd_count;
- size_t fd_capacity;
- grpc_fd **fds;
-} grpc_pollset_set;
-
-void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
-void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_POLLSET_SET_POSIX_H */
diff --git a/src/core/iomgr/pollset_set_windows.c b/src/core/iomgr/pollset_set_windows.c
index b9c209cd2c..3b8eca28e6 100644
--- a/src/core/iomgr/pollset_set_windows.c
+++ b/src/core/iomgr/pollset_set_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,16 +35,26 @@
#ifdef GPR_WINSOCK_SOCKET
-#include "src/core/iomgr/pollset_set.h"
+#include "src/core/iomgr/pollset_set_windows.h"
-void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {}
+grpc_pollset_set* grpc_pollset_set_create(void) { return NULL; }
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {}
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {}
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {}
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/pollset_set_windows.h b/src/core/iomgr/pollset_set_windows.h
index cada0d2b61..0f040fef82 100644
--- a/src/core/iomgr/pollset_set_windows.h
+++ b/src/core/iomgr/pollset_set_windows.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,9 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_SET_WINDOWS_H
+#define GRPC_CORE_IOMGR_POLLSET_SET_WINDOWS_H
-typedef struct grpc_pollset_set { void *unused; } grpc_pollset_set;
+#include "src/core/iomgr/pollset_set.h"
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_POLLSET_SET_WINDOWS_H */
diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c
index 8d6bc79c96..c7f30f435f 100644
--- a/src/core/iomgr/pollset_windows.c
+++ b/src/core/iomgr/pollset_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,59 +35,206 @@
#ifdef GPR_WINSOCK_SOCKET
+#include <grpc/support/log.h>
#include <grpc/support/thd.h>
-#include "src/core/iomgr/alarm_internal.h"
#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_windows.h"
+gpr_mu grpc_polling_mu;
+static grpc_pollset_worker *g_active_poller;
+static grpc_pollset_worker g_global_root_worker;
+
+void grpc_pollset_global_init() {
+ gpr_mu_init(&grpc_polling_mu);
+ g_active_poller = NULL;
+ g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
+ g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev =
+ &g_global_root_worker;
+}
+
+void grpc_pollset_global_shutdown() { gpr_mu_destroy(&grpc_polling_mu); }
+
+static void remove_worker(grpc_pollset_worker *worker,
+ grpc_pollset_worker_link_type type) {
+ worker->links[type].prev->links[type].next = worker->links[type].next;
+ worker->links[type].next->links[type].prev = worker->links[type].prev;
+ worker->links[type].next = worker->links[type].prev = worker;
+}
+
+static int has_workers(grpc_pollset_worker *root,
+ grpc_pollset_worker_link_type type) {
+ return root->links[type].next != root;
+}
+
+static grpc_pollset_worker *pop_front_worker(
+ grpc_pollset_worker *root, grpc_pollset_worker_link_type type) {
+ if (has_workers(root, type)) {
+ grpc_pollset_worker *w = root->links[type].next;
+ remove_worker(w, type);
+ return w;
+ } else {
+ return NULL;
+ }
+}
+
+static void push_front_worker(grpc_pollset_worker *root,
+ grpc_pollset_worker_link_type type,
+ grpc_pollset_worker *worker) {
+ worker->links[type].prev = root;
+ worker->links[type].next = worker->links[type].prev->links[type].next;
+ worker->links[type].prev->links[type].next =
+ worker->links[type].next->links[type].prev = worker;
+}
+
+size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
+
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. We're still going to provide a minimal
set of features for the sake of the rest of grpc. But grpc_pollset_work
won't actually do any polling, and return as quickly as possible. */
-void grpc_pollset_init(grpc_pollset *pollset) {
+void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+ *mu = &grpc_polling_mu;
memset(pollset, 0, sizeof(*pollset));
- gpr_mu_init(&pollset->mu);
- gpr_cv_init(&pollset->cv);
+ pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
+ pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
+ &pollset->root_worker;
}
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg) {
- gpr_mu_lock(&pollset->mu);
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
pollset->shutting_down = 1;
- gpr_cv_broadcast(&pollset->cv);
- gpr_mu_unlock(&pollset->mu);
- shutdown_done(shutdown_done_arg);
+ grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ if (!pollset->is_iocp_worker) {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
+ } else {
+ pollset->on_shutdown = closure;
+ }
}
-void grpc_pollset_destroy(grpc_pollset *pollset) {
- gpr_mu_destroy(&pollset->mu);
- gpr_cv_destroy(&pollset->cv);
+void grpc_pollset_destroy(grpc_pollset *pollset) {}
+
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ GPR_ASSERT(
+ !has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET));
+ pollset->shutting_down = 0;
+ pollset->is_iocp_worker = 0;
+ pollset->kicked_without_pollers = 0;
+ pollset->on_shutdown = NULL;
}
-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
- gpr_timespec now;
- now = gpr_now();
- if (gpr_time_cmp(now, deadline) > 0) {
- return 0 /* GPR_FALSE */;
- }
- if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
- return 1 /* GPR_TRUE */;
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl, gpr_timespec now,
+ gpr_timespec deadline) {
+ grpc_pollset_worker worker;
+ *worker_hdl = &worker;
+
+ int added_worker = 0;
+ worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
+ worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
+ worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
+ worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
+ worker.kicked = 0;
+ worker.pollset = pollset;
+ gpr_cv_init(&worker.cv);
+ if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
+ if (g_active_poller == NULL) {
+ grpc_pollset_worker *next_worker;
+ /* become poller */
+ pollset->is_iocp_worker = 1;
+ g_active_poller = &worker;
+ gpr_mu_unlock(&grpc_polling_mu);
+ grpc_iocp_work(exec_ctx, deadline);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&grpc_polling_mu);
+ pollset->is_iocp_worker = 0;
+ g_active_poller = NULL;
+ /* try to get a worker from this pollset's worker list */
+ next_worker = pop_front_worker(&pollset->root_worker,
+ GRPC_POLLSET_WORKER_LINK_POLLSET);
+ if (next_worker == NULL) {
+ /* try to get a worker from the global list */
+ next_worker = pop_front_worker(&g_global_root_worker,
+ GRPC_POLLSET_WORKER_LINK_GLOBAL);
+ }
+ if (next_worker != NULL) {
+ next_worker->kicked = 1;
+ gpr_cv_signal(&next_worker->cv);
+ }
+
+ if (pollset->shutting_down && pollset->on_shutdown != NULL) {
+ grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, true, NULL);
+ pollset->on_shutdown = NULL;
+ }
+ goto done;
+ }
+ push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
+ &worker);
+ push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
+ &worker);
+ added_worker = 1;
+ while (!worker.kicked) {
+ if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) {
+ break;
+ }
+ }
+ } else {
+ pollset->kicked_without_pollers = 0;
}
- if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
- return 1 /* GPR_TRUE */;
+done:
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ gpr_mu_unlock(&grpc_polling_mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&grpc_polling_mu);
}
- if (!pollset->shutting_down) {
- gpr_cv_wait(&pollset->cv, &pollset->mu, deadline);
+ if (added_worker) {
+ remove_worker(&worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
+ remove_worker(&worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
}
- return 1 /* GPR_TRUE */;
+ gpr_cv_destroy(&worker.cv);
+ *worker_hdl = NULL;
}
-void grpc_pollset_kick(grpc_pollset *p) {
- gpr_cv_signal(&p->cv);
+void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+ if (specific_worker != NULL) {
+ if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+ for (specific_worker =
+ p->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next;
+ specific_worker != &p->root_worker;
+ specific_worker =
+ specific_worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next) {
+ specific_worker->kicked = 1;
+ gpr_cv_signal(&specific_worker->cv);
+ }
+ p->kicked_without_pollers = 1;
+ if (p->is_iocp_worker) {
+ grpc_iocp_kick();
+ }
+ } else {
+ if (p->is_iocp_worker && g_active_poller == specific_worker) {
+ grpc_iocp_kick();
+ } else {
+ specific_worker->kicked = 1;
+ gpr_cv_signal(&specific_worker->cv);
+ }
+ }
+ } else {
+ specific_worker =
+ pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
+ if (specific_worker != NULL) {
+ grpc_pollset_kick(p, specific_worker);
+ } else if (p->is_iocp_worker) {
+ grpc_iocp_kick();
+ } else {
+ p->kicked_without_pollers = 1;
+ }
+ }
}
+void grpc_kick_poller(void) { grpc_iocp_kick(); }
+
#endif /* GPR_WINSOCK_SOCKET */
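
On Windows there is nothing to poll(): grpc_pollset_work() either becomes the active IOCP poller and runs grpc_iocp_work(), or parks the calling thread on a per-worker condition variable until grpc_pollset_kick() marks it kicked and signals it. A hedged pthread analogue of that park/kick handshake is sketched below; the real code uses gpr_cv/gpr_mu and the worker links shown above, and these names are illustrative only.

    /* Park a worker until it is kicked or its absolute deadline passes. */
    #include <pthread.h>
    #include <time.h>

    typedef struct {
      pthread_cond_t cv;
      int kicked;
    } cv_worker;

    static void worker_init(cv_worker *w) {
      pthread_cond_init(&w->cv, NULL);
      w->kicked = 0;
    }

    /* Caller holds mu. Returns once kicked or once the deadline expires. */
    static void worker_wait(cv_worker *w, pthread_mutex_t *mu,
                            const struct timespec *deadline) {
      while (!w->kicked) {
        if (pthread_cond_timedwait(&w->cv, mu, deadline) != 0) {
          break; /* timed out (or error): give up waiting */
        }
      }
    }

    /* Caller holds mu. */
    static void worker_kick(cv_worker *w) {
      w->kicked = 1;
      pthread_cond_signal(&w->cv);
    }

The kicked flag is what lets grpc_pollset_kick() with GRPC_POLLSET_KICK_BROADCAST wake every worker: each one is flagged before being signalled, so none of them goes back to sleep on a spurious wakeup.
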
diff --git a/src/core/iomgr/pollset_windows.h b/src/core/iomgr/pollset_windows.h
index 57a2907926..f1d1585922 100644
--- a/src/core/iomgr/pollset_windows.h
+++ b/src/core/iomgr/pollset_windows.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,24 +31,45 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H
+#ifndef GRPC_CORE_IOMGR_POLLSET_WINDOWS_H
+#define GRPC_CORE_IOMGR_POLLSET_WINDOWS_H
-#include <windows.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/socket_windows.h"
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. A Windows "pollset" is merely a mutex
- and a condition variable, used to synchronize with the IOCP. */
+ used to synchronize with the IOCP, and workers are condition variables
+ used to block threads until work is ready. */
-typedef struct grpc_pollset {
- gpr_mu mu;
+typedef enum {
+ GRPC_POLLSET_WORKER_LINK_POLLSET = 0,
+ GRPC_POLLSET_WORKER_LINK_GLOBAL,
+ GRPC_POLLSET_WORKER_LINK_TYPES
+} grpc_pollset_worker_link_type;
+
+typedef struct grpc_pollset_worker_link {
+ struct grpc_pollset_worker *next;
+ struct grpc_pollset_worker *prev;
+} grpc_pollset_worker_link;
+
+struct grpc_pollset;
+typedef struct grpc_pollset grpc_pollset;
+
+typedef struct grpc_pollset_worker {
gpr_cv cv;
- int shutting_down;
-} grpc_pollset;
+ int kicked;
+ struct grpc_pollset *pollset;
+ grpc_pollset_worker_link links[GRPC_POLLSET_WORKER_LINK_TYPES];
+} grpc_pollset_worker;
-#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
+struct grpc_pollset {
+ int shutting_down;
+ int kicked_without_pollers;
+ int is_iocp_worker;
+ grpc_pollset_worker root_worker;
+ grpc_closure *on_shutdown;
+};
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_POLLSET_WINDOWS_H */
diff --git a/src/core/iomgr/resolve_address.h b/src/core/iomgr/resolve_address.h
index 8f1d7a22bb..aa0d7d194b 100644
--- a/src/core/iomgr/resolve_address.h
+++ b/src/core/iomgr/resolve_address.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,16 +31,18 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H
-#define GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H
+#ifndef GRPC_CORE_IOMGR_RESOLVE_ADDRESS_H
+#define GRPC_CORE_IOMGR_RESOLVE_ADDRESS_H
#include <stddef.h>
+#include "src/core/iomgr/exec_ctx.h"
+#include "src/core/iomgr/iomgr.h"
#define GRPC_MAX_SOCKADDR_SIZE 128
typedef struct {
char addr[GRPC_MAX_SOCKADDR_SIZE];
- int len;
+ size_t len;
} grpc_resolved_address;
typedef struct {
@@ -52,7 +54,8 @@ typedef struct {
On success: addresses is the result, and the callee must call
grpc_resolved_addresses_destroy when it's done with them
On failure: addresses is NULL */
-typedef void (*grpc_resolve_cb)(void *arg, grpc_resolved_addresses *addresses);
+typedef void (*grpc_resolve_cb)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses);
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
@@ -63,7 +66,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addresses);
/* Resolve addr in a blocking fashion. Returns NULL on failure. On success,
result must be freed with grpc_resolved_addresses_destroy. */
-grpc_resolved_addresses *grpc_blocking_resolve_address(
- const char *addr, const char *default_port);
+extern grpc_resolved_addresses *(*grpc_blocking_resolve_address)(
+ const char *name, const char *default_port);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H */
+#endif /* GRPC_CORE_IOMGR_RESOLVE_ADDRESS_H */
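
grpc_blocking_resolve_address changes from a plain function into an extern function pointer that each platform file initializes to its real implementation, so tests can substitute a fake resolver without touching call sites. A tiny self-contained illustration of that injection pattern (all names here are made up for the example):

    #include <stdio.h>

    typedef int (*resolve_fn)(const char *name);

    static int real_resolve(const char *name) {
      printf("resolving %s for real\n", name);
      return 0;
    }

    /* Every caller goes through this pointer. */
    static resolve_fn blocking_resolve = real_resolve;

    static int fake_resolve(const char *name) {
      printf("pretending to resolve %s\n", name);
      return 0;
    }

    int main(void) {
      blocking_resolve("example.org"); /* default: real implementation */
      blocking_resolve = fake_resolve; /* a test injects its fake */
      blocking_resolve("example.org"); /* now the fake runs */
      return 0;
    }

As the note on grpc_poll_function in pollset_posix.h points out, such a pointer should only be swapped while nothing can be calling through it; otherwise the write races with the reads.
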
diff --git a/src/core/iomgr/resolve_address_posix.c b/src/core/iomgr/resolve_address_posix.c
index dbf884c769..a6c9893f23 100644
--- a/src/core/iomgr/resolve_address_posix.c
+++ b/src/core/iomgr/resolve_address_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,32 +34,35 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
-#include "src/core/iomgr/sockaddr.h"
#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/sockaddr.h"
+#include <string.h>
#include <sys/types.h>
#include <sys/un.h>
-#include <string.h>
-#include "src/core/iomgr/iomgr_internal.h"
-#include "src/core/iomgr/sockaddr_utils.h"
-#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/iomgr/executor.h"
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/support/block_annotate.h"
+#include "src/core/support/string.h"
typedef struct {
char *name;
char *default_port;
grpc_resolve_cb cb;
+ grpc_closure request_closure;
void *arg;
- grpc_iomgr_object iomgr_object;
} request;
-grpc_resolved_addresses *grpc_blocking_resolve_address(
+static grpc_resolved_addresses *blocking_resolve_address_impl(
const char *name, const char *default_port) {
struct addrinfo hints;
struct addrinfo *result = NULL, *resp;
@@ -102,17 +105,18 @@ grpc_resolved_addresses *grpc_blocking_resolve_address(
hints.ai_socktype = SOCK_STREAM; /* stream socket */
hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+
if (s != 0) {
/* Retry if well-known service name is recognized */
- char *svc[][2] = {
- {"http", "80"},
- {"https", "443"}
- };
- int i;
- for (i = 0; i < (int)(sizeof(svc) / sizeof(svc[0])); i++) {
+ char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, svc[i][1], &hints, &result);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
break;
}
}
@@ -146,8 +150,12 @@ done:
return addrs;
}
-/* Thread function to asynch-ify grpc_blocking_resolve_address */
-static void do_request(void *rp) {
+grpc_resolved_addresses *(*grpc_blocking_resolve_address)(
+ const char *name, const char *default_port) = blocking_resolve_address_impl;
+
+/* Callback to be passed to grpc_executor to asynch-ify
+ * grpc_blocking_resolve_address */
+static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, bool success) {
request *r = rp;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);
@@ -155,8 +163,7 @@ static void do_request(void *rp) {
grpc_resolve_cb cb = r->cb;
gpr_free(r->name);
gpr_free(r->default_port);
- cb(arg, resolved);
- grpc_iomgr_unregister_object(&r->iomgr_object);
+ cb(exec_ctx, arg, resolved);
gpr_free(r);
}
@@ -168,17 +175,12 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
void grpc_resolve_address(const char *name, const char *default_port,
grpc_resolve_cb cb, void *arg) {
request *r = gpr_malloc(sizeof(request));
- gpr_thd_id id;
- char *tmp;
- gpr_asprintf(&tmp, "resolve_address:name='%s':default_port='%s'", name,
- default_port);
- grpc_iomgr_register_object(&r->iomgr_object, tmp);
- gpr_free(tmp);
+ grpc_closure_init(&r->request_closure, do_request_thread, r);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->cb = cb;
r->arg = arg;
- gpr_thd_new(&id, do_request, r, NULL);
+ grpc_executor_enqueue(&r->request_closure, 1);
}
#endif
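
The resolver keeps a blocking getaddrinfo() at its core, but instead of spawning a thread per request it now wraps each request in a grpc_closure and hands it to grpc_executor_enqueue, bracketing the blocking call with the GRPC_SCHEDULING_*_BLOCKING_REGION annotations. A minimal standalone example of the underlying getaddrinfo() usage (not gRPC code; host and port are arbitrary):

    #include <netdb.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    int main(void) {
      struct addrinfo hints, *result, *rp;
      size_t n = 0;
      int s;

      memset(&hints, 0, sizeof(hints));
      hints.ai_family = AF_UNSPEC;     /* IPv4 or IPv6 */
      hints.ai_socktype = SOCK_STREAM; /* stream socket */

      s = getaddrinfo("localhost", "443", &hints, &result);
      if (s != 0) {
        fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(s));
        return 1;
      }
      for (rp = result; rp != NULL; rp = rp->ai_next) {
        n++; /* each entry carries a sockaddr and its length */
      }
      printf("resolved %zu address(es)\n", n);
      freeaddrinfo(result);
      return 0;
    }

The retry loop above ("http" -> "80", "https" -> "443") exists because getaddrinfo() on some systems does not map those service names; retrying with the numeric port keeps resolution working there.
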
diff --git a/src/core/iomgr/resolve_address_windows.c b/src/core/iomgr/resolve_address_windows.c
index fb5fd0d4f6..472e797163 100644
--- a/src/core/iomgr/resolve_address_windows.c
+++ b/src/core/iomgr/resolve_address_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,31 +34,34 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_WINSOCK_SOCKET
-#include "src/core/iomgr/sockaddr.h"
#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/sockaddr.h"
-#include <sys/types.h>
#include <string.h>
+#include <sys/types.h>
-#include "src/core/iomgr/iomgr_internal.h"
-#include "src/core/iomgr/sockaddr_utils.h"
-#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
+#include <grpc/support/log_win32.h>
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
+#include "src/core/iomgr/executor.h"
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/support/block_annotate.h"
+#include "src/core/support/string.h"
typedef struct {
char *name;
char *default_port;
grpc_resolve_cb cb;
+ grpc_closure request_closure;
void *arg;
- grpc_iomgr_object iomgr_object;
} request;
-grpc_resolved_addresses *grpc_blocking_resolve_address(
+static grpc_resolved_addresses *blocking_resolve_address_impl(
const char *name, const char *default_port) {
struct addrinfo hints;
struct addrinfo *result = NULL, *resp;
@@ -88,9 +91,13 @@ grpc_resolved_addresses *grpc_blocking_resolve_address(
hints.ai_socktype = SOCK_STREAM; /* stream socket */
hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
if (s != 0) {
- gpr_log(GPR_ERROR, "getaddrinfo: %s", gai_strerror(s));
+ char *error_message = gpr_format_message(s);
+ gpr_log(GPR_ERROR, "getaddrinfo: %s", error_message);
+ gpr_free(error_message);
goto done;
}
@@ -126,8 +133,12 @@ done:
return addrs;
}
-/* Thread function to asynch-ify grpc_blocking_resolve_address */
-static void do_request(void *rp) {
+grpc_resolved_addresses *(*grpc_blocking_resolve_address)(
+ const char *name, const char *default_port) = blocking_resolve_address_impl;
+
+/* Callback to be passed to grpc_executor to asynch-ify
+ * grpc_blocking_resolve_address */
+static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, bool success) {
request *r = rp;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);
@@ -135,9 +146,8 @@ static void do_request(void *rp) {
grpc_resolve_cb cb = r->cb;
gpr_free(r->name);
gpr_free(r->default_port);
- grpc_iomgr_unregister_object(&r->iomgr_object);
+ cb(exec_ctx, arg, resolved);
gpr_free(r);
- cb(arg, resolved);
}
void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
@@ -148,16 +158,12 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
void grpc_resolve_address(const char *name, const char *default_port,
grpc_resolve_cb cb, void *arg) {
request *r = gpr_malloc(sizeof(request));
- gpr_thd_id id;
- const char *label;
- gpr_asprintf(&label, "resolve:%s", name);
- grpc_iomgr_register_object(&r->iomgr_object, label);
- gpr_free(label);
+ grpc_closure_init(&r->request_closure, do_request_thread, r);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->cb = cb;
r->arg = arg;
- gpr_thd_new(&id, do_request, r, NULL);
+ grpc_executor_enqueue(&r->request_closure, 1);
}
#endif
diff --git a/src/core/iomgr/sockaddr.h b/src/core/iomgr/sockaddr.h
index 7528db73b8..68241bdd55 100644
--- a/src/core/iomgr/sockaddr.h
+++ b/src/core/iomgr/sockaddr.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_H
+#ifndef GRPC_CORE_IOMGR_SOCKADDR_H
+#define GRPC_CORE_IOMGR_SOCKADDR_H
#include <grpc/support/port_platform.h>
@@ -44,4 +44,4 @@
#include "src/core/iomgr/sockaddr_posix.h"
#endif
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_H */
+#endif /* GRPC_CORE_IOMGR_SOCKADDR_H */
diff --git a/src/core/iomgr/sockaddr_posix.h b/src/core/iomgr/sockaddr_posix.h
index 2a3d932f70..e4425ed735 100644
--- a/src/core/iomgr/sockaddr_posix.h
+++ b/src/core/iomgr/sockaddr_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_POSIX_H
+#ifndef GRPC_CORE_IOMGR_SOCKADDR_POSIX_H
+#define GRPC_CORE_IOMGR_SOCKADDR_POSIX_H
#include <arpa/inet.h>
#include <sys/socket.h>
@@ -41,4 +41,4 @@
#include <netdb.h>
#include <unistd.h>
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_POSIX_H */
+#endif /* GRPC_CORE_IOMGR_SOCKADDR_POSIX_H */
diff --git a/src/core/iomgr/sockaddr_utils.c b/src/core/iomgr/sockaddr_utils.c
index e91b94f8c8..61006d7a7a 100644
--- a/src/core/iomgr/sockaddr_utils.c
+++ b/src/core/iomgr/sockaddr_utils.c
@@ -36,14 +36,20 @@
#include <errno.h>
#include <string.h>
-#include "src/core/support/string.h"
+#ifdef GPR_POSIX_SOCKET
+#include <sys/un.h>
+#endif
+
+#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
-static const gpr_uint8 kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0xff, 0xff};
+#include "src/core/support/string.h"
+
+static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0xff, 0xff};
int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
struct sockaddr_in *addr4_out) {
@@ -117,15 +123,17 @@ void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
}
void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out) {
+ GPR_ASSERT(port >= 0 && port < 65536);
memset(wild_out, 0, sizeof(*wild_out));
wild_out->sin_family = AF_INET;
- wild_out->sin_port = htons(port);
+ wild_out->sin_port = htons((uint16_t)port);
}
void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out) {
+ GPR_ASSERT(port >= 0 && port < 65536);
memset(wild_out, 0, sizeof(*wild_out));
wild_out->sin6_family = AF_INET6;
- wild_out->sin6_port = htons(port);
+ wild_out->sin6_port = htons((uint16_t)port);
}
int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
@@ -150,8 +158,10 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
}
+ /* Windows inet_ntop wants a mutable ip pointer */
if (ip != NULL &&
- inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) {
+ inet_ntop(addr->sa_family, (void *)ip, ntop_buf, sizeof(ntop_buf)) !=
+ NULL) {
ret = gpr_join_host_port(out, ntop_buf, port);
} else {
ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family);
@@ -161,6 +171,36 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
return ret;
}
+char *grpc_sockaddr_to_uri(const struct sockaddr *addr) {
+ char *temp;
+ char *result;
+ struct sockaddr_in addr_normalized;
+
+ if (grpc_sockaddr_is_v4mapped(addr, &addr_normalized)) {
+ addr = (const struct sockaddr *)&addr_normalized;
+ }
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ grpc_sockaddr_to_string(&temp, addr, 0);
+ gpr_asprintf(&result, "ipv4:%s", temp);
+ gpr_free(temp);
+ return result;
+ case AF_INET6:
+ grpc_sockaddr_to_string(&temp, addr, 0);
+ gpr_asprintf(&result, "ipv6:%s", temp);
+ gpr_free(temp);
+ return result;
+#ifdef GPR_POSIX_SOCKET
+ case AF_UNIX:
+ gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un *)addr)->sun_path);
+ return result;
+#endif
+ }
+
+ return NULL;
+}
+
int grpc_sockaddr_get_port(const struct sockaddr *addr) {
switch (addr->sa_family) {
case AF_INET:
@@ -170,7 +210,8 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
case AF_UNIX:
return 1;
default:
- gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", addr->sa_family);
+ gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port",
+ addr->sa_family);
return 0;
}
}
@@ -178,13 +219,16 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
switch (addr->sa_family) {
case AF_INET:
- ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ GPR_ASSERT(port >= 0 && port < 65536);
+ ((struct sockaddr_in *)addr)->sin_port = htons((uint16_t)port);
return 1;
case AF_INET6:
- ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+ GPR_ASSERT(port >= 0 && port < 65536);
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons((uint16_t)port);
return 1;
default:
- gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", addr->sa_family);
+ gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
+ addr->sa_family);
return 0;
}
}
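
grpc_sockaddr_to_uri renders an address as a scheme-prefixed string ("ipv4:", "ipv6:", or "unix:"), normalizing v4-mapped IPv6 addresses first. A standalone sketch of the AF_INET case using only the standard socket API (illustrative; the real function goes through grpc_sockaddr_to_string and gpr_asprintf):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      struct sockaddr_in addr;
      char host[INET_ADDRSTRLEN];
      char uri[64];

      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_port = htons(50051); /* ports must fit in a uint16_t */
      inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

      inet_ntop(AF_INET, &addr.sin_addr, host, sizeof(host));
      snprintf(uri, sizeof(uri), "ipv4:%s:%d", host, ntohs(addr.sin_port));
      printf("%s\n", uri); /* prints ipv4:127.0.0.1:50051 */
      return 0;
    }

The new GPR_ASSERT(port >= 0 && port < 65536) checks in the wildcard and set_port helpers make the same point explicit: htons() truncates to 16 bits, so out-of-range ports must be rejected before the cast.
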
diff --git a/src/core/iomgr/sockaddr_utils.h b/src/core/iomgr/sockaddr_utils.h
index bdfb83479b..43dc7a45ec 100644
--- a/src/core/iomgr/sockaddr_utils.h
+++ b/src/core/iomgr/sockaddr_utils.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H
+#ifndef GRPC_CORE_IOMGR_SOCKADDR_UTILS_H
+#define GRPC_CORE_IOMGR_SOCKADDR_UTILS_H
#include "src/core/iomgr/sockaddr.h"
@@ -84,4 +84,6 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
int normalize);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H */
+char *grpc_sockaddr_to_uri(const struct sockaddr *addr);
+
+#endif /* GRPC_CORE_IOMGR_SOCKADDR_UTILS_H */
diff --git a/src/core/iomgr/sockaddr_win32.h b/src/core/iomgr/sockaddr_win32.h
index c0385ea614..7acb8f7f57 100644
--- a/src/core/iomgr/sockaddr_win32.h
+++ b/src/core/iomgr/sockaddr_win32.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,16 +31,11 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_WIN32_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_WIN32_H
+#ifndef GRPC_CORE_IOMGR_SOCKADDR_WIN32_H
+#define GRPC_CORE_IOMGR_SOCKADDR_WIN32_H
-#include <ws2tcpip.h>
#include <winsock2.h>
+#include <ws2tcpip.h>
#include <mswsock.h>
-#ifdef __MINGW32__
-/* mingw seems to be missing that definition. */
-const char *inet_ntop(int af, const void *src, char *dst, socklen_t size);
-#endif
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_WIN32_H */
+#endif /* GRPC_CORE_IOMGR_SOCKADDR_WIN32_H */
diff --git a/src/core/iomgr/socket_utils_posix.h b/src/core/iomgr/socket_utils_posix.h
index d2a315b462..b01e28b6f2 100644
--- a/src/core/iomgr/socket_utils_posix.h
+++ b/src/core/iomgr/socket_utils_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKET_UTILS_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKET_UTILS_POSIX_H
+#ifndef GRPC_CORE_IOMGR_SOCKET_UTILS_POSIX_H
+#define GRPC_CORE_IOMGR_SOCKET_UTILS_POSIX_H
#include <unistd.h>
#include <sys/socket.h>
@@ -110,4 +110,4 @@ extern int grpc_forbid_dualstack_sockets_for_testing;
int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
int protocol, grpc_dualstack_mode *dsmode);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_UTILS_POSIX_H */
+#endif /* GRPC_CORE_IOMGR_SOCKET_UTILS_POSIX_H */
diff --git a/src/core/iomgr/socket_windows.c b/src/core/iomgr/socket_windows.c
index fbf3fdc949..fafb7b6622 100644
--- a/src/core/iomgr/socket_windows.c
+++ b/src/core/iomgr/socket_windows.c
@@ -35,8 +35,13 @@
#ifdef GPR_WINSOCK_SOCKET
+#include <winsock2.h>
+#include <mswsock.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/log_win32.h>
+#include <grpc/support/string_util.h>
#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/iomgr_internal.h"
@@ -45,11 +50,14 @@
#include "src/core/iomgr/socket_windows.h"
grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) {
+ char *final_name;
grpc_winsocket *r = gpr_malloc(sizeof(grpc_winsocket));
memset(r, 0, sizeof(grpc_winsocket));
r->socket = socket;
gpr_mu_init(&r->state_mu);
- grpc_iomgr_register_object(&r->iomgr_object, name);
+ gpr_asprintf(&final_name, "%s:socket=0x%p", name, r);
+ grpc_iomgr_register_object(&r->iomgr_object, final_name);
+ gpr_free(final_name);
grpc_iocp_add_socket(r);
return r;
}
@@ -58,45 +66,33 @@ grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) {
operations to abort them. We need to do that this way because of the
various callsites of that function, which happen to be in various
mutex-hold states, making it unsafe to call them directly. */
-int grpc_winsocket_shutdown(grpc_winsocket *socket) {
- int callbacks_set = 0;
- gpr_mu_lock(&socket->state_mu);
- if (socket->read_info.cb) {
- callbacks_set++;
- grpc_iomgr_closure_init(&socket->shutdown_closure, socket->read_info.cb,
- socket->read_info.opaque);
- grpc_iomgr_add_delayed_callback(&socket->shutdown_closure, 0);
- }
- if (socket->write_info.cb) {
- callbacks_set++;
- grpc_iomgr_closure_init(&socket->shutdown_closure, socket->write_info.cb,
- socket->write_info.opaque);
- grpc_iomgr_add_delayed_callback(&socket->shutdown_closure, 0);
- }
- gpr_mu_unlock(&socket->state_mu);
- return callbacks_set;
-}
+void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
+ /* Grab the function pointer for DisconnectEx for that specific socket.
+ It may change depending on the interface. */
+ int status;
+ GUID guid = WSAID_DISCONNECTEX;
+ LPFN_DISCONNECTEX DisconnectEx;
+ DWORD ioctl_num_bytes;
-/* Abandons a socket. Either we're going to queue it up for garbage collecting
- from the IO Completion Port thread, or destroy it immediately. Note that this
- mechanisms assumes that we're either always waiting for an operation, or we
- explicitly know that we don't. If there is a future case where we can have
- an "idle" socket which is neither trying to read or write, we'd start leaking
- both memory and sockets. */
-void grpc_winsocket_orphan(grpc_winsocket *winsocket) {
- SOCKET socket = winsocket->socket;
- grpc_iomgr_unregister_object(&winsocket->iomgr_object);
- if (winsocket->read_info.outstanding || winsocket->write_info.outstanding) {
- grpc_iocp_socket_orphan(winsocket);
+ status = WSAIoctl(winsocket->socket, SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &guid, sizeof(guid), &DisconnectEx, sizeof(DisconnectEx),
+ &ioctl_num_bytes, NULL, NULL);
+
+ if (status == 0) {
+ DisconnectEx(winsocket->socket, NULL, 0, 0);
} else {
- grpc_winsocket_destroy(winsocket);
+ char *utf8_message = gpr_format_message(WSAGetLastError());
+ gpr_log(GPR_ERROR, "Unable to retrieve DisconnectEx pointer : %s",
+ utf8_message);
+ gpr_free(utf8_message);
}
- closesocket(socket);
+ closesocket(winsocket->socket);
}
void grpc_winsocket_destroy(grpc_winsocket *winsocket) {
+ grpc_iomgr_unregister_object(&winsocket->iomgr_object);
gpr_mu_destroy(&winsocket->state_mu);
gpr_free(winsocket);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/socket_windows.h b/src/core/iomgr/socket_windows.h
index 7080919af0..8e50e7a953 100644
--- a/src/core/iomgr/socket_windows.h
+++ b/src/core/iomgr/socket_windows.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,15 +31,17 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_SOCKET_WINDOWS_H
-#define GRPC_INTERNAL_CORE_IOMGR_SOCKET_WINDOWS_H
+#ifndef GRPC_CORE_IOMGR_SOCKET_WINDOWS_H
+#define GRPC_CORE_IOMGR_SOCKET_WINDOWS_H
-#include <windows.h>
+#include <grpc/support/port_platform.h>
+#include <winsock2.h>
#include <grpc/support/sync.h>
#include <grpc/support/atm.h>
#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/exec_ctx.h"
/* This holds the data for an outstanding read or write on a socket.
The mutex to protect the concurrent access to that data is the one
@@ -53,8 +55,7 @@ typedef struct grpc_winsocket_callback_info {
OVERLAPPED overlapped;
/* The callback information for the pending operation. May be empty if the
caller hasn't registered a callback yet. */
- void(*cb)(void *opaque, int success);
- void *opaque;
+ grpc_closure *closure;
/* A boolean to describe if the IO Completion Port got a notification for
that operation. This will happen if the operation completed before the
caller had time to register a callback. We could avoid that behavior
@@ -67,8 +68,6 @@ typedef struct grpc_winsocket_callback_info {
/* The results of the overlapped operation. */
DWORD bytes_transfered;
int wsa_error;
- /* A boolean indicating that we started an operation. */
- int outstanding;
} grpc_winsocket_callback_info;
/* This is a wrapper to a Windows socket. A socket can have one outstanding
@@ -91,12 +90,8 @@ typedef struct grpc_winsocket {
/* You can't add the same socket twice to the same IO Completion Port.
This prevents that. */
int added_to_iocp;
- /* A boolean to indicate that the caller has abandoned that socket, but
- there is a pending operation that the IO Completion Port will have to
- wait for. The socket will be collected at that time. */
- int orphan;
- grpc_iomgr_closure shutdown_closure;
+ grpc_closure shutdown_closure;
/* A label for iomgr to track outstanding objects */
grpc_iomgr_object iomgr_object;
@@ -107,14 +102,10 @@ typedef struct grpc_winsocket {
grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name);
/* Initiate an asynchronous shutdown of the socket. Will call off any pending
- operation to cancel them. Returns the number of callbacks that got setup. */
-int grpc_winsocket_shutdown(grpc_winsocket *socket);
+ operations to cancel them. */
+void grpc_winsocket_shutdown(grpc_winsocket *socket);
-/* Abandon a socket. */
-void grpc_winsocket_orphan(grpc_winsocket *socket);
-
-/* Destroy a socket. Should only be called by the IO Completion Port thread,
- or by grpc_winsocket_orphan if there's no pending operation. */
+/* Destroy a socket. Should only be called if there's no pending operation. */
void grpc_winsocket_destroy(grpc_winsocket *socket);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_SOCKET_WINDOWS_H */
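With the orphan/abandon path removed above, a winsocket's lifetime collapses to create, optionally shutdown, then destroy once nothing is pending. A small sketch of that lifecycle using only the functions declared in this header (the sock handle and the "example" label are illustrative):

    #include "src/core/iomgr/socket_windows.h"

    static void example_winsocket_lifecycle(SOCKET sock) {
      grpc_winsocket *ws = grpc_winsocket_create(sock, "example");
      /* ... issue overlapped operations through the IOCP machinery ... */
      grpc_winsocket_shutdown(ws); /* asynchronously disconnects via DisconnectEx */
      /* Only once no operation is outstanding: */
      grpc_winsocket_destroy(ws);  /* unregisters the iomgr object and frees ws */
    }
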
diff --git a/src/core/iomgr/tcp_client.h b/src/core/iomgr/tcp_client.h
index 0fa08b52b0..2e29833b70 100644
--- a/src/core/iomgr/tcp_client.h
+++ b/src/core/iomgr/tcp_client.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H
-#define GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H
+#ifndef GRPC_CORE_IOMGR_TCP_CLIENT_H
+#define GRPC_CORE_IOMGR_TCP_CLIENT_H
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/pollset_set.h"
@@ -41,12 +41,13 @@
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
- NULL on failure).
+ NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
- void *arg, grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, int addr_len,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
+ grpc_endpoint **endpoint,
+ grpc_pollset_set *interested_parties,
+ const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H */
+#endif /* GRPC_CORE_IOMGR_TCP_CLIENT_H */
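grpc_tcp_client_connect() now reports its result through a grpc_closure plus an output grpc_endpoint ** instead of a bare callback/arg pair. A minimal caller sketch; since the closure and endpoint slot must outlive the asynchronous connect they live at file scope here, and the example_connect wrapper and its parameters are illustrative, not part of this change:

    #include "src/core/iomgr/closure.h"
    #include "src/core/iomgr/exec_ctx.h"
    #include "src/core/iomgr/tcp_client.h"

    static grpc_endpoint *g_ep = NULL;
    static grpc_closure g_on_connected;

    static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
      /* On success g_ep is the connected endpoint; on failure it is NULL. */
    }

    static void example_connect(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *interested_parties,
                                const struct sockaddr *addr, size_t addr_len,
                                gpr_timespec deadline) {
      grpc_closure_init(&g_on_connected, on_connected, NULL);
      grpc_tcp_client_connect(exec_ctx, &g_on_connected, &g_ep,
                              interested_parties, addr, addr_len, deadline);
    }
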
diff --git a/src/core/iomgr/tcp_client_posix.c b/src/core/iomgr/tcp_client_posix.c
index bbf7711588..15727856ab 100644
--- a/src/core/iomgr/tcp_client_posix.c
+++ b/src/core/iomgr/tcp_client_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,27 +42,33 @@
#include <string.h>
#include <unistd.h>
-#include "src/core/iomgr/alarm.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+
#include "src/core/iomgr/iomgr_posix.h"
#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/pollset_set_posix.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/iomgr/tcp_posix.h"
+#include "src/core/iomgr/timer.h"
#include "src/core/support/string.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/time.h>
+
+extern int grpc_tcp_trace;
typedef struct {
- void (*cb)(void *arg, grpc_endpoint *tcp);
- void *cb_arg;
gpr_mu mu;
grpc_fd *fd;
gpr_timespec deadline;
- grpc_alarm alarm;
+ grpc_timer alarm;
int refs;
- grpc_iomgr_closure write_closure;
+ grpc_closure write_closure;
+ grpc_pollset_set *interested_parties;
+ char *addr_str;
+ grpc_endpoint **ep;
+ grpc_closure *closure;
} async_connect;
static int prepare_socket(const struct sockaddr *addr, int fd) {
@@ -87,39 +93,58 @@ error:
return 0;
}
-static void on_alarm(void *acp, int success) {
+static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, bool success) {
int done;
async_connect *ac = acp;
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: success=%d", ac->addr_str,
+ success);
+ }
gpr_mu_lock(&ac->mu);
- if (ac->fd != NULL && success) {
- grpc_fd_shutdown(ac->fd);
+ if (ac->fd != NULL) {
+ grpc_fd_shutdown(exec_ctx, ac->fd);
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
gpr_mu_destroy(&ac->mu);
+ gpr_free(ac->addr_str);
gpr_free(ac);
}
}
-static void on_writable(void *acp, int success) {
+static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, bool success) {
async_connect *ac = acp;
int so_error = 0;
socklen_t so_error_size;
int err;
- int fd = ac->fd->fd;
int done;
- grpc_endpoint *ep = NULL;
- void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
- void *cb_arg = ac->cb_arg;
+ grpc_endpoint **ep = ac->ep;
+ grpc_closure *closure = ac->closure;
+ grpc_fd *fd;
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: success=%d",
+ ac->addr_str, success);
+ }
+
+ gpr_mu_lock(&ac->mu);
+ GPR_ASSERT(ac->fd);
+ fd = ac->fd;
+ ac->fd = NULL;
+ gpr_mu_unlock(&ac->mu);
+
+ grpc_timer_cancel(exec_ctx, &ac->alarm);
+
+ gpr_mu_lock(&ac->mu);
if (success) {
do {
so_error_size = sizeof(so_error);
- err = getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &so_error_size);
+ err = getsockopt(fd->fd, SOL_SOCKET, SO_ERROR, &so_error, &so_error_size);
} while (err < 0 && errno == EINTR);
if (err < 0) {
- gpr_log(GPR_ERROR, "getsockopt(ERROR): %s", strerror(errno));
+ gpr_log(GPR_ERROR, "failed to connect to '%s': getsockopt(ERROR): %s",
+ ac->addr_str, strerror(errno));
goto finish;
} else if (so_error != 0) {
if (so_error == ENOBUFS) {
@@ -138,49 +163,58 @@ static void on_writable(void *acp, int success) {
opened too many network connections. The "easy" fix:
don't do that! */
gpr_log(GPR_ERROR, "kernel out of buffers");
- grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
+ gpr_mu_unlock(&ac->mu);
+ grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure);
return;
} else {
switch (so_error) {
case ECONNREFUSED:
- gpr_log(GPR_ERROR, "socket error: connection refused");
+ gpr_log(
+ GPR_ERROR,
+ "failed to connect to '%s': socket error: connection refused",
+ ac->addr_str);
break;
default:
- gpr_log(GPR_ERROR, "socket error: %d", so_error);
+ gpr_log(GPR_ERROR, "failed to connect to '%s': socket error: %d",
+ ac->addr_str, so_error);
break;
}
goto finish;
}
} else {
- ep = grpc_tcp_create(ac->fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+ fd = NULL;
goto finish;
}
} else {
- gpr_log(GPR_ERROR, "on_writable failed during connect");
+ gpr_log(GPR_ERROR, "failed to connect to '%s': timeout occurred",
+ ac->addr_str);
goto finish;
}
- abort();
+ GPR_UNREACHABLE_CODE(return );
finish:
- gpr_mu_lock(&ac->mu);
- if (!ep) {
- grpc_fd_orphan(ac->fd, NULL, "tcp_client_orphan");
+ if (fd != NULL) {
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ grpc_fd_orphan(exec_ctx, fd, NULL, NULL, "tcp_client_orphan");
+ fd = NULL;
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
gpr_mu_destroy(&ac->mu);
+ gpr_free(ac->addr_str);
gpr_free(ac);
- } else {
- grpc_alarm_cancel(&ac->alarm);
}
- cb(cb_arg, ep);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL, NULL);
}
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
- void *arg, grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, int addr_len,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
+ const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
int fd;
grpc_dualstack_mode dsmode;
@@ -192,6 +226,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
char *name;
char *addr_str;
+ *ep = NULL;
+
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
@@ -209,45 +245,57 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
addr_len = sizeof(addr4_copy);
}
if (!prepare_socket(addr, fd)) {
- cb(arg, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
return;
}
do {
- err = connect(fd, addr, addr_len);
+ GPR_ASSERT(addr_len < ~(socklen_t)0);
+ err = connect(fd, addr, (socklen_t)addr_len);
} while (err < 0 && errno == EINTR);
- grpc_sockaddr_to_string(&addr_str, addr, 1);
+ addr_str = grpc_sockaddr_to_uri(addr);
gpr_asprintf(&name, "tcp-client:%s", addr_str);
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
+ *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
- grpc_fd_orphan(fdobj, NULL, "tcp_client_connect_error");
- cb(arg, NULL);
+ grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
goto done;
}
- grpc_pollset_set_add_fd(interested_parties, fdobj);
+ grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
ac = gpr_malloc(sizeof(async_connect));
- ac->cb = cb;
- ac->cb_arg = arg;
+ ac->closure = closure;
+ ac->ep = ep;
ac->fd = fdobj;
+ ac->interested_parties = interested_parties;
+ ac->addr_str = addr_str;
+ addr_str = NULL;
gpr_mu_init(&ac->mu);
ac->refs = 2;
ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac;
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
+ ac->addr_str);
+ }
+
gpr_mu_lock(&ac->mu);
- grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
- grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
+ grpc_timer_init(exec_ctx, &ac->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
done:
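The rewritten on_writable() above is built on the standard non-blocking connect pattern: connect() returns EINPROGRESS, the fd is watched for writability, and the real outcome is read back with getsockopt(SO_ERROR). A standalone POSIX sketch of just that step, independent of the gRPC plumbing (check_async_connect is an illustrative name):

    #include <errno.h>
    #include <sys/socket.h>

    /* Call once the non-blocking socket `fd` (whose connect() previously
       returned EINPROGRESS) polls writable. Returns 0 if the connection is
       established, -1 with errno set otherwise. */
    static int check_async_connect(int fd) {
      int so_error = 0;
      socklen_t len = sizeof(so_error);
      if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &len) < 0) return -1;
      if (so_error != 0) { /* e.g. ECONNREFUSED, ETIMEDOUT */
        errno = so_error;
        return -1;
      }
      return 0;
    }
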
diff --git a/src/core/iomgr/tcp_client_windows.c b/src/core/iomgr/tcp_client_windows.c
index b1a169b519..689c6f7b10 100644
--- a/src/core/iomgr/tcp_client_windows.c
+++ b/src/core/iomgr/tcp_client_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/useful.h>
-#include "src/core/iomgr/alarm.h"
+#include "src/core/iomgr/timer.h"
#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/tcp_client.h"
#include "src/core/iomgr/tcp_windows.h"
@@ -52,94 +52,77 @@
#include "src/core/iomgr/socket_windows.h"
typedef struct {
- void (*cb)(void *arg, grpc_endpoint *tcp);
- void *cb_arg;
+ grpc_closure *on_done;
gpr_mu mu;
grpc_winsocket *socket;
gpr_timespec deadline;
- grpc_alarm alarm;
+ grpc_timer alarm;
+ char *addr_name;
int refs;
- int aborted;
+ grpc_closure on_connect;
+ grpc_endpoint **endpoint;
} async_connect;
-static void async_connect_cleanup(async_connect *ac) {
+static void async_connect_unlock_and_cleanup(async_connect *ac) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
+ if (ac->socket != NULL) grpc_winsocket_destroy(ac->socket);
gpr_mu_destroy(&ac->mu);
+ gpr_free(ac->addr_name);
gpr_free(ac);
}
}
-static void on_alarm(void *acp, int occured) {
+static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, bool occured) {
async_connect *ac = acp;
gpr_mu_lock(&ac->mu);
/* If the alarm didn't occur, it got cancelled. */
if (ac->socket != NULL && occured) {
grpc_winsocket_shutdown(ac->socket);
}
- async_connect_cleanup(ac);
+ async_connect_unlock_and_cleanup(ac);
}
-static void on_connect(void *acp, int from_iocp) {
+static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, bool from_iocp) {
async_connect *ac = acp;
SOCKET sock = ac->socket->socket;
- grpc_endpoint *ep = NULL;
+ grpc_endpoint **ep = ac->endpoint;
grpc_winsocket_callback_info *info = &ac->socket->write_info;
- void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
- void *cb_arg = ac->cb_arg;
- int aborted;
+ grpc_closure *on_done = ac->on_done;
- grpc_alarm_cancel(&ac->alarm);
+ grpc_timer_cancel(exec_ctx, &ac->alarm);
gpr_mu_lock(&ac->mu);
- aborted = ac->aborted;
if (from_iocp) {
DWORD transfered_bytes = 0;
DWORD flags;
BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
&transfered_bytes, FALSE, &flags);
- info->outstanding = 0;
GPR_ASSERT(transfered_bytes == 0);
if (!wsa_success) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
- } else if (!aborted) {
- ep = grpc_tcp_create(ac->socket);
+ } else {
+ *ep = grpc_tcp_create(ac->socket, ac->addr_name);
+ ac->socket = NULL;
}
- } else {
- gpr_log(GPR_ERROR, "on_connect is shutting down");
- /* If the connection timeouts, we will still get a notification from
- the IOCP whatever happens. So we're just going to flag that connection
- as being in the process of being aborted, and wait for the IOCP. We
- can't just orphan the socket now, because the IOCP might already have
- gotten a successful connection, which is our worst-case scenario.
- We need to call our callback now to respect the deadline. */
- ac->aborted = 1;
- gpr_mu_unlock(&ac->mu);
- cb(cb_arg, NULL);
- return;
}
- ac->socket->write_info.outstanding = 0;
-
- /* If we don't have an endpoint, it means the connection failed,
- so it doesn't matter if it aborted or failed. We need to orphan
- that socket. */
- if (!ep || aborted) grpc_winsocket_orphan(ac->socket);
- async_connect_cleanup(ac);
+ async_connect_unlock_and_cleanup(ac);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- if (!aborted) cb(cb_arg, ep);
+ on_done->cb(exec_ctx, on_done->cb_arg, *ep != NULL);
}
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
- void *arg, grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, int addr_len,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
+ grpc_endpoint **endpoint,
+ grpc_pollset_set *interested_parties,
+ const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
@@ -155,6 +138,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
char *utf8_message;
grpc_winsocket_callback_info *info;
+ *endpoint = NULL;
+
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
@@ -194,8 +179,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- info->outstanding = 1;
- success = ConnectEx(sock, addr, addr_len, NULL, 0, NULL, &info->overlapped);
+ success =
+ ConnectEx(sock, addr, (int)addr_len, NULL, 0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */
@@ -208,28 +193,29 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
}
ac = gpr_malloc(sizeof(async_connect));
- ac->cb = cb;
- ac->cb_arg = arg;
+ ac->on_done = on_done;
ac->socket = socket;
gpr_mu_init(&ac->mu);
ac->refs = 2;
- ac->aborted = 0;
+ ac->addr_name = grpc_sockaddr_to_uri(addr);
+ ac->endpoint = endpoint;
+ grpc_closure_init(&ac->on_connect, on_connect, ac);
- grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
- socket->write_info.outstanding = 1;
- grpc_socket_notify_on_write(socket, on_connect, ac);
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
+ gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
failure:
utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, message, utf8_message);
gpr_free(utf8_message);
- if (socket) {
- grpc_winsocket_orphan(socket);
+ if (socket != NULL) {
+ grpc_winsocket_destroy(socket);
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- cb(arg, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, on_done, false, NULL);
}
#endif /* GPR_WINSOCK_SOCKET */
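Both client implementations rely on the same cleanup idiom: the async_connect record starts with refs == 2, one reference for the timeout path and one for the completion path, and whichever side drops the last reference frees the state under the mutex. A stripped-down sketch of that pattern using only the gpr primitives already in play here (pending_connect is an illustrative name):

    #include <grpc/support/alloc.h>
    #include <grpc/support/sync.h>

    typedef struct {
      gpr_mu mu;
      int refs; /* one ref for the timeout path, one for the completion path */
      /* ... per-connect state ... */
    } pending_connect;

    static pending_connect *pending_connect_create(void) {
      pending_connect *pc = gpr_malloc(sizeof(*pc));
      gpr_mu_init(&pc->mu);
      pc->refs = 2;
      return pc;
    }

    /* Called exactly once by each of the two completion paths. */
    static void pending_connect_unref(pending_connect *pc) {
      int done;
      gpr_mu_lock(&pc->mu);
      done = (--pc->refs == 0);
      gpr_mu_unlock(&pc->mu);
      if (done) {
        gpr_mu_destroy(&pc->mu);
        gpr_free(pc);
      }
    }
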
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 9ad089af66..f74eb3fe51 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,310 +40,161 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
#include <sys/socket.h>
+#include <sys/types.h>
#include <unistd.h>
-#include "src/core/support/string.h"
-#include "src/core/debug/trace.h"
-#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/debug/trace.h"
+#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/pollset_set_posix.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+
#ifdef GPR_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif
-/* Holds a slice array and associated state. */
-typedef struct grpc_tcp_slice_state {
- gpr_slice *slices; /* Array of slices */
- size_t nslices; /* Size of slices array. */
- ssize_t first_slice; /* First valid slice in array */
- ssize_t last_slice; /* Last valid slice in array */
- gpr_slice working_slice; /* pointer to original final slice */
- int working_slice_valid; /* True if there is a working slice */
- int memory_owned; /* True if slices array is owned */
-} grpc_tcp_slice_state;
+#ifdef GPR_MSG_IOVLEN_TYPE
+typedef GPR_MSG_IOVLEN_TYPE msg_iovlen_type;
+#else
+typedef size_t msg_iovlen_type;
+#endif
int grpc_tcp_trace = 0;
-static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices,
- size_t nslices, size_t valid_slices) {
- state->slices = slices;
- state->nslices = nslices;
- if (valid_slices == 0) {
- state->first_slice = -1;
- } else {
- state->first_slice = 0;
- }
- state->last_slice = valid_slices - 1;
- state->working_slice_valid = 0;
- state->memory_owned = 0;
-}
-
-/* Returns true if there is still available data */
-static int slice_state_has_available(grpc_tcp_slice_state *state) {
- return state->first_slice != -1 && state->last_slice >= state->first_slice;
-}
-
-static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) {
- if (state->first_slice == -1) {
- return 0;
- } else {
- return state->last_slice - state->first_slice + 1;
- }
-}
-
-static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) {
- /* TODO(klempner): use realloc instead when first_slice is 0 */
- /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */
- gpr_slice *slices = state->slices;
- size_t original_size = slice_state_slices_allocated(state);
- size_t i;
- gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size);
-
- for (i = 0; i < original_size; ++i) {
- new_slices[i] = slices[i + state->first_slice];
- }
-
- state->slices = new_slices;
- state->last_slice = original_size - 1;
- if (original_size > 0) {
- state->first_slice = 0;
- } else {
- state->first_slice = -1;
- }
- state->nslices = new_size;
-
- if (state->memory_owned) {
- gpr_free(slices);
- }
- state->memory_owned = 1;
-}
-
-static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
- size_t prefix_bytes) {
- gpr_slice *current_slice = &state->slices[state->first_slice];
- size_t current_slice_size;
-
- while (slice_state_has_available(state)) {
- current_slice_size = GPR_SLICE_LENGTH(*current_slice);
- if (current_slice_size > prefix_bytes) {
- /* TODO(klempner): Get rid of the extra refcount created here by adding a
- native "trim the first N bytes" operation to splice */
- /* TODO(klempner): This really shouldn't be modifying the current slice
- unless we own the slices array. */
- gpr_slice tail;
- tail = gpr_slice_split_tail(current_slice, prefix_bytes);
- gpr_slice_unref(*current_slice);
- *current_slice = tail;
- return;
- } else {
- gpr_slice_unref(*current_slice);
- ++state->first_slice;
- ++current_slice;
- prefix_bytes -= current_slice_size;
- }
- }
-}
-
-static void slice_state_destroy(grpc_tcp_slice_state *state) {
- while (slice_state_has_available(state)) {
- gpr_slice_unref(state->slices[state->first_slice]);
- ++state->first_slice;
- }
-
- if (state->memory_owned) {
- gpr_free(state->slices);
- state->memory_owned = 0;
- }
-}
-
-void slice_state_transfer_ownership(grpc_tcp_slice_state *state,
- gpr_slice **slices, size_t *nslices) {
- *slices = state->slices + state->first_slice;
- *nslices = state->last_slice - state->first_slice + 1;
-
- state->first_slice = -1;
- state->last_slice = -1;
-}
-
-/* Fills iov with the first min(iov_size, available) slices, returns number
- filled */
-static size_t slice_state_to_iovec(grpc_tcp_slice_state *state,
- struct iovec *iov, size_t iov_size) {
- size_t nslices = state->last_slice - state->first_slice + 1;
- gpr_slice *slices = state->slices + state->first_slice;
- size_t i;
- if (nslices < iov_size) {
- iov_size = nslices;
- }
-
- for (i = 0; i < iov_size; ++i) {
- iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]);
- iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]);
- }
- return iov_size;
-}
-
-/* Makes n blocks available at the end of state, writes them into iov, and
- returns the number of bytes allocated */
-static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state,
- struct iovec *iov, size_t n,
- size_t slice_size) {
- size_t target_size;
- size_t i;
- size_t allocated_bytes;
- ssize_t allocated_slices = slice_state_slices_allocated(state);
-
- if (n - state->working_slice_valid >= state->nslices - state->last_slice) {
- /* Need to grow the slice array */
- target_size = state->nslices;
- do {
- target_size = target_size * 2;
- } while (target_size < allocated_slices + n - state->working_slice_valid);
- /* TODO(klempner): If this ever needs to support both prefix removal and
- append, we should be smarter about the growth logic here */
- slice_state_realloc(state, target_size);
- }
-
- i = 0;
- allocated_bytes = 0;
-
- if (state->working_slice_valid) {
- iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]);
- iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) -
- GPR_SLICE_LENGTH(state->slices[state->last_slice]);
- allocated_bytes += iov[0].iov_len;
- ++i;
- state->slices[state->last_slice] = state->working_slice;
- state->working_slice_valid = 0;
- }
-
- for (; i < n; ++i) {
- ++state->last_slice;
- state->slices[state->last_slice] = gpr_slice_malloc(slice_size);
- iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]);
- iov[i].iov_len = slice_size;
- allocated_bytes += slice_size;
- }
- if (state->first_slice == -1) {
- state->first_slice = 0;
- }
- return allocated_bytes;
-}
-
-/* Remove the last n bytes from state */
-/* TODO(klempner): Consider having this defer actual deletion until later */
-static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) {
- while (bytes > 0 && slice_state_has_available(state)) {
- if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) {
- state->working_slice = state->slices[state->last_slice];
- state->working_slice_valid = 1;
- /* TODO(klempner): Combine these into a single operation that doesn't need
- to refcount */
- gpr_slice_unref(gpr_slice_split_tail(
- &state->slices[state->last_slice],
- GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes));
- bytes = 0;
- } else {
- bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]);
- gpr_slice_unref(state->slices[state->last_slice]);
- --state->last_slice;
- if (state->last_slice == -1) {
- state->first_slice = -1;
- }
- }
- }
-}
-
typedef struct {
grpc_endpoint base;
grpc_fd *em_fd;
int fd;
- int iov_size; /* Number of slices to allocate per read attempt */
int finished_edge;
+ msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
size_t slice_size;
gpr_refcount refcount;
- grpc_endpoint_read_cb read_cb;
- void *read_user_data;
- grpc_endpoint_write_cb write_cb;
- void *write_user_data;
+ /* garbage after the last read */
+ gpr_slice_buffer last_read_buffer;
+
+ gpr_slice_buffer *incoming_buffer;
+ gpr_slice_buffer *outgoing_buffer;
+ /** slice within outgoing_buffer to write next */
+ size_t outgoing_slice_idx;
+ /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
+ size_t outgoing_byte_idx;
- grpc_tcp_slice_state write_state;
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+ grpc_closure *release_fd_cb;
+ int *release_fd;
- grpc_iomgr_closure read_closure;
- grpc_iomgr_closure write_closure;
+ grpc_closure read_closure;
+ grpc_closure write_closure;
- grpc_iomgr_closure handle_read_closure;
+ char *peer_string;
} grpc_tcp;
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ bool success);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ bool success);
-static void grpc_tcp_shutdown(grpc_endpoint *ep) {
+static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_fd_shutdown(tcp->em_fd);
+ grpc_fd_shutdown(exec_ctx, tcp->em_fd);
+}
+
+static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
+ "tcp_unref_orphan");
+ gpr_slice_buffer_destroy(&tcp->last_read_buffer);
+ gpr_free(tcp->peer_string);
+ gpr_free(tcp);
+}
+
+/*#define GRPC_TCP_REFCOUNT_DEBUG*/
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(cl, tcp, reason) \
+ tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
+ const char *reason, const char *file, int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(exec_ctx, tcp);
+ }
}
-static void grpc_tcp_unref(grpc_tcp *tcp) {
- int refcount_zero = gpr_unref(&tcp->refcount);
- if (refcount_zero) {
- grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
- gpr_free(tcp);
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count + 1);
+ gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(exec_ctx, tcp);
}
}
-static void grpc_tcp_destroy(grpc_endpoint *ep) {
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_tcp_unref(tcp);
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
-static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status status) {
- grpc_endpoint_read_cb cb = tcp->read_cb;
+static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, int success) {
+ grpc_closure *cb = tcp->read_cb;
if (grpc_tcp_trace) {
size_t i;
- gpr_log(GPR_DEBUG, "read: status=%d", status);
- for (i = 0; i < nslices; i++) {
- char *dump =
- gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
- GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
- gpr_log(GPR_DEBUG, "READ: %s", dump);
+ gpr_log(GPR_DEBUG, "read: success=%d", success);
+ for (i = 0; i < tcp->incoming_buffer->count; i++) {
+ char *dump = gpr_dump_slice(tcp->incoming_buffer->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "READ %p: %s", tcp, dump);
gpr_free(dump);
}
}
tcp->read_cb = NULL;
- cb(tcp->read_user_data, slices, nslices, status);
+ tcp->incoming_buffer = NULL;
+ cb->cb(exec_ctx, cb->cb_arg, success);
}
-#define INLINE_SLICE_BUFFER_SIZE 8
#define MAX_READ_IOVEC 4
-static void grpc_tcp_continue_read(grpc_tcp *tcp) {
- gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
- ssize_t allocated_bytes;
- struct grpc_tcp_slice_state read_state;
- gpr_slice *final_slices;
- size_t final_nslices;
+ size_t i;
GPR_ASSERT(!tcp->finished_edge);
- GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
- slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE,
- 0);
+ GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
+ GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
+ GPR_TIMER_BEGIN("tcp_continue_read", 0);
- allocated_bytes = slice_state_append_blocks_into_iovec(
- &read_state, iov, tcp->iov_size, tcp->slice_size);
+ while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+ gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
+ gpr_slice_malloc(tcp->slice_size));
+ }
+ for (i = 0; i < tcp->incoming_buffer->count; i++) {
+ iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
+ iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
+ }
msg.msg_name = NULL;
msg.msg_namelen = 0;
@@ -353,112 +204,114 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
+ GPR_TIMER_BEGIN("recvmsg", 1);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
- GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
-
- if (read_bytes < allocated_bytes) {
- /* TODO(klempner): Consider a second read first, in hopes of getting a
- * quick EAGAIN and saving a bunch of allocations. */
- slice_state_remove_last(&read_state, read_bytes < 0
- ? allocated_bytes
- : allocated_bytes - read_bytes);
- }
+ GPR_TIMER_END("recvmsg", 0);
if (read_bytes < 0) {
- /* NB: After calling the user_cb a parallel call of the read handler may
+ /* NB: After calling call_read_cb a parallel call of the read handler may
* be running. */
if (errno == EAGAIN) {
if (tcp->iov_size > 1) {
tcp->iov_size /= 2;
}
- if (slice_state_has_available(&read_state)) {
- /* TODO(klempner): We should probably do the call into the application
- without all this junk on the stack */
- /* FIXME(klempner): Refcount properly */
- slice_state_transfer_ownership(&read_state, &final_slices,
- &final_nslices);
- tcp->finished_edge = 1;
- call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
- slice_state_destroy(&read_state);
- grpc_tcp_unref(tcp);
- } else {
- /* We've consumed the edge, request a new one */
- slice_state_destroy(&read_state);
- grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
- }
+ /* We've consumed the edge, request a new one */
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
/* TODO(klempner): Log interesting errors */
- call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
- slice_state_destroy(&read_state);
- grpc_tcp_unref(tcp);
+ gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
}
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
- if (slice_state_has_available(&read_state)) {
- /* there were bytes already read: pass them up to the application */
- slice_state_transfer_ownership(&read_state, &final_slices,
- &final_nslices);
- call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF);
- } else {
- call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF);
- }
- slice_state_destroy(&read_state);
- grpc_tcp_unref(tcp);
+ gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- if (tcp->iov_size < MAX_READ_IOVEC) {
+ GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
+ if ((size_t)read_bytes < tcp->incoming_buffer->length) {
+ gpr_slice_buffer_trim_end(
+ tcp->incoming_buffer,
+ tcp->incoming_buffer->length - (size_t)read_bytes,
+ &tcp->last_read_buffer);
+ } else if (tcp->iov_size < MAX_READ_IOVEC) {
++tcp->iov_size;
}
- GPR_ASSERT(slice_state_has_available(&read_state));
- slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices);
- call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
- slice_state_destroy(&read_state);
- grpc_tcp_unref(tcp);
+ GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
+ call_read_cb(exec_ctx, tcp, 1);
+ TCP_UNREF(exec_ctx, tcp, "read");
}
- GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
+ GPR_TIMER_END("tcp_continue_read", 0);
}
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ bool success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
if (!success) {
- call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
- grpc_tcp_unref(tcp);
+ gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- grpc_tcp_continue_read(tcp);
+ tcp_continue_read(exec_ctx, tcp);
}
}
-static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
- void *user_data) {
+static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *incoming_buffer, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
- tcp->read_user_data = user_data;
- gpr_ref(&tcp->refcount);
+ tcp->incoming_buffer = incoming_buffer;
+ gpr_slice_buffer_reset_and_unref(incoming_buffer);
+ gpr_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
+ TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = 0;
- grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- tcp->handle_read_closure.cb_arg = tcp;
- grpc_iomgr_add_callback(&tcp->handle_read_closure);
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->read_closure, true, NULL);
}
}
+typedef enum { FLUSH_DONE, FLUSH_PENDING, FLUSH_ERROR } flush_result;
+
#define MAX_WRITE_IOVEC 16
-static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
+static flush_result tcp_flush(grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
- int iov_size;
+ msg_iovlen_type iov_size;
ssize_t sent_length;
- grpc_tcp_slice_state *state = &tcp->write_state;
+ size_t sending_length;
+ size_t trailing;
+ size_t unwind_slice_idx;
+ size_t unwind_byte_idx;
for (;;) {
- iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC);
+ sending_length = 0;
+ unwind_slice_idx = tcp->outgoing_slice_idx;
+ unwind_byte_idx = tcp->outgoing_byte_idx;
+ for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count &&
+ iov_size != MAX_WRITE_IOVEC;
+ iov_size++) {
+ iov[iov_size].iov_base =
+ GPR_SLICE_START_PTR(
+ tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) +
+ tcp->outgoing_byte_idx;
+ iov[iov_size].iov_len =
+ GPR_SLICE_LENGTH(
+ tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) -
+ tcp->outgoing_byte_idx;
+ sending_length += iov[iov_size].iov_len;
+ tcp->outgoing_slice_idx++;
+ tcp->outgoing_byte_idx = 0;
+ }
+ GPR_ASSERT(iov_size > 0);
msg.msg_name = NULL;
msg.msg_namelen = 0;
@@ -468,135 +321,173 @@ static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 0);
+ GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
- GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);
+ GPR_TIMER_END("sendmsg", 0);
if (sent_length < 0) {
if (errno == EAGAIN) {
- return GRPC_ENDPOINT_WRITE_PENDING;
+ tcp->outgoing_slice_idx = unwind_slice_idx;
+ tcp->outgoing_byte_idx = unwind_byte_idx;
+ return FLUSH_PENDING;
} else {
/* TODO(klempner): Log some of these */
- slice_state_destroy(state);
- return GRPC_ENDPOINT_WRITE_ERROR;
+ return FLUSH_ERROR;
}
}
- /* TODO(klempner): Probably better to batch this after we finish flushing */
- slice_state_remove_prefix(state, sent_length);
+ GPR_ASSERT(tcp->outgoing_byte_idx == 0);
+ trailing = sending_length - (size_t)sent_length;
+ while (trailing > 0) {
+ size_t slice_length;
+
+ tcp->outgoing_slice_idx--;
+ slice_length = GPR_SLICE_LENGTH(
+ tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]);
+ if (slice_length > trailing) {
+ tcp->outgoing_byte_idx = slice_length - trailing;
+ break;
+ } else {
+ trailing -= slice_length;
+ }
+ }
- if (!slice_state_has_available(state)) {
- return GRPC_ENDPOINT_WRITE_DONE;
+ if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) {
+ return FLUSH_DONE;
}
};
}
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ bool success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
- grpc_endpoint_write_status write_status;
- grpc_endpoint_cb_status cb_status;
- grpc_endpoint_write_cb cb;
+ flush_result status;
+ grpc_closure *cb;
if (!success) {
- slice_state_destroy(&tcp->write_state);
cb = tcp->write_cb;
tcp->write_cb = NULL;
- cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN);
- grpc_tcp_unref(tcp);
+ cb->cb(exec_ctx, cb->cb_arg, 0);
+ TCP_UNREF(exec_ctx, tcp, "write");
return;
}
- GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
- write_status = grpc_tcp_flush(tcp);
- if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
- grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
+ status = tcp_flush(tcp);
+ if (status == FLUSH_PENDING) {
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
- slice_state_destroy(&tcp->write_state);
- if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
- cb_status = GRPC_ENDPOINT_CB_OK;
- } else {
- cb_status = GRPC_ENDPOINT_CB_ERROR;
- }
cb = tcp->write_cb;
tcp->write_cb = NULL;
- cb(tcp->write_user_data, cb_status);
- grpc_tcp_unref(tcp);
+ GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
+ cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
+ GPR_TIMER_END("tcp_handle_write.cb", 0);
+ TCP_UNREF(exec_ctx, tcp, "write");
}
- GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
-static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
- gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_write_cb cb,
- void *user_data) {
+static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *buf, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_endpoint_write_status status;
+ flush_result status;
if (grpc_tcp_trace) {
size_t i;
- for (i = 0; i < nslices; i++) {
+ for (i = 0; i < buf->count; i++) {
char *data =
- gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
- GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
+ gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data);
gpr_free(data);
}
}
- GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
+ GPR_TIMER_BEGIN("tcp_write", 0);
GPR_ASSERT(tcp->write_cb == NULL);
- slice_state_init(&tcp->write_state, slices, nslices, nslices);
- status = grpc_tcp_flush(tcp);
- if (status == GRPC_ENDPOINT_WRITE_PENDING) {
- /* TODO(klempner): Consider inlining rather than malloc for small nslices */
- slice_state_realloc(&tcp->write_state, nslices);
- gpr_ref(&tcp->refcount);
+ if (buf->length == 0) {
+ GPR_TIMER_END("tcp_write", 0);
+ grpc_exec_ctx_enqueue(exec_ctx, cb, true, NULL);
+ return;
+ }
+ tcp->outgoing_buffer = buf;
+ tcp->outgoing_slice_idx = 0;
+ tcp->outgoing_byte_idx = 0;
+
+ status = tcp_flush(tcp);
+ if (status == FLUSH_PENDING) {
+ TCP_REF(tcp, "write");
tcp->write_cb = cb;
- tcp->write_user_data = user_data;
- grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE, NULL);
}
- GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
- return status;
+ GPR_TIMER_END("tcp_write", 0);
+}
+
+static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
+}
+
+static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset_set) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
}
-static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
+static char *tcp_get_peer(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_pollset_add_fd(pollset, tcp->em_fd);
+ return gpr_strdup(tcp->peer_string);
}
static const grpc_endpoint_vtable vtable = {
- grpc_tcp_notify_on_read, grpc_tcp_write, grpc_tcp_add_to_pollset,
- grpc_tcp_shutdown, grpc_tcp_destroy};
+ tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
+ tcp_shutdown, tcp_destroy, tcp_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
+ const char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
+ tcp->peer_string = gpr_strdup(peer_string);
tcp->fd = em_fd->fd;
tcp->read_cb = NULL;
tcp->write_cb = NULL;
- tcp->read_user_data = NULL;
- tcp->write_user_data = NULL;
+ tcp->release_fd_cb = NULL;
+ tcp->release_fd = NULL;
+ tcp->incoming_buffer = NULL;
tcp->slice_size = slice_size;
tcp->iov_size = 1;
tcp->finished_edge = 1;
- slice_state_init(&tcp->write_state, NULL, 0, 0);
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
tcp->em_fd = em_fd;
- tcp->read_closure.cb = grpc_tcp_handle_read;
+ tcp->read_closure.cb = tcp_handle_read;
tcp->read_closure.cb_arg = tcp;
- tcp->write_closure.cb = grpc_tcp_handle_write;
+ tcp->write_closure.cb = tcp_handle_write;
tcp->write_closure.cb_arg = tcp;
+ gpr_slice_buffer_init(&tcp->last_read_buffer);
- tcp->handle_read_closure.cb = grpc_tcp_handle_read;
return &tcp->base;
}
+int grpc_tcp_fd(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ GPR_ASSERT(ep->vtable == &vtable);
+ return grpc_fd_wrapped_fd(tcp->em_fd);
+}
+
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ int *fd, grpc_closure *done) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ GPR_ASSERT(ep->vtable == &vtable);
+ tcp->release_fd = fd;
+ tcp->release_fd_cb = done;
+ TCP_UNREF(exec_ctx, tcp, "destroy");
+}
+
#endif
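The new write path replaces the slice_state machinery with an (outgoing_slice_idx, outgoing_byte_idx) cursor into a gpr_slice_buffer, restoring the cursor on EAGAIN and rolling it back by the trailing byte count after a short send. The same bookkeeping can be shown in isolation over a plain iovec array (a generic sketch that sends one buffer at a time for brevity; flush_iovec is an illustrative name, not the gRPC code path):

    #include <errno.h>
    #include <sys/uio.h>

    /* Write as much of iov[*idx .. iovcnt) as the kernel will take, advancing
       *idx/*off past fully and partially written buffers. Returns 0 when all
       data is flushed, 1 on EAGAIN (retry when writable again), -1 on error. */
    static int flush_iovec(int fd, const struct iovec *iov, int iovcnt,
                           int *idx, size_t *off) {
      while (*idx < iovcnt) {
        struct iovec cur = iov[*idx];
        cur.iov_base = (char *)cur.iov_base + *off;
        cur.iov_len -= *off;
        ssize_t n = writev(fd, &cur, 1);
        if (n < 0) {
          if (errno == EINTR) continue;
          if (errno == EAGAIN || errno == EWOULDBLOCK) return 1;
          return -1;
        }
        if ((size_t)n < cur.iov_len) {
          *off += (size_t)n; /* partial write: stay on this buffer */
        } else {
          *off = 0;          /* buffer finished: move to the next one */
          (*idx)++;
        }
      }
      return 0;
    }
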
diff --git a/src/core/iomgr/tcp_posix.h b/src/core/iomgr/tcp_posix.h
index 44279d5a26..d846ec570f 100644
--- a/src/core/iomgr/tcp_posix.h
+++ b/src/core/iomgr/tcp_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H
+#ifndef GRPC_CORE_IOMGR_TCP_POSIX_H
+#define GRPC_CORE_IOMGR_TCP_POSIX_H
/*
Low level TCP "bottom half" implementation, for use by transports built on
top of a TCP connection.
@@ -53,6 +53,19 @@ extern int grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
+ const char *peer_string);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */
+/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
+ release the fd.
+ Requires: ep must be a tcp endpoint.
+ */
+int grpc_tcp_fd(grpc_endpoint *ep);
+
+/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
+ * will be called when the endpoint is destroyed.
+ * Requires: ep must be a tcp endpoint and fd must not be NULL. */
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ int *fd, grpc_closure *done);
+
+#endif /* GRPC_CORE_IOMGR_TCP_POSIX_H */
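The two new entry points let an owner inspect the endpoint's fd and reclaim it rather than letting the endpoint close it. A sketch under the same caveat as the client example above (the release closure must outlive the call, so it lives at file scope); on_fd_released and g_released_fd are illustrative names:

    #include "src/core/iomgr/closure.h"
    #include "src/core/iomgr/tcp_posix.h"

    static int g_released_fd = -1;
    static grpc_closure g_fd_released;

    static void on_fd_released(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
      /* g_released_fd now holds the raw fd; this code owns it and must close it. */
    }

    static void example_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
      int fd = grpc_tcp_fd(ep); /* still owned by the endpoint at this point */
      (void)fd;
      grpc_closure_init(&g_fd_released, on_fd_released, NULL);
      grpc_tcp_destroy_and_release_fd(exec_ctx, ep, &g_released_fd, &g_fd_released);
    }
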
diff --git a/src/core/iomgr/tcp_server.h b/src/core/iomgr/tcp_server.h
index 66bb3ef701..93247e9e4e 100644
--- a/src/core/iomgr/tcp_server.h
+++ b/src/core/iomgr/tcp_server.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,48 +31,73 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H
-#define GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H
+#ifndef GRPC_CORE_IOMGR_TCP_SERVER_H
+#define GRPC_CORE_IOMGR_TCP_SERVER_H
+#include "src/core/iomgr/closure.h"
#include "src/core/iomgr/endpoint.h"
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
-/* New server callback: tcp is the newly connected tcp connection */
-typedef void (*grpc_tcp_server_cb)(void *arg, grpc_endpoint *ep);
+typedef struct grpc_tcp_server_acceptor {
+ /* grpc_tcp_server_cb functions share a ref on from_server that is valid
+ until the function returns. */
+ grpc_tcp_server *from_server;
+ /* Indices that may be passed to grpc_tcp_server_port_fd(). */
+ unsigned port_index;
+ unsigned fd_index;
+} grpc_tcp_server_acceptor;
-/* Create a server, initially not bound to any ports */
-grpc_tcp_server *grpc_tcp_server_create(void);
+/* Called for newly connected TCP connections. */
+typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *ep,
+ grpc_tcp_server_acceptor *acceptor);
+
+/* Create a server, initially not bound to any ports. The caller owns one ref.
+ If shutdown_complete is not NULL, it will be used by
+ grpc_tcp_server_unref() when the ref count reaches zero. */
+grpc_tcp_server *grpc_tcp_server_create(grpc_closure *shutdown_complete);
/* Start listening to bound ports */
-void grpc_tcp_server_start(grpc_tcp_server *server, grpc_pollset **pollsets,
- size_t pollset_count, grpc_tcp_server_cb cb,
- void *cb_arg);
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void *cb_arg);
-/* Add a port to the server, returning port number on success, or negative
- on failure.
+/* Add a port to the server, returning the newly allocated port on success, or
+ -1 on failure.
The :: and 0.0.0.0 wildcard addresses are treated identically, accepting
both IPv4 and IPv6 connections, but :: is the preferred style. This usually
creates one socket, but possibly two on systems which support IPv6,
- but not dualstack sockets.
-
- For raw access to the underlying sockets, see grpc_tcp_server_get_fd(). */
+ but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- int addr_len);
+ size_t addr_len);
+
+/* Number of fds at the given port_index, or 0 if port_index is out of
+ bounds. */
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, unsigned port_index);
+
+/* Returns the file descriptor of the Mth (fd_index) listening socket of the Nth
+ (port_index) call to add_port() on this server, or -1 if the indices are out
+ of bounds. The file descriptor remains owned by the server, and will be
+ cleaned up when the ref count reaches zero. */
+int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
+ unsigned fd_index);
-/* Returns the file descriptor of the Nth listening socket on this server,
- or -1 if the index is out of bounds.
+/* Ref s and return s. */
+grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s);
- The file descriptor remains owned by the server, and will be cleaned
- up when grpc_tcp_server_destroy is called. */
-int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index);
+/* shutdown_starting is called when ref count has reached zero and the server is
+ about to be destroyed. The server will be deleted after it returns. Calling
+ grpc_tcp_server_ref() from it has no effect. */
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
+ grpc_closure *shutdown_starting);
-void grpc_tcp_server_destroy(grpc_tcp_server *server,
- void (*shutdown_done)(void *shutdown_done_arg),
- void *shutdown_done_arg);
+/* If the refcount drops to zero, delete s, and call (exec_ctx==NULL) or enqueue
+ a call (exec_ctx!=NULL) to shutdown_complete. */
+void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H */
+#endif /* GRPC_CORE_IOMGR_TCP_SERVER_H */
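Putting the refcounted server API together: create hands back one ref and remembers shutdown_complete, add_port and start set up the listeners, and unref drops the initial ref, triggering teardown and eventually shutdown_complete. A hedged lifecycle sketch; the example_server wrapper, its parameters, and the closure names are illustrative only:

    #include "src/core/iomgr/closure.h"
    #include "src/core/iomgr/tcp_server.h"

    static grpc_closure g_shutdown_complete;

    static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *ep,
                          grpc_tcp_server_acceptor *acceptor) {
      /* Hand ep off to a transport; acceptor identifies the port/fd indices. */
    }

    static void on_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
                                     bool success) {}

    static void example_server(grpc_exec_ctx *exec_ctx, const void *addr,
                               size_t addr_len, grpc_pollset **pollsets,
                               size_t pollset_count) {
      grpc_tcp_server *s;
      int port;
      grpc_closure_init(&g_shutdown_complete, on_shutdown_complete, NULL);
      s = grpc_tcp_server_create(&g_shutdown_complete);
      port = grpc_tcp_server_add_port(s, addr, addr_len); /* -1 on failure */
      (void)port;
      grpc_tcp_server_start(exec_ctx, s, pollsets, pollset_count, on_accept, NULL);
      /* ... later, release the initial ref; shutdown_complete is enqueued once
         all listeners have been destroyed ... */
      grpc_tcp_server_unref(exec_ctx, s);
    }
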
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index 5854031c9b..5e07f8261c 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,26 +67,36 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
-#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
static gpr_once s_init_max_accept_queue_size;
static int s_max_accept_queue_size;
/* one listening port */
-typedef struct {
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+struct grpc_tcp_listener {
int fd;
grpc_fd *emfd;
grpc_tcp_server *server;
union {
- gpr_uint8 untyped[GRPC_MAX_SOCKADDR_SIZE];
+ uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
struct sockaddr sockaddr;
struct sockaddr_un un;
} addr;
- int addr_len;
- grpc_iomgr_closure read_closure;
- grpc_iomgr_closure destroyed_closure;
-} server_port;
+ size_t addr_len;
+ int port;
+ unsigned port_index;
+ unsigned fd_index;
+ grpc_closure read_closure;
+ grpc_closure destroyed_closure;
+ struct grpc_tcp_listener *next;
+ /* When we add a listener, more than one can be created, mainly because of
+ IPv6. A sibling will still be in the normal list, but will be flagged
+ as such. Any action, such as ref or unref, will affect all of the
+ siblings in the list. */
+ struct grpc_tcp_listener *sibling;
+ int is_sibling;
+};
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
struct stat st;
@@ -98,8 +108,10 @@ static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
/* the overall server */
struct grpc_tcp_server {
- grpc_tcp_server_cb cb;
- void *cb_arg;
+ gpr_refcount refs;
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void *on_accept_cb_arg;
gpr_mu mu;
@@ -111,14 +123,16 @@ struct grpc_tcp_server {
/* is this server shutting down? (boolean) */
int shutdown;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ grpc_tcp_listener *tail;
+ unsigned nports;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
/* shutdown callback */
- void (*shutdown_complete)(void *);
- void *shutdown_complete_arg;
+ grpc_closure *shutdown_complete;
/* all pollsets interested in new connections */
grpc_pollset **pollsets;
@@ -126,49 +140,58 @@ struct grpc_tcp_server {
size_t pollset_count;
};
-grpc_tcp_server *grpc_tcp_server_create(void) {
+grpc_tcp_server *grpc_tcp_server_create(grpc_closure *shutdown_complete) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
- s->cb = NULL;
- s->cb_arg = NULL;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
+ s->shutdown_starting.head = NULL;
+ s->shutdown_starting.tail = NULL;
+ s->shutdown_complete = shutdown_complete;
+ s->on_accept_cb = NULL;
+ s->on_accept_cb_arg = NULL;
+ s->head = NULL;
+ s->tail = NULL;
s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
return s;
}
-static void finish_shutdown(grpc_tcp_server *s) {
- s->shutdown_complete(s->shutdown_complete_arg);
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, true, NULL);
+ }
gpr_mu_destroy(&s->mu);
- gpr_free(s->ports);
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ gpr_free(sp);
+ }
+
gpr_free(s);
}
-static void destroyed_port(void *server, int success) {
+static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
+ bool success) {
grpc_tcp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
} else {
+ GPR_ASSERT(s->destroyed_ports < s->nports);
gpr_mu_unlock(&s->mu);
}
}
-static void dont_care_about_shutdown_completion(void *ignored) {}
-
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_tcp_server *s) {
- size_t i;
-
+static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -177,46 +200,40 @@ static void deactivated_all_ports(grpc_tcp_server *s) {
return;
}
- if (s->nports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ if (s->head) {
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
- grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "tcp_listener_shutdown");
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
+ "tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-void grpc_tcp_server_destroy(
- grpc_tcp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
- void *shutdown_complete_arg) {
- size_t i;
+static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = 1;
- s->shutdown_complete = shutdown_complete
- ? shutdown_complete
- : dont_care_about_shutdown_completion;
- s->shutdown_complete_arg = shutdown_complete_arg;
-
/* shutdown all fd's */
if (s->active_ports) {
- for (i = 0; i < s->nports; i++) {
- grpc_fd_shutdown(s->ports[i].emfd);
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ grpc_fd_shutdown(exec_ctx, sp->emfd);
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- deactivated_all_ports(s);
+ deactivated_all_ports(exec_ctx, s);
}
}
@@ -234,7 +251,7 @@ static void init_max_accept_queue_size(void) {
char *end;
long i = strtol(buf, &end, 10);
if (i > 0 && i <= INT_MAX && end && *end == 0) {
- n = i;
+ n = (int)i;
}
}
fclose(fp);
@@ -254,7 +271,8 @@ static int get_max_accept_queue_size(void) {
}
/* Prepare a recently-created socket for listening. */
-static int prepare_socket(int fd, const struct sockaddr *addr, int addr_len) {
+static int prepare_socket(int fd, const struct sockaddr *addr,
+ size_t addr_len) {
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
@@ -271,7 +289,8 @@ static int prepare_socket(int fd, const struct sockaddr *addr, int addr_len) {
goto error;
}
- if (bind(fd, addr, addr_len) < 0) {
+ GPR_ASSERT(addr_len < ~(socklen_t)0);
+ if (bind(fd, addr, (socklen_t)addr_len) < 0) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
@@ -299,8 +318,10 @@ error:
}
/* event manager callback when reads are ready */
-static void on_read(void *arg, int success) {
- server_port *sp = arg;
+static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_tcp_listener *sp = arg;
+ grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
+ sp->fd_index};
grpc_fd *fdobj;
size_t i;
@@ -315,14 +336,14 @@ static void on_read(void *arg, int success) {
char *addr_str;
char *name;
/* Note: If we ever decide to return this address to the user, remember to
- strip off the ::ffff:0.0.0.0/96 prefix first. */
+ strip off the ::ffff:0.0.0.0/96 prefix first. */
int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
if (fd < 0) {
switch (errno) {
case EINTR:
continue;
case EAGAIN:
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
return;
default:
gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
@@ -332,38 +353,47 @@ static void on_read(void *arg, int success) {
grpc_set_socket_no_sigpipe_if_possible(fd);
- grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
+ }
+
fdobj = grpc_fd_create(fd, name);
/* TODO(ctiller): revise this when we have server-side sharding
of channels -- we certainly should not be automatically adding every
incoming channel to every pollset owned by the server */
for (i = 0; i < sp->server->pollset_count; i++) {
- grpc_pollset_add_fd(sp->server->pollsets[i], fdobj);
+ grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
}
- sp->server->cb(sp->server->cb_arg,
- grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
+ sp->server->on_accept_cb(
+ exec_ctx, sp->server->on_accept_cb_arg,
+ grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+ &acceptor);
gpr_free(name);
gpr_free(addr_str);
}
- abort();
+ GPR_UNREACHABLE_CODE(return );
error:
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
}
-static int add_socket_to_server(grpc_tcp_server *s, int fd,
- const struct sockaddr *addr, int addr_len) {
- server_port *sp;
+static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, int fd,
+ const struct sockaddr *addr,
+ size_t addr_len,
+ unsigned port_index,
+ unsigned fd_index) {
+ grpc_tcp_listener *sp = NULL;
int port;
char *addr_str;
char *name;
@@ -373,32 +403,39 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
- GPR_ASSERT(!s->cb && "must add ports before starting server");
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ s->nports++;
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
}
- sp = &s->ports[s->nports++];
+ s->tail = sp;
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
memcpy(sp->addr.untyped, addr, addr_len);
sp->addr_len = addr_len;
+ sp->port = port;
+ sp->port_index = port_index;
+ sp->fd_index = fd_index;
+ sp->is_sibling = 0;
+ sp->sibling = NULL;
GPR_ASSERT(sp->emfd);
gpr_mu_unlock(&s->mu);
gpr_free(addr_str);
gpr_free(name);
}
- return port;
+ return sp;
}
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- int addr_len) {
- int allocated_port1 = -1;
- int allocated_port2 = -1;
- unsigned i;
+ size_t addr_len) {
+ grpc_tcp_listener *sp;
+ grpc_tcp_listener *sp2 = NULL;
int fd;
grpc_dualstack_mode dsmode;
struct sockaddr_in6 addr6_v4mapped;
@@ -409,7 +446,11 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
int port;
-
+ unsigned port_index = 0;
+ unsigned fd_index = 0;
+ if (s->tail != NULL) {
+ port_index = s->tail->port_index + 1;
+ }
if (((struct sockaddr *)addr)->sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(addr);
}
@@ -417,9 +458,9 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
+ for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
+ if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp,
&sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@@ -433,6 +474,8 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
}
+ sp = NULL;
+
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
addr_len = sizeof(addr6_v4mapped);
@@ -446,14 +489,17 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&wild6;
addr_len = sizeof(wild6);
fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
- allocated_port1 = add_socket_to_server(s, fd, addr, addr_len);
+ sp = add_socket_to_server(s, fd, addr, addr_len, port_index, fd_index);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
-
+ if (sp != NULL) {
+ ++fd_index;
+ }
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
- if (port == 0 && allocated_port1 > 0) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
+ if (port == 0 && sp != NULL) {
+ grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
+ sp2 = sp;
}
addr = (struct sockaddr *)&wild4;
addr_len = sizeof(wild4);
@@ -468,39 +514,106 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&addr4_copy;
addr_len = sizeof(addr4_copy);
}
- allocated_port2 = add_socket_to_server(s, fd, addr, addr_len);
+ sp = add_socket_to_server(s, fd, addr, addr_len, port_index, fd_index);
+ if (sp2 != NULL && sp != NULL) {
+ sp2->sibling = sp;
+ sp->is_sibling = 1;
+ }
done:
gpr_free(allocated_addr);
- return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
+ if (sp != NULL) {
+ return sp->port;
+ } else {
+ return -1;
+ }
}
-int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index) {
- return (index < s->nports) ? s->ports[index].fd : -1;
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
+ unsigned port_index) {
+ unsigned num_fds = 0;
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp && port_index != 0; sp = sp->next) {
+ if (!sp->is_sibling) {
+ --port_index;
+ }
+ }
+ for (; sp; sp = sp->sibling, ++num_fds)
+ ;
+ return num_fds;
}
-void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets,
- size_t pollset_count, grpc_tcp_server_cb cb,
- void *cb_arg) {
- size_t i, j;
- GPR_ASSERT(cb);
+int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
+ unsigned fd_index) {
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp && port_index != 0; sp = sp->next) {
+ if (!sp->is_sibling) {
+ --port_index;
+ }
+ }
+ for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
+ ;
+ if (sp) {
+ return sp->fd;
+ } else {
+ return -1;
+ }
+}
+
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb,
+ void *on_accept_cb_arg) {
+ size_t i;
+ grpc_tcp_listener *sp;
+ GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
- GPR_ASSERT(!s->cb);
+ GPR_ASSERT(!s->on_accept_cb);
GPR_ASSERT(s->active_ports == 0);
- s->cb = cb;
- s->cb_arg = cb_arg;
+ s->on_accept_cb = on_accept_cb;
+ s->on_accept_cb_arg = on_accept_cb_arg;
s->pollsets = pollsets;
s->pollset_count = pollset_count;
- for (i = 0; i < s->nports; i++) {
- for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
+ for (sp = s->head; sp; sp = sp->next) {
+ for (i = 0; i < pollset_count; i++) {
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- s->ports[i].read_closure.cb = on_read;
- s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
+ sp->read_closure.cb = on_read;
+ sp->read_closure.cb_arg = sp;
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
+grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+ gpr_ref(&s->refs);
+ return s;
+}
+
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
+ grpc_closure *shutdown_starting) {
+ gpr_mu_lock(&s->mu);
+ grpc_closure_list_add(&s->shutdown_starting, shutdown_starting, 1);
+ gpr_mu_unlock(&s->mu);
+}
+
+void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (gpr_unref(&s->refs)) {
+ /* Complete shutdown_starting work before destroying. */
+ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_mu_lock(&s->mu);
+ grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ gpr_mu_unlock(&s->mu);
+ if (exec_ctx == NULL) {
+ grpc_exec_ctx_flush(&local_exec_ctx);
+ tcp_server_destroy(&local_exec_ctx, s);
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ } else {
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ tcp_server_destroy(exec_ctx, s);
+ }
+ }
+}
+
#endif
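A sketch (not part of the patch) of enumerating the sockets behind each add_port() call with the new accessors: port_index counts add_port() calls, fd_index walks the sibling listeners created by a single call (e.g. a dual [::]/0.0.0.0 pair when dualstack is unavailable). log_listener_fds and num_ports are illustrative; the caller is assumed to have counted its own add_port() calls.

#include <grpc/support/log.h>
#include "src/core/iomgr/tcp_server.h"

static void log_listener_fds(grpc_tcp_server *s, unsigned num_ports) {
  unsigned port_index;
  for (port_index = 0; port_index < num_ports; port_index++) {
    unsigned nfds = grpc_tcp_server_port_fd_count(s, port_index);
    unsigned fd_index;
    for (fd_index = 0; fd_index < nfds; fd_index++) {
      int fd = grpc_tcp_server_port_fd(s, port_index, fd_index);
      gpr_log(GPR_DEBUG, "port %u fd %u -> %d", port_index, fd_index, fd);
    }
  }
}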
diff --git a/src/core/iomgr/tcp_server_windows.c b/src/core/iomgr/tcp_server_windows.c
index d70968de88..a4abc5b974 100644
--- a/src/core/iomgr/tcp_server_windows.c
+++ b/src/core/iomgr/tcp_server_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,8 @@
#ifdef GPR_WINSOCK_SOCKET
-#define _GNU_SOURCE
+#include <io.h>
+
#include "src/core/iomgr/sockaddr_utils.h"
#include <grpc/support/alloc.h>
@@ -51,92 +52,143 @@
#include "src/core/iomgr/tcp_server.h"
#include "src/core/iomgr/tcp_windows.h"
-#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
/* one listening port */
-typedef struct server_port {
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+struct grpc_tcp_listener {
 /* This seemingly magic number comes from AcceptEx's documentation. Each
 address buffer needs to have at least 16 more bytes at its end. */
- gpr_uint8 addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
+ uint8_t addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
/* This will hold the socket for the next accept. */
SOCKET new_socket;
- /* The listener winsocked. */
+ /* The listener winsocket. */
grpc_winsocket *socket;
+ /* The actual TCP port number. */
+ int port;
+ unsigned port_index;
grpc_tcp_server *server;
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
-} server_port;
+ /* closure for socket notification of accept being ready */
+ grpc_closure on_accept;
+ /* linked list */
+ struct grpc_tcp_listener *next;
+};
/* the overall server */
struct grpc_tcp_server {
- grpc_tcp_server_cb cb;
- void *cb_arg;
+ gpr_refcount refs;
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void *on_accept_cb_arg;
gpr_mu mu;
- gpr_cv cv;
/* active port count: how many ports are actually still listening */
int active_ports;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ grpc_tcp_listener *tail;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
+
+ /* shutdown callback */
+ grpc_closure *shutdown_complete;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_tcp_server *grpc_tcp_server_create(void) {
+grpc_tcp_server *grpc_tcp_server_create(grpc_closure *shutdown_complete) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
- gpr_cv_init(&s->cv);
s->active_ports = 0;
- s->cb = NULL;
- s->cb_arg = NULL;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
- s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
+ s->on_accept_cb = NULL;
+ s->on_accept_cb_arg = NULL;
+ s->head = NULL;
+ s->tail = NULL;
+ s->shutdown_starting.head = NULL;
+ s->shutdown_starting.tail = NULL;
+ s->shutdown_complete = shutdown_complete;
return s;
}
-/* Public function. Stops and destroys a grpc_tcp_server. */
-void grpc_tcp_server_destroy(grpc_tcp_server *s,
- void (*shutdown_done)(void *shutdown_done_arg),
- void *shutdown_done_arg) {
- size_t i;
- gpr_mu_lock(&s->mu);
- /* First, shutdown all fd's. This will queue abortion calls for all
- of the pending accepts. */
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
- grpc_winsocket_shutdown(sp->socket);
- }
- /* This happens asynchronously. Wait while that happens. */
- while (s->active_ports) {
- gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, true, NULL);
}
- gpr_mu_unlock(&s->mu);
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
closed by the system. */
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
- grpc_winsocket_orphan(sp->socket);
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ sp->next = NULL;
+ grpc_winsocket_destroy(sp->socket);
+ gpr_free(sp);
}
- gpr_free(s->ports);
gpr_free(s);
+}
+
+grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+ gpr_ref(&s->refs);
+ return s;
+}
- if (shutdown_done) {
- shutdown_done(shutdown_done_arg);
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
+ grpc_closure *shutdown_starting) {
+ gpr_mu_lock(&s->mu);
+ grpc_closure_list_add(&s->shutdown_starting, shutdown_starting, 1);
+ gpr_mu_unlock(&s->mu);
+}
+
+static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ int immediately_done = 0;
+ grpc_tcp_listener *sp;
+ gpr_mu_lock(&s->mu);
+
+ /* First, shut down all the sockets. This will queue abort notifications
+ for all of the pending accepts through the normal completion mechanism. */
+ if (s->active_ports == 0) {
+ immediately_done = 1;
+ }
+ for (sp = s->head; sp; sp = sp->next) {
+ sp->shutting_down = 1;
+ grpc_winsocket_shutdown(sp->socket);
+ }
+ gpr_mu_unlock(&s->mu);
+
+ if (immediately_done) {
+ finish_shutdown(exec_ctx, s);
+ }
+}
+
+void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (gpr_unref(&s->refs)) {
+ /* Complete shutdown_starting work before destroying. */
+ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_mu_lock(&s->mu);
+ grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ gpr_mu_unlock(&s->mu);
+ if (exec_ctx == NULL) {
+ grpc_exec_ctx_flush(&local_exec_ctx);
+ tcp_server_destroy(&local_exec_ctx, s);
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ } else {
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ tcp_server_destroy(exec_ctx, s);
+ }
}
}
/* Prepare (bind) a recently-created socket for listening. */
static int prepare_socket(SOCKET sock, const struct sockaddr *addr,
- int addr_len) {
+ size_t addr_len) {
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
@@ -149,7 +201,7 @@ static int prepare_socket(SOCKET sock, const struct sockaddr *addr,
goto error;
}
- if (bind(sock, addr, addr_len) == SOCKET_ERROR) {
+ if (bind(sock, addr, (int)addr_len) == SOCKET_ERROR) {
char *addr_str;
char *utf8_message = gpr_format_message(WSAGetLastError());
grpc_sockaddr_to_string(&addr_str, addr, 0);
@@ -182,12 +234,24 @@ error:
return -1;
}
-/* start_accept will reference that for the IOCP notification request. */
-static void on_accept(void *arg, int from_iocp);
+static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_listener *sp) {
+ int notify = 0;
+ sp->shutting_down = 0;
+ gpr_mu_lock(&sp->server->mu);
+ GPR_ASSERT(sp->server->active_ports > 0);
+ if (0 == --sp->server->active_ports) {
+ notify = 1;
+ }
+ gpr_mu_unlock(&sp->server->mu);
+ if (notify) {
+ finish_shutdown(exec_ctx, sp->server);
+ }
+}
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static void start_accept(server_port *port) {
+static void start_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *port) {
SOCKET sock = INVALID_SOCKET;
char *message;
char *utf8_message;
@@ -226,10 +290,19 @@ static void start_accept(server_port *port) {
/* We're ready to do the accept. Calling grpc_socket_notify_on_read may
immediately process an accept that happened in the meantime. */
port->new_socket = sock;
- grpc_socket_notify_on_read(port->socket, on_accept, port);
+ grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept);
return;
failure:
+ if (port->shutting_down) {
+ /* We are abandoning the listener port; take that into account to prevent
+ occasional hangs on shutdown. The hang happens when the sp->shutting_down
+ change is not seen by on_accept and we proceed to try a new accept, but
+ fail because the listening port has been closed in the meantime. */
+ decrement_active_ports_and_notify(exec_ctx, port);
+ return;
+ }
utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, message, utf8_message);
gpr_free(utf8_message);
@@ -237,81 +310,97 @@ failure:
}
/* Event manager callback when reads are ready. */
-static void on_accept(void *arg, int from_iocp) {
- server_port *sp = arg;
+static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, bool from_iocp) {
+ grpc_tcp_listener *sp = arg;
+ grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
-
- /* The shutdown sequence is done in two parts. This is the second
- part here, acknowledging the IOCP notification, and doing nothing
- else, especially not queuing a new accept. */
- if (sp->shutting_down) {
- GPR_ASSERT(from_iocp);
- sp->shutting_down = 0;
- sp->socket->read_info.outstanding = 0;
- gpr_mu_lock(&sp->server->mu);
- if (0 == --sp->server->active_ports) {
- gpr_cv_broadcast(&sp->server->cv);
- }
- gpr_mu_unlock(&sp->server->mu);
+ struct sockaddr_storage peer_name;
+ char *peer_name_string;
+ char *fd_name;
+ int peer_name_len = sizeof(peer_name);
+ DWORD transfered_bytes;
+ DWORD flags;
+ BOOL wsa_success;
+ int err;
+
+ /* The general mechanism for shutting down is to queue abortion calls. While
+ this is necessary in the read/write case, it's useless for the accept
+ case. We only need to adjust the pending callback count. */
+ if (!from_iocp) {
return;
}
- if (from_iocp) {
- /* The IOCP notified us of a completed operation. Let's grab the results,
- and act accordingly. */
- DWORD transfered_bytes = 0;
- DWORD flags;
- BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
- &transfered_bytes, FALSE, &flags);
- if (!wsa_success) {
+ /* The IOCP notified us of a completed operation. Let's grab the results,
+ and act accordingly. */
+ transfered_bytes = 0;
+ wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
+ &transfered_bytes, FALSE, &flags);
+ if (!wsa_success) {
+ if (sp->shutting_down) {
+ /* During the shutdown case, we ARE expecting an error. So that's fine,
+ and we can wake up the shutdown thread. */
+ decrement_active_ports_and_notify(exec_ctx, sp);
+ return;
+ } else {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
gpr_free(utf8_message);
closesocket(sock);
- } else {
- /* TODO(ctiller): add sockaddr address to label */
- ep = grpc_tcp_create(grpc_winsocket_create(sock, "server"));
}
} else {
- /* If we're not notified from the IOCP, it means we are asked to shutdown.
- This will initiate that shutdown. Calling closesocket will trigger an
- IOCP notification, that will call this function a second time, from
- the IOCP thread. Of course, this only works if the socket was, in fact,
- listening. If that's not the case, we'd wait indefinitely. That's a bit
- of a degenerate case, but it can happen if you create a server, but
- don't start it. So let's support that by recursing once. */
- sp->shutting_down = 1;
- sp->new_socket = INVALID_SOCKET;
- if (sock != INVALID_SOCKET) {
- closesocket(sock);
+ if (!sp->shutting_down) {
+ peer_name_string = NULL;
+ err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&sp->socket->socket, sizeof(sp->socket->socket));
+ if (err) {
+ char *utf8_message = gpr_format_message(WSAGetLastError());
+ gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
+ gpr_free(utf8_message);
+ }
+ err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
+ if (!err) {
+ peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
+ } else {
+ char *utf8_message = gpr_format_message(WSAGetLastError());
+ gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);
+ gpr_free(utf8_message);
+ }
+ gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
+ ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
+ peer_name_string);
+ gpr_free(fd_name);
+ gpr_free(peer_name_string);
} else {
- on_accept(sp, 1);
+ closesocket(sock);
}
- return;
}
/* The only time we should call our callback, is where we successfully
managed to accept a connection, and created an endpoint. */
- if (ep) sp->server->cb(sp->server->cb_arg, ep);
+ if (ep)
+ sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep,
+ &acceptor);
/* As we were notified from the IOCP of one and exactly one accept,
- the former socked we created has now either been destroy or assigned
- to the new connection. We need to create a new one for the next
- connection. */
- start_accept(sp);
+ the former socket we created has now either been destroyed or assigned
+ to the new connection. We need to create a new one for the next
+ connection. */
+ start_accept(exec_ctx, sp);
}
-static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
- const struct sockaddr *addr, int addr_len) {
- server_port *sp;
+static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
+ const struct sockaddr *addr,
+ size_t addr_len,
+ unsigned port_index) {
+ grpc_tcp_listener *sp = NULL;
int port;
int status;
GUID guid = WSAID_ACCEPTEX;
DWORD ioctl_num_bytes;
LPFN_ACCEPTEX AcceptEx;
- if (sock == INVALID_SOCKET) return -1;
+ if (sock == INVALID_SOCKET) return NULL;
/* We need to grab the AcceptEx pointer for that port, as it may be
interface-dependent. We'll cache it to avoid doing that again. */
@@ -324,35 +413,39 @@ static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
closesocket(sock);
- return -1;
+ return NULL;
}
port = prepare_socket(sock, addr, addr_len);
if (port >= 0) {
gpr_mu_lock(&s->mu);
- GPR_ASSERT(!s->cb && "must add ports before starting server");
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
}
- sp = &s->ports[s->nports++];
+ s->tail = sp;
sp->server = s;
sp->socket = grpc_winsocket_create(sock, "listener");
sp->shutting_down = 0;
sp->AcceptEx = AcceptEx;
sp->new_socket = INVALID_SOCKET;
+ sp->port = port;
+ sp->port_index = port_index;
+ grpc_closure_init(&sp->on_accept, on_accept, sp);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
}
- return port;
+ return sp;
}
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- int addr_len) {
- int allocated_port = -1;
- unsigned i;
+ size_t addr_len) {
+ grpc_tcp_listener *sp;
SOCKET sock;
struct sockaddr_in6 addr6_v4mapped;
struct sockaddr_in6 wildcard;
@@ -360,13 +453,17 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
int port;
+ unsigned port_index = 0;
+ if (s->tail != NULL) {
+ port_index = s->tail->port_index + 1;
+ }
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
+ for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].socket->socket,
+ if (0 == getsockname(sp->socket->socket,
(struct sockaddr *)&sockname_temp, &sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@@ -401,29 +498,57 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
gpr_free(utf8_message);
}
- allocated_port = add_socket_to_server(s, sock, addr, addr_len);
+ sp = add_socket_to_server(s, sock, addr, addr_len, port_index);
gpr_free(allocated_addr);
- return allocated_port;
+ if (sp) {
+ return sp->port;
+ } else {
+ return -1;
+ }
+}
+
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
+ unsigned port_index) {
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp && port_index != 0; sp = sp->next, --port_index)
+ ;
+ if (sp) {
+ return 1;
+ } else {
+ return 0;
+ }
}
-SOCKET grpc_tcp_server_get_socket(grpc_tcp_server *s, unsigned index) {
- return (index < s->nports) ? s->ports[index].socket->socket : INVALID_SOCKET;
+int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
+ unsigned fd_index) {
+ grpc_tcp_listener *sp;
+ if (fd_index != 0) {
+ /* Windows implementation has only one fd per port_index. */
+ return -1;
+ }
+ for (sp = s->head; sp && port_index != 0; sp = sp->next, --port_index)
+ ;
+ if (sp) {
+ return _open_osfhandle((intptr_t)sp->socket->socket, 0);
+ } else {
+ return -1;
+ }
}
-void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollset,
- size_t pollset_count, grpc_tcp_server_cb cb,
- void *cb_arg) {
- size_t i;
- GPR_ASSERT(cb);
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_pollset **pollset, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb,
+ void *on_accept_cb_arg) {
+ grpc_tcp_listener *sp;
+ GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
- GPR_ASSERT(!s->cb);
+ GPR_ASSERT(!s->on_accept_cb);
GPR_ASSERT(s->active_ports == 0);
- s->cb = cb;
- s->cb_arg = cb_arg;
- for (i = 0; i < s->nports; i++) {
- s->ports[i].socket->read_info.outstanding = 1;
- start_accept(s->ports + i);
+ s->on_accept_cb = on_accept_cb;
+ s->on_accept_cb_arg = on_accept_cb_arg;
+ for (sp = s->head; sp; sp = sp->next) {
+ start_accept(exec_ctx, sp);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
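A sketch (not part of the patch) of registering work to run just before the server is destroyed, mirroring the posix and windows unref paths above; on_shutdown_starting and release_server are illustrative names.

#include <stdbool.h>
#include "src/core/iomgr/tcp_server.h"

static void on_shutdown_starting(grpc_exec_ctx *exec_ctx, void *arg,
                                 bool success) {
  /* Runs from grpc_tcp_server_unref() once the last ref is dropped, before
     the listeners are torn down. */
}

static grpc_closure g_shutdown_starting; /* must outlive the server */

static void release_server(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  grpc_closure_init(&g_shutdown_starting, on_shutdown_starting, NULL);
  grpc_tcp_server_shutdown_starting_add(s, &g_shutdown_starting);
  grpc_tcp_server_unref(exec_ctx, s); /* may destroy s */
}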
diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c
index 15759c398a..038e4158c8 100644
--- a/src/core/iomgr/tcp_windows.c
+++ b/src/core/iomgr/tcp_windows.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-#include "src/core/iomgr/alarm.h"
+#include "src/core/iomgr/timer.h"
#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/sockaddr.h"
#include "src/core/iomgr/sockaddr_utils.h"
@@ -55,24 +55,22 @@ static int set_non_block(SOCKET sock) {
int status;
unsigned long param = 1;
DWORD ret;
- status = WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret,
- NULL, NULL);
+ status =
+ WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret, NULL, NULL);
return status == 0;
}
static int set_dualstack(SOCKET sock) {
int status;
unsigned long param = 0;
- status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
- (const char *) &param, sizeof(param));
+ status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&param,
+ sizeof(param));
return status == 0;
}
int grpc_tcp_prepare_socket(SOCKET sock) {
- if (!set_non_block(sock))
- return 0;
- if (!set_dualstack(sock))
- return 0;
+ if (!set_non_block(sock)) return 0;
+ if (!set_dualstack(sock)) return 0;
return 1;
}
@@ -84,95 +82,100 @@ typedef struct grpc_tcp {
/* Refcounting how many operations are in progress. */
gpr_refcount refcount;
- grpc_endpoint_read_cb read_cb;
- void *read_user_data;
- gpr_slice read_slice;
+ grpc_closure on_read;
+ grpc_closure on_write;
- grpc_endpoint_write_cb write_cb;
- void *write_user_data;
- gpr_slice_buffer write_slices;
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+ gpr_slice read_slice;
+ gpr_slice_buffer *write_slices;
+ gpr_slice_buffer *read_slices;
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
gpr_mu mu;
int shutting_down;
+
+ char *peer_string;
} grpc_tcp;
-static void tcp_ref(grpc_tcp *tcp) {
- gpr_ref(&tcp->refcount);
+static void tcp_free(grpc_tcp *tcp) {
+ grpc_winsocket_destroy(tcp->socket);
+ gpr_mu_destroy(&tcp->mu);
+ gpr_free(tcp->peer_string);
+ gpr_free(tcp);
+}
+
+/*#define GRPC_TCP_REFCOUNT_DEBUG*/
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp);
+ }
}
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count + 1);
+ gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
- gpr_slice_buffer_destroy(&tcp->write_slices);
- grpc_winsocket_orphan(tcp->socket);
- gpr_mu_destroy(&tcp->mu);
- gpr_free(tcp);
+ tcp_free(tcp);
}
}
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_read(void *tcpp, int from_iocp) {
- grpc_tcp *tcp = (grpc_tcp *) tcpp;
+static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
+ grpc_tcp *tcp = tcpp;
+ grpc_closure *cb = tcp->read_cb;
grpc_winsocket *socket = tcp->socket;
gpr_slice sub;
- gpr_slice *slice = NULL;
- size_t nslices = 0;
- grpc_endpoint_cb_status status;
- grpc_endpoint_read_cb cb;
grpc_winsocket_callback_info *info = &socket->read_info;
- void *opaque = tcp->read_user_data;
- int do_abort = 0;
- gpr_mu_lock(&tcp->mu);
- cb = tcp->read_cb;
- tcp->read_cb = NULL;
- if (!from_iocp || tcp->shutting_down) {
- /* If we are here with from_iocp set to true, it means we got raced to
- shutting down the endpoint. No actual abort callback will happen
- though, so we're going to do it from here. */
- do_abort = 1;
- }
- gpr_mu_unlock(&tcp->mu);
-
- if (do_abort) {
- if (from_iocp) {
- tcp->socket->read_info.outstanding = 0;
+ if (success) {
+ if (socket->read_info.wsa_error != 0 && !tcp->shutting_down) {
+ if (socket->read_info.wsa_error != WSAECONNRESET) {
+ char *utf8_message = gpr_format_message(info->wsa_error);
+ gpr_log(GPR_ERROR, "ReadFile overlapped error: %s", utf8_message);
+ gpr_free(utf8_message);
+ }
+ success = 0;
gpr_slice_unref(tcp->read_slice);
- }
- tcp_unref(tcp);
- if (cb) cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
- return;
- }
-
- GPR_ASSERT(tcp->socket->read_info.outstanding);
-
- if (socket->read_info.wsa_error != 0) {
- char *utf8_message = gpr_format_message(info->wsa_error);
- gpr_log(GPR_ERROR, "ReadFile overlapped error: %s", utf8_message);
- gpr_free(utf8_message);
- status = GRPC_ENDPOINT_CB_ERROR;
- } else {
- if (info->bytes_transfered != 0) {
- sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
- status = GRPC_ENDPOINT_CB_OK;
- slice = &sub;
- nslices = 1;
} else {
- gpr_slice_unref(tcp->read_slice);
- status = GRPC_ENDPOINT_CB_EOF;
+ if (info->bytes_transfered != 0 && !tcp->shutting_down) {
+ sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
+ gpr_slice_buffer_add(tcp->read_slices, sub);
+ success = 1;
+ } else {
+ gpr_slice_unref(tcp->read_slice);
+ success = 0;
+ }
}
}
- tcp->socket->read_info.outstanding = 0;
-
- tcp_unref(tcp);
- cb(opaque, slice, nslices, status);
+ tcp->read_cb = NULL;
+ TCP_UNREF(tcp, "read");
+ if (cb) {
+ cb->cb(exec_ctx, cb->cb_arg, success);
+ }
}
-static void win_notify_on_read(grpc_endpoint *ep,
- grpc_endpoint_read_cb cb, void *arg) {
- grpc_tcp *tcp = (grpc_tcp *) ep;
+static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *read_slices, grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->read_info;
int status;
@@ -180,31 +183,32 @@ static void win_notify_on_read(grpc_endpoint *ep,
DWORD flags = 0;
WSABUF buffer;
- GPR_ASSERT(!tcp->socket->read_info.outstanding);
if (tcp->shutting_down) {
- cb(arg, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
+ grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
return;
}
- tcp_ref(tcp);
- tcp->socket->read_info.outstanding = 1;
+
tcp->read_cb = cb;
- tcp->read_user_data = arg;
+ tcp->read_slices = read_slices;
+ gpr_slice_buffer_reset_and_unref(read_slices);
tcp->read_slice = gpr_slice_malloc(8192);
- buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
+ buffer.len = (ULONG)GPR_SLICE_LENGTH(
+ tcp->read_slice); // we know the slice size fits in 32 bits.
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
+ TCP_REF(tcp, "read");
+
/* First let's try a synchronous, non-blocking read. */
- status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
- NULL, NULL);
+ status =
+ WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
- /* This might heavily recurse. */
- on_read(tcp, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, true, NULL);
return;
}
@@ -217,69 +221,47 @@ static void win_notify_on_read(grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
- on_read(tcp, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, false, NULL);
return;
}
}
- grpc_socket_notify_on_read(tcp->socket, on_read, tcp);
+ grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_write(void *tcpp, int from_iocp) {
- grpc_tcp *tcp = (grpc_tcp *) tcpp;
+static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
+ grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info;
- grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
- grpc_endpoint_write_cb cb;
- void *opaque = tcp->write_user_data;
- int do_abort = 0;
+ grpc_closure *cb;
gpr_mu_lock(&tcp->mu);
cb = tcp->write_cb;
tcp->write_cb = NULL;
- if (!from_iocp || tcp->shutting_down) {
- /* If we are here with from_iocp set to true, it means we got raced to
- shutting down the endpoint. No actual abort callback will happen
- though, so we're going to do it from here. */
- do_abort = 1;
- }
gpr_mu_unlock(&tcp->mu);
- if (do_abort) {
- if (from_iocp) {
- tcp->socket->write_info.outstanding = 0;
- gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
+ if (success) {
+ if (info->wsa_error != 0) {
+ if (info->wsa_error != WSAECONNRESET) {
+ char *utf8_message = gpr_format_message(info->wsa_error);
+ gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
+ gpr_free(utf8_message);
+ }
+ success = 0;
+ } else {
+ GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length);
}
- tcp_unref(tcp);
- if (cb) cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN);
- return;
}
- GPR_ASSERT(tcp->socket->write_info.outstanding);
-
- if (info->wsa_error != 0) {
- char *utf8_message = gpr_format_message(info->wsa_error);
- gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
- gpr_free(utf8_message);
- status = GRPC_ENDPOINT_CB_ERROR;
- } else {
- GPR_ASSERT(info->bytes_transfered == tcp->write_slices.length);
- }
-
- gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
- tcp->socket->write_info.outstanding = 0;
-
- tcp_unref(tcp);
- cb(opaque, status);
+ TCP_UNREF(tcp, "write");
+ cb->cb(exec_ctx, cb->cb_arg, success);
}
/* Initiates a write. */
-static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
- gpr_slice *slices, size_t nslices,
- grpc_endpoint_write_cb cb,
- void *arg) {
- grpc_tcp *tcp = (grpc_tcp *) ep;
+static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info;
unsigned i;
@@ -288,31 +270,30 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
WSABUF local_buffers[16];
WSABUF *allocated = NULL;
WSABUF *buffers = local_buffers;
+ size_t len;
- GPR_ASSERT(!tcp->socket->write_info.outstanding);
if (tcp->shutting_down) {
- return GRPC_ENDPOINT_WRITE_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
+ return;
}
- tcp_ref(tcp);
- tcp->socket->write_info.outstanding = 1;
tcp->write_cb = cb;
- tcp->write_user_data = arg;
-
- gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);
-
- if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
- buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
+ tcp->write_slices = slices;
+ GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
+ if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
+ buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
- for (i = 0; i < tcp->write_slices.count; i++) {
- buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]);
- buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
+ for (i = 0; i < tcp->write_slices->count; i++) {
+ len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
+ GPR_ASSERT(len <= ULONG_MAX);
+ buffers[i].len = (ULONG)len;
+ buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
- status = WSASend(socket->socket, buffers, tcp->write_slices.count,
+ status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
@@ -320,47 +301,58 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
connection that has its send queue filled up. But if we don't, then we can
avoid doing an async write operation at all. */
if (info->wsa_error != WSAEWOULDBLOCK) {
- grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
+ bool ok = false;
if (status == 0) {
- ret = GRPC_ENDPOINT_WRITE_DONE;
- GPR_ASSERT(bytes_sent == tcp->write_slices.length);
+ ok = true;
+ GPR_ASSERT(bytes_sent == tcp->write_slices->length);
} else {
- char *utf8_message = gpr_format_message(info->wsa_error);
- gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
- gpr_free(utf8_message);
+ if (socket->read_info.wsa_error != WSAECONNRESET) {
+ char *utf8_message = gpr_format_message(info->wsa_error);
+ gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
+ gpr_free(utf8_message);
+ }
}
if (allocated) gpr_free(allocated);
- gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
- tcp->socket->write_info.outstanding = 0;
- tcp_unref(tcp);
- return ret;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, ok, NULL);
+ return;
}
+ TCP_REF(tcp, "write");
+
/* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
operation, this time asynchronously. */
memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
- status = WSASend(socket->socket, buffers, tcp->write_slices.count,
+ status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, &socket->write_info.overlapped, NULL);
if (allocated) gpr_free(allocated);
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
- gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
- tcp->socket->write_info.outstanding = 0;
- tcp_unref(tcp);
- return GRPC_ENDPOINT_WRITE_ERROR;
+ TCP_UNREF(tcp, "write");
+ grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
+ return;
}
}
/* As all is now setup, we can now ask for the IOCP notification. It may
trigger the callback immediately however, but no matter. */
- grpc_socket_notify_on_write(socket, on_write, tcp);
- return GRPC_ENDPOINT_WRITE_PENDING;
+ grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
}
-static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
- grpc_tcp *tcp = (grpc_tcp *) ep;
+static void win_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *ps) {
+ grpc_tcp *tcp;
+ (void)ps;
+ tcp = (grpc_tcp *)ep;
+ grpc_iocp_add_socket(tcp->socket);
+}
+
+static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pss) {
+ grpc_tcp *tcp;
+ (void)pss;
+ tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket);
}
@@ -370,36 +362,41 @@ static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
we're not going to protect against these. However the IO Completion Port
callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */
-static void win_shutdown(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *) ep;
- int extra_refs = 0;
+static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP
callback. See the comments in on_read and on_write. */
tcp->shutting_down = 1;
- extra_refs = grpc_winsocket_shutdown(tcp->socket);
- while (extra_refs--) tcp_ref(tcp);
+ grpc_winsocket_shutdown(tcp->socket);
gpr_mu_unlock(&tcp->mu);
}
-static void win_destroy(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *) ep;
- tcp_unref(tcp);
+static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ TCP_UNREF(tcp, "destroy");
+}
+
+static char *win_get_peer(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return gpr_strdup(tcp->peer_string);
}
-static grpc_endpoint_vtable vtable = {
- win_notify_on_read, win_write, win_add_to_pollset, win_shutdown, win_destroy
-};
+static grpc_endpoint_vtable vtable = {win_read, win_write, win_add_to_pollset,
+ win_add_to_pollset_set, win_shutdown,
+ win_destroy, win_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket) {
- grpc_tcp *tcp = (grpc_tcp *) gpr_malloc(sizeof(grpc_tcp));
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
+ grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
- gpr_slice_buffer_init(&tcp->write_slices);
gpr_ref_init(&tcp->refcount, 1);
+ grpc_closure_init(&tcp->on_read, on_read, tcp);
+ grpc_closure_init(&tcp->on_write, on_write, tcp);
+ tcp->peer_string = gpr_strdup(peer_string);
return &tcp->base;
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GPR_WINSOCK_SOCKET */
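A sketch (not part of the patch) of the closure + slice-buffer style the rewritten win_read implements, viewed from the generic endpoint API; it assumes grpc_endpoint_read() forwards to the vtable's read entry with the same signature as win_read above. The reader struct, read_done, and start_reading are illustrative.

#include <stdbool.h>
#include "src/core/iomgr/endpoint.h"

typedef struct {
  grpc_endpoint *ep;
  gpr_slice_buffer incoming;
  grpc_closure on_read_done;
} reader;

static void read_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  reader *r = arg;
  if (!success) return; /* endpoint shut down or errored */
  /* r->incoming now holds the slices delivered by this read; consume them,
     then re-arm for the next read. */
  gpr_slice_buffer_reset_and_unref(&r->incoming);
  grpc_endpoint_read(exec_ctx, r->ep, &r->incoming, &r->on_read_done);
}

static void start_reading(grpc_exec_ctx *exec_ctx, reader *r,
                          grpc_endpoint *ep) {
  r->ep = ep;
  gpr_slice_buffer_init(&r->incoming);
  grpc_closure_init(&r->on_read_done, read_done, r);
  grpc_endpoint_read(exec_ctx, r->ep, &r->incoming, &r->on_read_done);
}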
diff --git a/src/core/iomgr/tcp_windows.h b/src/core/iomgr/tcp_windows.h
index 4cbc12c53a..78bc13389a 100644
--- a/src/core/iomgr/tcp_windows.h
+++ b/src/core/iomgr/tcp_windows.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_TCP_WINDOWS_H
-#define GRPC_INTERNAL_CORE_IOMGR_TCP_WINDOWS_H
+#ifndef GRPC_CORE_IOMGR_TCP_WINDOWS_H
+#define GRPC_CORE_IOMGR_TCP_WINDOWS_H
/*
Low level TCP "bottom half" implementation, for use by transports built on
top of a TCP connection.
@@ -50,8 +50,8 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket);
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
int grpc_tcp_prepare_socket(SOCKET sock);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_WINDOWS_H */
+#endif /* GRPC_CORE_IOMGR_TCP_WINDOWS_H */
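A small sketch (not part of the patch) of the windows constructor now taking a peer string that win_get_peer() later hands back; the socket, the "client" label, and the peer URI are illustrative.

#include "src/core/iomgr/tcp_windows.h"

static grpc_endpoint *wrap_connected_socket(SOCKET sock) {
  char peer[] = "ipv4:127.0.0.1:50051"; /* illustrative peer URI */
  if (!grpc_tcp_prepare_socket(sock)) return NULL;
  return grpc_tcp_create(grpc_winsocket_create(sock, "client"), peer);
}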
diff --git a/src/core/iomgr/time_averaged_stats.c b/src/core/iomgr/time_averaged_stats.c
index f881dde9fc..e075db4373 100644
--- a/src/core/iomgr/time_averaged_stats.c
+++ b/src/core/iomgr/time_averaged_stats.c
@@ -33,7 +33,7 @@
#include "src/core/iomgr/time_averaged_stats.h"
-void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_init(grpc_time_averaged_stats* stats,
double init_avg, double regress_weight,
double persistence_factor) {
stats->init_avg = init_avg;
@@ -45,14 +45,14 @@ void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
stats->aggregate_weighted_avg = init_avg;
}
-void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats* stats,
double value) {
stats->batch_total_value += value;
++stats->batch_num_samples;
}
double grpc_time_averaged_stats_update_average(
- grpc_time_averaged_stats *stats) {
+ grpc_time_averaged_stats* stats) {
/* Start with the current batch: */
double weighted_sum = stats->batch_total_value;
double total_weight = stats->batch_num_samples;
diff --git a/src/core/iomgr/time_averaged_stats.h b/src/core/iomgr/time_averaged_stats.h
index 13894b2640..048e244bcc 100644
--- a/src/core/iomgr/time_averaged_stats.h
+++ b/src/core/iomgr/time_averaged_stats.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H
-#define GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H
+#ifndef GRPC_CORE_IOMGR_TIME_AVERAGED_STATS_H
+#define GRPC_CORE_IOMGR_TIME_AVERAGED_STATS_H
/* This tracks a time-decaying weighted average. It works by collecting
batches of samples and then mixing their average into a time-decaying
@@ -75,14 +75,14 @@ typedef struct {
/* See the comments on the members above for an explanation of init_avg,
regress_weight, and persistence_factor. */
-void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_init(grpc_time_averaged_stats* stats,
double init_avg, double regress_weight,
double persistence_factor);
/* Add a sample to the current batch. */
-void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats* stats,
double value);
/* Complete a batch and compute the new estimate of the average sample
value. */
-double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats *stats);
+double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats* stats);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H */
+#endif /* GRPC_CORE_IOMGR_TIME_AVERAGED_STATS_H */
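A sketch (not part of the patch) exercising the time-averaged stats API exactly as declared above; the weight values and sample_latency_average are illustrative.

#include "src/core/iomgr/time_averaged_stats.h"

static double sample_latency_average(const double *samples, size_t n) {
  grpc_time_averaged_stats stats;
  size_t i;
  grpc_time_averaged_stats_init(&stats, /*init_avg=*/0.0,
                                /*regress_weight=*/0.5,
                                /*persistence_factor=*/0.9);
  for (i = 0; i < n; i++) {
    grpc_time_averaged_stats_add_sample(&stats, samples[i]);
  }
  /* Complete the batch and fold it into the decaying average. */
  return grpc_time_averaged_stats_update_average(&stats);
}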
diff --git a/src/core/iomgr/alarm.c b/src/core/iomgr/timer.c
index 5860834de3..f444643428 100644
--- a/src/core/iomgr/alarm.c
+++ b/src/core/iomgr/timer.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,18 @@
*
*/
-#include "src/core/iomgr/alarm.h"
+#include "src/core/iomgr/timer.h"
-#include "src/core/iomgr/alarm_heap.h"
-#include "src/core/iomgr/alarm_internal.h"
-#include "src/core/iomgr/time_averaged_stats.h"
+#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
+#include "src/core/iomgr/time_averaged_stats.h"
+#include "src/core/iomgr/timer_heap.h"
#define INVALID_HEAP_INDEX 0xffffffffu
#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
-#define MAX_ALARMS_PER_CHECK 128
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
@@ -51,40 +50,42 @@
typedef struct {
gpr_mu mu;
grpc_time_averaged_stats stats;
- /* All and only alarms with deadlines <= this will be in the heap. */
+ /* All and only timers with deadlines <= this will be in the heap. */
gpr_timespec queue_deadline_cap;
gpr_timespec min_deadline;
/* Index in the g_shard_queue */
- gpr_uint32 shard_queue_index;
- /* This holds all alarms with deadlines < queue_deadline_cap. Alarms in this
+ uint32_t shard_queue_index;
+ /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
list have the top bit of their deadline set to 0. */
- grpc_alarm_heap heap;
- /* This holds alarms whose deadline is >= queue_deadline_cap. */
- grpc_alarm list;
+ grpc_timer_heap heap;
+ /* This holds timers whose deadline is >= queue_deadline_cap. */
+ grpc_timer list;
} shard_type;
/* Protects g_shard_queue */
static gpr_mu g_mu;
-/* Allow only one run_some_expired_alarms at once */
+/* Allow only one run_some_expired_timers at once */
static gpr_mu g_checker_mu;
+static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];
-static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
+static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next, int success);
static gpr_timespec compute_min_deadline(shard_type *shard) {
- return grpc_alarm_heap_is_empty(&shard->heap)
+ return grpc_timer_heap_is_empty(&shard->heap)
? shard->queue_deadline_cap
- : grpc_alarm_heap_top(&shard->heap)->deadline;
+ : grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_alarm_list_init(gpr_timespec now) {
- int i;
+void grpc_timer_list_init(gpr_timespec now) {
+ uint32_t i;
gpr_mu_init(&g_mu);
gpr_mu_init(&g_checker_mu);
+ g_clock_type = now.clock_type;
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
@@ -93,55 +94,55 @@ void grpc_alarm_list_init(gpr_timespec now) {
0.5);
shard->queue_deadline_cap = now;
shard->shard_queue_index = i;
- grpc_alarm_heap_init(&shard->heap);
+ grpc_timer_heap_init(&shard->heap);
shard->list.next = shard->list.prev = &shard->list;
shard->min_deadline = compute_min_deadline(shard);
g_shard_queue[i] = shard;
}
}
-void grpc_alarm_list_shutdown(void) {
+void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
int i;
- while (run_some_expired_alarms(NULL, gpr_inf_future, NULL, 0))
- ;
+ run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL, 0);
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
- grpc_alarm_heap_destroy(&shard->heap);
+ grpc_timer_heap_destroy(&shard->heap);
}
gpr_mu_destroy(&g_mu);
gpr_mu_destroy(&g_checker_mu);
}
/* This is a cheap, but good enough, pointer hash for sharding the tasks: */
-static size_t shard_idx(const grpc_alarm *info) {
+static size_t shard_idx(const grpc_timer *info) {
size_t x = (size_t)info;
return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
}
static double ts_to_dbl(gpr_timespec ts) {
- return ts.tv_sec + 1e-9 * ts.tv_nsec;
+ return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}
static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
- ts.tv_sec = d;
- ts.tv_nsec = 1e9 * (d - ts.tv_sec);
+ ts.tv_sec = (int64_t)d;
+ ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
+ ts.clock_type = GPR_TIMESPAN;
return ts;
}
-static void list_join(grpc_alarm *head, grpc_alarm *alarm) {
- alarm->next = head;
- alarm->prev = head->prev;
- alarm->next->prev = alarm->prev->next = alarm;
+static void list_join(grpc_timer *head, grpc_timer *timer) {
+ timer->next = head;
+ timer->prev = head->prev;
+ timer->next->prev = timer->prev->next = timer;
}
-static void list_remove(grpc_alarm *alarm) {
- alarm->next->prev = alarm->prev;
- alarm->prev->next = alarm->next;
+static void list_remove(grpc_timer *timer) {
+ timer->next->prev = timer->prev;
+ timer->prev->next = timer->next;
}
-static void swap_adjacent_shards_in_queue(size_t first_shard_queue_index) {
+static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
shard_type *temp;
temp = g_shard_queue[first_shard_queue_index];
g_shard_queue[first_shard_queue_index] =
@@ -168,15 +169,16 @@ static void note_deadline_change(shard_type *shard) {
}
}
-void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
- grpc_iomgr_cb_func alarm_cb, void *alarm_cb_arg,
- gpr_timespec now) {
- int is_first_alarm = 0;
- shard_type *shard = &g_shards[shard_idx(alarm)];
- alarm->cb = alarm_cb;
- alarm->cb_arg = alarm_cb_arg;
- alarm->deadline = deadline;
- alarm->triggered = 0;
+void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
+ gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
+ void *timer_cb_arg, gpr_timespec now) {
+ int is_first_timer = 0;
+ shard_type *shard = &g_shards[shard_idx(timer)];
+ GPR_ASSERT(deadline.clock_type == g_clock_type);
+ GPR_ASSERT(now.clock_type == g_clock_type);
+ grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+ timer->deadline = deadline;
+ timer->triggered = 0;
/* TODO(ctiller): check deadline expired */
@@ -184,25 +186,25 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
- is_first_alarm = grpc_alarm_heap_add(&shard->heap, alarm);
+ is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
- alarm->heap_index = INVALID_HEAP_INDEX;
- list_join(&shard->list, alarm);
+ timer->heap_index = INVALID_HEAP_INDEX;
+ list_join(&shard->list, timer);
}
gpr_mu_unlock(&shard->mu);
/* Deadline may have decreased, we need to adjust the master queue. Note
that there is a potential racy unlocked region here. There could be a
- reordering of multiple grpc_alarm_init calls, at this point, but the < test
+ reordering of multiple grpc_timer_init calls, at this point, but the < test
below should ensure that we err on the side of caution. There could
- also be a race with grpc_alarm_check, which might beat us to the lock. In
- that case, it is possible that the alarm that we added will have already
+ also be a race with grpc_timer_check, which might beat us to the lock. In
+ that case, it is possible that the timer that we added will have already
run by the time we hold the lock, but that too is a safe error.
- Finally, it's possible that the grpc_alarm_check that intervened failed to
- trigger the new alarm because the min_deadline hadn't yet been reduced.
- In that case, the alarm will simply have to wait for the next
- grpc_alarm_check. */
- if (is_first_alarm) {
+ Finally, it's possible that the grpc_timer_check that intervened failed to
+ trigger the new timer because the min_deadline hadn't yet been reduced.
+ In that case, the timer will simply have to wait for the next
+ grpc_timer_check. */
+ if (is_first_timer) {
gpr_mu_lock(&g_mu);
if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
@@ -217,29 +219,24 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
}
}
-void grpc_alarm_cancel(grpc_alarm *alarm) {
- shard_type *shard = &g_shards[shard_idx(alarm)];
- int triggered = 0;
+void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+ shard_type *shard = &g_shards[shard_idx(timer)];
gpr_mu_lock(&shard->mu);
- if (!alarm->triggered) {
- triggered = 1;
- alarm->triggered = 1;
- if (alarm->heap_index == INVALID_HEAP_INDEX) {
- list_remove(alarm);
+ if (!timer->triggered) {
+ grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
+ timer->triggered = 1;
+ if (timer->heap_index == INVALID_HEAP_INDEX) {
+ list_remove(timer);
} else {
- grpc_alarm_heap_remove(&shard->heap, alarm);
+ grpc_timer_heap_remove(&shard->heap, timer);
}
}
gpr_mu_unlock(&shard->mu);
-
- if (triggered) {
- alarm->cb(alarm->cb_arg, 0);
- }
}
/* This is called when the queue is empty and "now" has reached the
queue_deadline_cap. We compute a new queue deadline and then scan the map
- for alarms that fall at or under it. Returns true if the queue is no
+ for timers that fall at or under it. Returns true if the queue is no
longer empty.
REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_timespec now) {
@@ -250,80 +247,78 @@ static int refill_queue(shard_type *shard, gpr_timespec now) {
double deadline_delta =
GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
MAX_QUEUE_WINDOW_DURATION);
- grpc_alarm *alarm, *next;
+ grpc_timer *timer, *next;
- /* Compute the new cap and put all alarms under it into the queue: */
+ /* Compute the new cap and put all timers under it into the queue: */
shard->queue_deadline_cap = gpr_time_add(
gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
- for (alarm = shard->list.next; alarm != &shard->list; alarm = next) {
- next = alarm->next;
+ for (timer = shard->list.next; timer != &shard->list; timer = next) {
+ next = timer->next;
- if (gpr_time_cmp(alarm->deadline, shard->queue_deadline_cap) < 0) {
- list_remove(alarm);
- grpc_alarm_heap_add(&shard->heap, alarm);
+ if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
+ list_remove(timer);
+ grpc_timer_heap_add(&shard->heap, timer);
}
}
- return !grpc_alarm_heap_is_empty(&shard->heap);
+ return !grpc_timer_heap_is_empty(&shard->heap);
}
-/* This pops the next non-cancelled alarm with deadline <= now from the queue,
+/* This pops the next non-cancelled timer with deadline <= now from the queue,
or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
-static grpc_alarm *pop_one(shard_type *shard, gpr_timespec now) {
- grpc_alarm *alarm;
+static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
+ grpc_timer *timer;
for (;;) {
- if (grpc_alarm_heap_is_empty(&shard->heap)) {
+ if (grpc_timer_heap_is_empty(&shard->heap)) {
if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
if (!refill_queue(shard, now)) return NULL;
}
- alarm = grpc_alarm_heap_top(&shard->heap);
- if (gpr_time_cmp(alarm->deadline, now) > 0) return NULL;
- alarm->triggered = 1;
- grpc_alarm_heap_pop(&shard->heap);
- return alarm;
+ timer = grpc_timer_heap_top(&shard->heap);
+ if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
+ timer->triggered = 1;
+ grpc_timer_heap_pop(&shard->heap);
+ return timer;
}
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_alarms(shard_type *shard, gpr_timespec now,
- grpc_alarm **alarms, size_t max_alarms,
- gpr_timespec *new_min_deadline) {
+static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
+ gpr_timespec now, gpr_timespec *new_min_deadline,
+ int success) {
size_t n = 0;
- grpc_alarm *alarm;
+ grpc_timer *timer;
gpr_mu_lock(&shard->mu);
- while (n < max_alarms && (alarm = pop_one(shard, now))) {
- alarms[n++] = alarm;
+ while ((timer = pop_one(shard, now))) {
+ grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, success, NULL);
+ n++;
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
return n;
}
-static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
+static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next, int success) {
size_t n = 0;
- size_t i;
- grpc_alarm *alarms[MAX_ALARMS_PER_CHECK];
- /* TODO(ctiller): verify that there are any alarms (atomically) here */
+ /* TODO(ctiller): verify that there are any timers (atomically) here */
if (gpr_mu_trylock(&g_checker_mu)) {
gpr_mu_lock(&g_mu);
- while (n < MAX_ALARMS_PER_CHECK &&
- gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
+ while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
gpr_timespec new_min_deadline;
- /* For efficiency, we pop as many available alarms as we can from the
- shard. This may violate perfect alarm deadline ordering, but that
+ /* For efficiency, we pop as many available timers as we can from the
+ shard. This may violate perfect timer deadline ordering, but that
shouldn't be a big deal because we don't make ordering guarantees. */
- n += pop_alarms(g_shard_queue[0], now, alarms + n,
- MAX_ALARMS_PER_CHECK - n, &new_min_deadline);
+ n += pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
+ success);
- /* An grpc_alarm_init() on the shard could intervene here, adding a new
- alarm that is earlier than new_min_deadline. However,
- grpc_alarm_init() will block on the master_lock before it can call
- set_min_deadline, so this one will complete first and then the AddAlarm
+      /* A grpc_timer_init() on the shard could intervene here, adding a new
+ timer that is earlier than new_min_deadline. However,
+ grpc_timer_init() will block on the master_lock before it can call
+         set_min_deadline, so this one will complete first and then the AddTimer
will reduce the min_deadline (perhaps unnecessarily). */
g_shard_queue[0]->min_deadline = new_min_deadline;
note_deadline_change(g_shard_queue[0]);
@@ -335,31 +330,27 @@ static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
gpr_mu_unlock(&g_mu);
gpr_mu_unlock(&g_checker_mu);
+ } else if (next != NULL) {
+    /* TODO(ctiller): this forces calling code to do a short poll, and
+ then retry the timer check (because this time through the timer list was
+ contended).
+
+ We could reduce the cost here dramatically by keeping a count of how many
+ currently active pollers got through the uncontended case above
+ successfully, and waking up other pollers IFF that count drops to zero.
+
+ Once that count is in place, this entire else branch could disappear. */
+ *next = gpr_time_min(
+ *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
}
- if (n && drop_mu) {
- gpr_mu_unlock(drop_mu);
- }
-
- for (i = 0; i < n; i++) {
- alarms[i]->cb(alarms[i]->cb_arg, success);
- }
-
- if (n && drop_mu) {
- gpr_mu_lock(drop_mu);
- }
-
- return n;
-}
-
-int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) {
- return run_some_expired_alarms(drop_mu, now, next, 1);
+ return (int)n;
}
-gpr_timespec grpc_alarm_list_next_timeout(void) {
- gpr_timespec out;
- gpr_mu_lock(&g_mu);
- out = g_shard_queue[0]->min_deadline;
- gpr_mu_unlock(&g_mu);
- return out;
+bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
+ gpr_timespec *next) {
+ GPR_ASSERT(now.clock_type == g_clock_type);
+ return run_some_expired_timers(
+ exec_ctx, now, next,
+ gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
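
The renamed timer code keeps the sharded design: each timer is hashed by its address into one of NUM_SHARDS (32) shards, each with its own mutex, heap, and overflow list, so concurrent grpc_timer_init calls rarely contend on the same lock. The following standalone C sketch (illustration only, not gRPC code) shows the pointer-hash sharding used by shard_idx() above, with the same shift and mask constants:

    #include <stdint.h>
    #include <stdio.h>

    #define LOG2_NUM_SHARDS 5
    #define NUM_SHARDS (1 << LOG2_NUM_SHARDS)

    /* Cheap pointer hash, as in shard_idx(): XOR a few shifted copies of the
       address, then mask down to the shard count (a power of two). */
    static size_t shard_of(const void *p) {
      size_t x = (size_t)p;
      return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
    }

    int main(void) {
      int objects[4];
      for (int i = 0; i < 4; i++) {
        printf("timer at %p -> shard %zu\n", (void *)&objects[i],
               shard_of(&objects[i]));
      }
      return 0;
    }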
diff --git a/src/core/iomgr/alarm.h b/src/core/iomgr/timer.h
index e5262e2199..63505df427 100644
--- a/src/core/iomgr/alarm.h
+++ b/src/core/iomgr/timer.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,59 +31,78 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_ALARM_H
-#define GRPC_INTERNAL_CORE_IOMGR_ALARM_H
+#ifndef GRPC_CORE_IOMGR_TIMER_H
+#define GRPC_CORE_IOMGR_TIMER_H
#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
-typedef struct grpc_alarm {
+typedef struct grpc_timer {
gpr_timespec deadline;
- gpr_uint32 heap_index; /* INVALID_HEAP_INDEX if not in heap */
- struct grpc_alarm *next;
- struct grpc_alarm *prev;
+ uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
int triggered;
- grpc_iomgr_cb_func cb;
- void *cb_arg;
-} grpc_alarm;
+ struct grpc_timer *next;
+ struct grpc_timer *prev;
+ grpc_closure closure;
+} grpc_timer;
-/* Initialize *alarm. When expired or canceled, alarm_cb will be called with
- *alarm_cb_arg and status to indicate if it expired (SUCCESS) or was
- canceled (CANCELLED). alarm_cb is guaranteed to be called exactly once,
+/* Initialize *timer. When expired or canceled, timer_cb will be called with
+ *timer_cb_arg and status to indicate if it expired (SUCCESS) or was
+ canceled (CANCELLED). timer_cb is guaranteed to be called exactly once,
and application code should check the status to determine how it was
invoked. The application callback is also responsible for maintaining
information about when to free up any user-level state. */
-void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
- grpc_iomgr_cb_func alarm_cb, void *alarm_cb_arg,
- gpr_timespec now);
+void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
+ gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
+ void *timer_cb_arg, gpr_timespec now);
-/* Note that there is no alarm destroy function. This is because the
- alarm is a one-time occurrence with a guarantee that the callback will
+/* Note that there is no timer destroy function. This is because the
+ timer is a one-time occurrence with a guarantee that the callback will
be called exactly once, either at expiration or cancellation. Thus, all
- the internal alarm event management state is destroyed just before
+ the internal timer event management state is destroyed just before
that callback is invoked. If the user has additional state associated with
- the alarm, the user is responsible for determining when it is safe to
+ the timer, the user is responsible for determining when it is safe to
destroy that state. */
-/* Cancel an *alarm.
+/* Cancel a *timer.
There are three cases:
- 1. We normally cancel the alarm
- 2. The alarm has already run
- 3. We can't cancel the alarm because it is "in flight".
+ 1. We normally cancel the timer
+ 2. The timer has already run
+ 3. We can't cancel the timer because it is "in flight".
In all of these cases, the cancellation is still considered successful.
- They are essentially distinguished in that the alarm_cb will be run
+ They are essentially distinguished in that the timer_cb will be run
exactly once from either the cancellation (with status CANCELLED)
or from the activation (with status SUCCESS)
Note carefully that the callback function MAY occur in the same callstack
- as grpc_alarm_cancel. It's expected that most alarms will be cancelled (their
+ as grpc_timer_cancel. It's expected that most timers will be cancelled (their
primary use is to implement deadlines), and so this code is optimized such
that cancellation costs as little as possible. Making callbacks run inline
matches this aim.
- Requires: cancel() must happen after add() on a given alarm */
-void grpc_alarm_cancel(grpc_alarm *alarm);
+ Requires: cancel() must happen after add() on a given timer */
+void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_H */
+/* iomgr internal api for dealing with timers */
+
+/* Check for timers to be run, and run them.
+ Return true if timer callbacks were executed.
+ Drops drop_mu if it is non-null before executing callbacks.
+ If next is non-null, TRY to update *next with the next running timer
+ IF that timer occurs before *next current value.
+ *next is never guaranteed to be updated on any given execution; however,
+ with high probability at least one thread in the system will see an update
+ at any time slice. */
+bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
+ gpr_timespec *next);
+void grpc_timer_list_init(gpr_timespec now);
+void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx);
+
+/* the following must be implemented by each iomgr implementation */
+
+void grpc_kick_poller(void);
+
+#endif /* GRPC_CORE_IOMGR_TIMER_H */
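
With the exec_ctx-based API above, a caller supplies a callback at init time and may later cancel; the callback runs exactly once through the exec_ctx, with success true on expiry and false on cancellation. A minimal usage sketch, assuming compilation inside this source tree (timer.h plus the gpr time and logging helpers); on_deadline and schedule_with_deadline are hypothetical names, not part of this change:

    #include "src/core/iomgr/timer.h"

    #include <grpc/support/log.h>
    #include <grpc/support/time.h>

    /* Hypothetical callback: success is true on expiry, false if cancelled. */
    static void on_deadline(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
      gpr_log(GPR_INFO, "timer %p fired, success=%d", arg, success);
    }

    static void schedule_with_deadline(grpc_exec_ctx *exec_ctx,
                                       grpc_timer *timer) {
      gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
      gpr_timespec deadline =
          gpr_time_add(now, gpr_time_from_seconds(5, GPR_TIMESPAN));
      grpc_timer_init(exec_ctx, timer, deadline, on_deadline, NULL, now);
      /* ... and if the guarded work finishes early: */
      grpc_timer_cancel(exec_ctx, timer);
    }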
diff --git a/src/core/iomgr/alarm_heap.c b/src/core/iomgr/timer_heap.c
index d912178fda..b5df566c45 100644
--- a/src/core/iomgr/alarm_heap.c
+++ b/src/core/iomgr/timer_heap.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/iomgr/alarm_heap.h"
+#include "src/core/iomgr/timer_heap.h"
#include <string.h>
@@ -43,10 +43,10 @@
position. This functor is called each time immediately after modifying a
value in the underlying container, with the offset of the modified element as
its argument. */
-static void adjust_upwards(grpc_alarm **first, int i, grpc_alarm *t) {
+static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) {
while (i > 0) {
- int parent = (i - 1) / 2;
- if (gpr_time_cmp(first[parent]->deadline, t->deadline) >= 0) break;
+ uint32_t parent = (uint32_t)(((int)i - 1) / 2);
+ if (gpr_time_cmp(first[parent]->deadline, t->deadline) <= 0) break;
first[i] = first[parent];
first[i]->heap_index = i;
i = parent;
@@ -58,20 +58,18 @@ static void adjust_upwards(grpc_alarm **first, int i, grpc_alarm *t) {
/* Adjusts a heap so as to move a hole at position i farther away from the root,
until a suitable position is found for element t. Then, copies t into that
position. */
-static void adjust_downwards(grpc_alarm **first, int i, int length,
- grpc_alarm *t) {
+static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
+ grpc_timer *t) {
for (;;) {
- int left_child = 1 + 2 * i;
- int right_child;
- int next_i;
+ uint32_t left_child = 1u + 2u * i;
if (left_child >= length) break;
- right_child = left_child + 1;
- next_i =
- right_child < length && gpr_time_cmp(first[left_child]->deadline,
- first[right_child]->deadline) < 0
- ? right_child
- : left_child;
- if (gpr_time_cmp(t->deadline, first[next_i]->deadline) >= 0) break;
+ uint32_t right_child = left_child + 1;
+ uint32_t next_i = right_child < length &&
+ gpr_time_cmp(first[left_child]->deadline,
+ first[right_child]->deadline) > 0
+ ? right_child
+ : left_child;
+ if (gpr_time_cmp(t->deadline, first[next_i]->deadline) <= 0) break;
first[i] = first[next_i];
first[i]->heap_index = i;
i = next_i;
@@ -83,66 +81,66 @@ static void adjust_downwards(grpc_alarm **first, int i, int length,
#define SHRINK_MIN_ELEMS 8
#define SHRINK_FULLNESS_FACTOR 2
-static void maybe_shrink(grpc_alarm_heap *heap) {
- if (heap->alarm_count >= 8 &&
- heap->alarm_count <= heap->alarm_capacity / SHRINK_FULLNESS_FACTOR / 2) {
- heap->alarm_capacity = heap->alarm_count * SHRINK_FULLNESS_FACTOR;
- heap->alarms =
- gpr_realloc(heap->alarms, heap->alarm_capacity * sizeof(grpc_alarm *));
+static void maybe_shrink(grpc_timer_heap *heap) {
+ if (heap->timer_count >= 8 &&
+ heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
+ heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
+ heap->timers =
+ gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
}
-static void note_changed_priority(grpc_alarm_heap *heap, grpc_alarm *alarm) {
- int i = alarm->heap_index;
- int parent = (i - 1) / 2;
- if (gpr_time_cmp(heap->alarms[parent]->deadline, alarm->deadline) < 0) {
- adjust_upwards(heap->alarms, i, alarm);
+static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) {
+ uint32_t i = timer->heap_index;
+ uint32_t parent = (uint32_t)(((int)i - 1) / 2);
+ if (gpr_time_cmp(heap->timers[parent]->deadline, timer->deadline) > 0) {
+ adjust_upwards(heap->timers, i, timer);
} else {
- adjust_downwards(heap->alarms, i, heap->alarm_count, alarm);
+ adjust_downwards(heap->timers, i, heap->timer_count, timer);
}
}
-void grpc_alarm_heap_init(grpc_alarm_heap *heap) {
+void grpc_timer_heap_init(grpc_timer_heap *heap) {
memset(heap, 0, sizeof(*heap));
}
-void grpc_alarm_heap_destroy(grpc_alarm_heap *heap) { gpr_free(heap->alarms); }
+void grpc_timer_heap_destroy(grpc_timer_heap *heap) { gpr_free(heap->timers); }
-int grpc_alarm_heap_add(grpc_alarm_heap *heap, grpc_alarm *alarm) {
- if (heap->alarm_count == heap->alarm_capacity) {
- heap->alarm_capacity =
- GPR_MAX(heap->alarm_capacity + 1, heap->alarm_capacity * 3 / 2);
- heap->alarms =
- gpr_realloc(heap->alarms, heap->alarm_capacity * sizeof(grpc_alarm *));
+int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
+ if (heap->timer_count == heap->timer_capacity) {
+ heap->timer_capacity =
+ GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
+ heap->timers =
+ gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
- alarm->heap_index = heap->alarm_count;
- adjust_upwards(heap->alarms, heap->alarm_count, alarm);
- heap->alarm_count++;
- return alarm->heap_index == 0;
+ timer->heap_index = heap->timer_count;
+ adjust_upwards(heap->timers, heap->timer_count, timer);
+ heap->timer_count++;
+ return timer->heap_index == 0;
}
-void grpc_alarm_heap_remove(grpc_alarm_heap *heap, grpc_alarm *alarm) {
- int i = alarm->heap_index;
- if (i == heap->alarm_count - 1) {
- heap->alarm_count--;
+void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) {
+ uint32_t i = timer->heap_index;
+ if (i == heap->timer_count - 1) {
+ heap->timer_count--;
maybe_shrink(heap);
return;
}
- heap->alarms[i] = heap->alarms[heap->alarm_count - 1];
- heap->alarms[i]->heap_index = i;
- heap->alarm_count--;
+ heap->timers[i] = heap->timers[heap->timer_count - 1];
+ heap->timers[i]->heap_index = i;
+ heap->timer_count--;
maybe_shrink(heap);
- note_changed_priority(heap, heap->alarms[i]);
+ note_changed_priority(heap, heap->timers[i]);
}
-int grpc_alarm_heap_is_empty(grpc_alarm_heap *heap) {
- return heap->alarm_count == 0;
+int grpc_timer_heap_is_empty(grpc_timer_heap *heap) {
+ return heap->timer_count == 0;
}
-grpc_alarm *grpc_alarm_heap_top(grpc_alarm_heap *heap) {
- return heap->alarms[0];
+grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap) {
+ return heap->timers[0];
}
-void grpc_alarm_heap_pop(grpc_alarm_heap *heap) {
- grpc_alarm_heap_remove(heap, grpc_alarm_heap_top(heap));
+void grpc_timer_heap_pop(grpc_timer_heap *heap) {
+ grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap));
}
diff --git a/src/core/iomgr/alarm_heap.h b/src/core/iomgr/timer_heap.h
index c5adfc6d31..c2912ef45d 100644
--- a/src/core/iomgr/alarm_heap.h
+++ b/src/core/iomgr/timer_heap.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,27 +31,27 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_ALARM_HEAP_H
-#define GRPC_INTERNAL_CORE_IOMGR_ALARM_HEAP_H
+#ifndef GRPC_CORE_IOMGR_TIMER_HEAP_H
+#define GRPC_CORE_IOMGR_TIMER_HEAP_H
-#include "src/core/iomgr/alarm.h"
+#include "src/core/iomgr/timer.h"
typedef struct {
- grpc_alarm **alarms;
- int alarm_count;
- int alarm_capacity;
-} grpc_alarm_heap;
+ grpc_timer **timers;
+ uint32_t timer_count;
+ uint32_t timer_capacity;
+} grpc_timer_heap;
-/* return 1 if the new alarm is the first alarm in the heap */
-int grpc_alarm_heap_add(grpc_alarm_heap *heap, grpc_alarm *alarm);
+/* return 1 if the new timer is the first timer in the heap */
+int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer);
-void grpc_alarm_heap_init(grpc_alarm_heap *heap);
-void grpc_alarm_heap_destroy(grpc_alarm_heap *heap);
+void grpc_timer_heap_init(grpc_timer_heap *heap);
+void grpc_timer_heap_destroy(grpc_timer_heap *heap);
-void grpc_alarm_heap_remove(grpc_alarm_heap *heap, grpc_alarm *alarm);
-grpc_alarm *grpc_alarm_heap_top(grpc_alarm_heap *heap);
-void grpc_alarm_heap_pop(grpc_alarm_heap *heap);
+void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer);
+grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap);
+void grpc_timer_heap_pop(grpc_timer_heap *heap);
-int grpc_alarm_heap_is_empty(grpc_alarm_heap *heap);
+int grpc_timer_heap_is_empty(grpc_timer_heap *heap);
-#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_HEAP_H */
+#endif /* GRPC_CORE_IOMGR_TIMER_HEAP_H */
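
The heap above is a plain array-based binary min-heap keyed on deadline: the parent of slot i is (i-1)/2, its children are 2i+1 and 2i+2, and each timer stores its own slot in heap_index so grpc_timer_heap_remove can find it in O(log n). The standalone sketch below (integer keys, no back-pointers, not gRPC code) shows the same sift-up/sift-down mechanics:

    #include <stdint.h>
    #include <stdio.h>

    #define CAP 16

    /* Array-based binary min-heap on integer keys. */
    typedef struct {
      int keys[CAP];
      uint32_t count;
    } min_heap;

    static void sift_up(min_heap *h, uint32_t i) {
      int k = h->keys[i];
      while (i > 0) {
        uint32_t parent = (i - 1) / 2;
        if (h->keys[parent] <= k) break; /* parent is no larger: position found */
        h->keys[i] = h->keys[parent];    /* move the hole upward */
        i = parent;
      }
      h->keys[i] = k;
    }

    static void sift_down(min_heap *h, uint32_t i) {
      int k = h->keys[i];
      for (;;) {
        uint32_t left = 2 * i + 1, next = left;
        if (left >= h->count) break;
        if (left + 1 < h->count && h->keys[left + 1] < h->keys[left]) {
          next = left + 1; /* pick the smaller child */
        }
        if (k <= h->keys[next]) break;
        h->keys[i] = h->keys[next];
        i = next;
      }
      h->keys[i] = k;
    }

    static void heap_add(min_heap *h, int k) {
      h->keys[h->count] = k;
      sift_up(h, h->count);
      h->count++;
    }

    static int heap_pop(min_heap *h) { /* remove and return the minimum */
      int top = h->keys[0];
      h->keys[0] = h->keys[--h->count];
      if (h->count > 0) sift_down(h, 0);
      return top;
    }

    int main(void) {
      min_heap h = {{0}, 0};
      int input[] = {7, 3, 9, 1, 5};
      for (int i = 0; i < 5; i++) heap_add(&h, input[i]);
      while (h.count > 0) printf("%d ", heap_pop(&h)); /* prints 1 3 5 7 9 */
      printf("\n");
      return 0;
    }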
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
new file mode 100644
index 0000000000..ef548cfe4d
--- /dev/null
+++ b/src/core/iomgr/udp_server.c
@@ -0,0 +1,430 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* FIXME: "posix" files shouldn't be depending on _GNU_SOURCE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GRPC_NEED_UDP
+#ifdef GPR_POSIX_SOCKET
+
+#include "src/core/iomgr/udp_server.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/support/string.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+
+#define INIT_PORT_CAP 2
+
+/* one listening port */
+typedef struct {
+ int fd;
+ grpc_fd *emfd;
+ grpc_udp_server *server;
+ union {
+ uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
+ struct sockaddr sockaddr;
+ struct sockaddr_un un;
+ } addr;
+ size_t addr_len;
+ grpc_closure read_closure;
+ grpc_closure destroyed_closure;
+ grpc_udp_server_read_cb read_cb;
+} server_port;
+
+static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
+ struct stat st;
+
+ if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
+ unlink(un->sun_path);
+ }
+}
+
+/* the overall server */
+struct grpc_udp_server {
+ gpr_mu mu;
+ gpr_cv cv;
+
+ /* active port count: how many ports are actually still listening */
+ size_t active_ports;
+ /* destroyed port count: how many ports are completely destroyed */
+ size_t destroyed_ports;
+
+ /* is this server shutting down? (boolean) */
+ int shutdown;
+
+ /* all listening ports */
+ server_port *ports;
+ size_t nports;
+ size_t port_capacity;
+
+ /* shutdown callback */
+ grpc_closure *shutdown_complete;
+
+ /* all pollsets interested in new connections */
+ grpc_pollset **pollsets;
+ /* number of pollsets in the pollsets array */
+ size_t pollset_count;
+ /* The parent grpc server */
+ grpc_server *grpc_server;
+};
+
+grpc_udp_server *grpc_udp_server_create(void) {
+ grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
+ gpr_mu_init(&s->mu);
+ gpr_cv_init(&s->cv);
+ s->active_ports = 0;
+ s->destroyed_ports = 0;
+ s->shutdown = 0;
+ s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
+ s->nports = 0;
+ s->port_capacity = INIT_PORT_CAP;
+
+ return s;
+}
+
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1, NULL);
+
+ gpr_mu_destroy(&s->mu);
+ gpr_cv_destroy(&s->cv);
+
+ gpr_free(s->ports);
+ gpr_free(s);
+}
+
+static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
+ bool success) {
+ grpc_udp_server *s = server;
+ gpr_mu_lock(&s->mu);
+ s->destroyed_ports++;
+ if (s->destroyed_ports == s->nports) {
+ gpr_mu_unlock(&s->mu);
+ finish_shutdown(exec_ctx, s);
+ } else {
+ gpr_mu_unlock(&s->mu);
+ }
+}
+
+/* called when all listening endpoints have been shutdown, so no further
+ events will be received on them - at this point it's safe to destroy
+ things */
+static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
+ size_t i;
+
+ /* delete ALL the things */
+ gpr_mu_lock(&s->mu);
+
+ if (!s->shutdown) {
+ gpr_mu_unlock(&s->mu);
+ return;
+ }
+
+ if (s->nports) {
+ for (i = 0; i < s->nports; i++) {
+ server_port *sp = &s->ports[i];
+ if (sp->addr.sockaddr.sa_family == AF_UNIX) {
+ unlink_if_unix_domain_socket(&sp->addr.un);
+ }
+ sp->destroyed_closure.cb = destroyed_port;
+ sp->destroyed_closure.cb_arg = s;
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
+ "udp_listener_shutdown");
+ }
+ gpr_mu_unlock(&s->mu);
+ } else {
+ gpr_mu_unlock(&s->mu);
+ finish_shutdown(exec_ctx, s);
+ }
+}
+
+void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
+ grpc_closure *on_done) {
+ size_t i;
+ gpr_mu_lock(&s->mu);
+
+ GPR_ASSERT(!s->shutdown);
+ s->shutdown = 1;
+
+ s->shutdown_complete = on_done;
+
+ /* shutdown all fd's */
+ if (s->active_ports) {
+ for (i = 0; i < s->nports; i++) {
+ grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
+ }
+ gpr_mu_unlock(&s->mu);
+ } else {
+ gpr_mu_unlock(&s->mu);
+ deactivated_all_ports(exec_ctx, s);
+ }
+}
+
+/* Prepare a recently-created socket for listening. */
+static int prepare_socket(int fd, const struct sockaddr *addr,
+ size_t addr_len) {
+ struct sockaddr_storage sockname_temp;
+ socklen_t sockname_len;
+ int get_local_ip;
+ int rc;
+
+ if (fd < 0) {
+ goto error;
+ }
+
+ if (!grpc_set_socket_nonblocking(fd, 1) || !grpc_set_socket_cloexec(fd, 1)) {
+ gpr_log(GPR_ERROR, "Unable to configure socket %d: %s", fd,
+ strerror(errno));
+ }
+
+ get_local_ip = 1;
+ rc = setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip,
+ sizeof(get_local_ip));
+ if (rc == 0 && addr->sa_family == AF_INET6) {
+#if !defined(__APPLE__)
+ rc = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip,
+ sizeof(get_local_ip));
+#endif
+ }
+
+ GPR_ASSERT(addr_len < ~(socklen_t)0);
+ if (bind(fd, addr, (socklen_t)addr_len) < 0) {
+ char *addr_str;
+ grpc_sockaddr_to_string(&addr_str, addr, 0);
+ gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
+ gpr_free(addr_str);
+ goto error;
+ }
+
+ sockname_len = sizeof(sockname_temp);
+ if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
+ goto error;
+ }
+
+ return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+
+error:
+ if (fd >= 0) {
+ close(fd);
+ }
+ return -1;
+}
+
+/* event manager callback when reads are ready */
+static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ server_port *sp = arg;
+
+ if (!success) {
+ gpr_mu_lock(&sp->server->mu);
+ if (0 == --sp->server->active_ports) {
+ gpr_mu_unlock(&sp->server->mu);
+ deactivated_all_ports(exec_ctx, sp->server);
+ } else {
+ gpr_mu_unlock(&sp->server->mu);
+ }
+ return;
+ }
+
+ /* Tell the registered callback that data is available to read. */
+ GPR_ASSERT(sp->read_cb);
+ sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
+
+ /* Re-arm the notification event so we get another chance to read. */
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+}
+
+static int add_socket_to_server(grpc_udp_server *s, int fd,
+ const struct sockaddr *addr, size_t addr_len,
+ grpc_udp_server_read_cb read_cb) {
+ server_port *sp;
+ int port;
+ char *addr_str;
+ char *name;
+
+ port = prepare_socket(fd, addr, addr_len);
+ if (port >= 0) {
+ grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
+ gpr_free(addr_str);
+ gpr_mu_lock(&s->mu);
+ /* append it to the list under a lock */
+ if (s->nports == s->port_capacity) {
+ s->port_capacity *= 2;
+ s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ }
+ sp = &s->ports[s->nports++];
+ sp->server = s;
+ sp->fd = fd;
+ sp->emfd = grpc_fd_create(fd, name);
+ memcpy(sp->addr.untyped, addr, addr_len);
+ sp->addr_len = addr_len;
+ sp->read_cb = read_cb;
+ GPR_ASSERT(sp->emfd);
+ gpr_mu_unlock(&s->mu);
+ gpr_free(name);
+ }
+
+ return port;
+}
+
+int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
+ size_t addr_len, grpc_udp_server_read_cb read_cb) {
+ int allocated_port1 = -1;
+ int allocated_port2 = -1;
+ unsigned i;
+ int fd;
+ grpc_dualstack_mode dsmode;
+ struct sockaddr_in6 addr6_v4mapped;
+ struct sockaddr_in wild4;
+ struct sockaddr_in6 wild6;
+ struct sockaddr_in addr4_copy;
+ struct sockaddr *allocated_addr = NULL;
+ struct sockaddr_storage sockname_temp;
+ socklen_t sockname_len;
+ int port;
+
+ if (((struct sockaddr *)addr)->sa_family == AF_UNIX) {
+ unlink_if_unix_domain_socket(addr);
+ }
+
+ /* Check if this is a wildcard port, and if so, try to keep the port the same
+ as some previously created listener. */
+ if (grpc_sockaddr_get_port(addr) == 0) {
+ for (i = 0; i < s->nports; i++) {
+ sockname_len = sizeof(sockname_temp);
+ if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
+ &sockname_len)) {
+ port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ if (port > 0) {
+ allocated_addr = malloc(addr_len);
+ memcpy(allocated_addr, addr, addr_len);
+ grpc_sockaddr_set_port(allocated_addr, port);
+ addr = allocated_addr;
+ break;
+ }
+ }
+ }
+ }
+
+ if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
+ addr = (const struct sockaddr *)&addr6_v4mapped;
+ addr_len = sizeof(addr6_v4mapped);
+ }
+
+ /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
+ if (grpc_sockaddr_is_wildcard(addr, &port)) {
+ grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
+
+ /* Try listening on IPv6 first. */
+ addr = (struct sockaddr *)&wild6;
+ addr_len = sizeof(wild6);
+ fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode);
+ allocated_port1 = add_socket_to_server(s, fd, addr, addr_len, read_cb);
+ if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
+ goto done;
+ }
+
+ /* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
+ if (port == 0 && allocated_port1 > 0) {
+ grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
+ }
+ addr = (struct sockaddr *)&wild4;
+ addr_len = sizeof(wild4);
+ }
+
+ fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode);
+ if (fd < 0) {
+ gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
+ }
+ if (dsmode == GRPC_DSMODE_IPV4 &&
+ grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
+ addr = (struct sockaddr *)&addr4_copy;
+ addr_len = sizeof(addr4_copy);
+ }
+ allocated_port2 = add_socket_to_server(s, fd, addr, addr_len, read_cb);
+
+done:
+ gpr_free(allocated_addr);
+ return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
+}
+
+int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
+ return (port_index < s->nports) ? s->ports[port_index].fd : -1;
+}
+
+void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_server *server) {
+ size_t i, j;
+ gpr_mu_lock(&s->mu);
+ GPR_ASSERT(s->active_ports == 0);
+ s->pollsets = pollsets;
+ s->grpc_server = server;
+ for (i = 0; i < s->nports; i++) {
+ for (j = 0; j < pollset_count; j++) {
+ grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
+ }
+ s->ports[i].read_closure.cb = on_read;
+ s->ports[i].read_closure.cb_arg = &s->ports[i];
+ grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
+ &s->ports[i].read_closure);
+ s->active_ports++;
+ }
+ gpr_mu_unlock(&s->mu);
+}
+
+#endif
+#endif
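
prepare_socket() above puts each listening UDP socket into non-blocking, close-on-exec mode, turns on IP_PKTINFO so received datagrams carry their destination address, binds, and reads back the kernel-assigned port. A standalone Linux sketch of those same preparation steps (plain sockets API, not the gRPC wrappers):

    #include <arpa/inet.h>
    #include <fcntl.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void) {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);
      if (fd < 0) { perror("socket"); return 1; }

      /* Non-blocking + close-on-exec, as the grpc_set_socket_* helpers do. */
      fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
      fcntl(fd, F_SETFD, FD_CLOEXEC);

      int one = 1; /* receive destination-address info with each datagram */
      setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));

      struct sockaddr_in addr;
      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_ANY);
      addr.sin_port = 0; /* 0 = let the kernel pick a port */
      if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        close(fd);
        return 1;
      }

      struct sockaddr_in bound;
      socklen_t len = sizeof(bound);
      getsockname(fd, (struct sockaddr *)&bound, &len);
      printf("bound UDP port %d\n", ntohs(bound.sin_port));
      close(fd);
      return 0;
    }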
diff --git a/src/core/iomgr/udp_server.h b/src/core/iomgr/udp_server.h
new file mode 100644
index 0000000000..1e59a92392
--- /dev/null
+++ b/src/core/iomgr/udp_server.h
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_UDP_SERVER_H
+#define GRPC_CORE_IOMGR_UDP_SERVER_H
+
+#include "src/core/iomgr/endpoint.h"
+#include "src/core/iomgr/fd_posix.h"
+
+/* Forward decl of grpc_server */
+typedef struct grpc_server grpc_server;
+
+/* Forward decl of grpc_udp_server */
+typedef struct grpc_udp_server grpc_udp_server;
+
+/* Called when data is available to read from the socket. */
+typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
+ grpc_server *server);
+
+/* Create a server, initially not bound to any ports */
+grpc_udp_server *grpc_udp_server_create(void);
+
+/* Start listening to bound ports */
+void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_server *server);
+
+int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
+
+/* Add a port to the server, returning port number on success, or negative
+ on failure.
+
+ The :: and 0.0.0.0 wildcard addresses are treated identically, accepting
+ both IPv4 and IPv6 connections, but :: is the preferred style. This usually
+ creates one socket, but possibly two on systems which support IPv6,
+ but not dualstack sockets. */
+
+/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle
+ all of the multiple socket port matching logic in one place */
+int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
+ size_t addr_len, grpc_udp_server_read_cb read_cb);
+
+void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
+ grpc_closure *on_done);
+
+#endif /* GRPC_CORE_IOMGR_UDP_SERVER_H */
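
Tying the header together: a caller creates the server, adds one or more addresses with a read callback, then starts it against the pollsets that should service it. A minimal usage sketch, assuming compilation inside this source tree and a grpc_server plus pollsets created elsewhere; on_datagram and listen_udp are hypothetical names, not part of this change:

    #include "src/core/iomgr/udp_server.h"

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>

    /* Hypothetical read callback: fired whenever a datagram is readable; a
       real implementation would recvmsg() from the fd wrapped by emfd. */
    static void on_datagram(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                            grpc_server *server) {}

    static int listen_udp(grpc_exec_ctx *exec_ctx, grpc_server *server,
                          grpc_pollset **pollsets, size_t pollset_count) {
      grpc_udp_server *s = grpc_udp_server_create();
      struct sockaddr_in addr;
      int port;
      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_ANY);
      addr.sin_port = 0; /* wildcard port: the kernel picks one */
      port = grpc_udp_server_add_port(s, &addr, sizeof(addr), on_datagram);
      grpc_udp_server_start(exec_ctx, s, pollsets, pollset_count, server);
      return port; /* negative on failure, per the contract above */
    }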
diff --git a/src/core/iomgr/wakeup_fd_eventfd.c b/src/core/iomgr/wakeup_fd_eventfd.c
index 99c32bb9db..f67379e4fc 100644
--- a/src/core/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/iomgr/wakeup_fd_eventfd.c
@@ -39,10 +39,12 @@
#include <sys/eventfd.h>
#include <unistd.h>
-#include "src/core/iomgr/wakeup_fd_posix.h"
#include <grpc/support/log.h>
-static void eventfd_create(grpc_wakeup_fd_info *fd_info) {
+#include "src/core/iomgr/wakeup_fd_posix.h"
+#include "src/core/profiling/timers.h"
+
+static void eventfd_create(grpc_wakeup_fd* fd_info) {
int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
/* TODO(klempner): Handle failure more gracefully */
GPR_ASSERT(efd >= 0);
@@ -50,7 +52,7 @@ static void eventfd_create(grpc_wakeup_fd_info *fd_info) {
fd_info->write_fd = -1;
}
-static void eventfd_consume(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_consume(grpc_wakeup_fd* fd_info) {
eventfd_t value;
int err;
do {
@@ -58,15 +60,17 @@ static void eventfd_consume(grpc_wakeup_fd_info *fd_info) {
} while (err < 0 && errno == EINTR);
}
-static void eventfd_wakeup(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_wakeup(grpc_wakeup_fd* fd_info) {
int err;
+ GPR_TIMER_BEGIN("eventfd_wakeup", 0);
do {
err = eventfd_write(fd_info->read_fd, 1);
} while (err < 0 && errno == EINTR);
+ GPR_TIMER_END("eventfd_wakeup", 0);
}
-static void eventfd_destroy(grpc_wakeup_fd_info *fd_info) {
- close(fd_info->read_fd);
+static void eventfd_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd != 0) close(fd_info->read_fd);
}
static int eventfd_check_availability(void) {
@@ -75,8 +79,7 @@ static int eventfd_check_availability(void) {
}
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
- eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
- eventfd_check_availability
-};
+ eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
+ eventfd_check_availability};
#endif /* GPR_LINUX_EVENTFD */
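
The eventfd path implements the wakeup fd with a single descriptor: any write makes it readable, so a blocked poller wakes up, and a read consumes (zeroes) the counter. A standalone Linux sketch of that wake/consume cycle, independent of the gRPC vtable:

    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void) {
      /* Same flags as above: non-blocking + close-on-exec. */
      int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
      if (efd < 0) { perror("eventfd"); return 1; }

      eventfd_write(efd, 1); /* "wakeup": the fd becomes readable */

      eventfd_t value = 0;
      eventfd_read(efd, &value); /* "consume": counter reset to zero */
      printf("consumed value %llu\n", (unsigned long long)value);

      close(efd);
      return 0;
    }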
diff --git a/src/core/iomgr/wakeup_fd_nospecial.c b/src/core/iomgr/wakeup_fd_nospecial.c
index c1038bf379..78d763c103 100644
--- a/src/core/iomgr/wakeup_fd_nospecial.c
+++ b/src/core/iomgr/wakeup_fd_nospecial.c
@@ -43,12 +43,9 @@
#include "src/core/iomgr/wakeup_fd_posix.h"
#include <stddef.h>
-static int check_availability_invalid(void) {
- return 0;
-}
+static int check_availability_invalid(void) { return 0; }
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
- NULL, NULL, NULL, NULL, check_availability_invalid
-};
+ NULL, NULL, NULL, NULL, check_availability_invalid};
-#endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */
+#endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */
diff --git a/src/core/iomgr/wakeup_fd_pipe.c b/src/core/iomgr/wakeup_fd_pipe.c
index f895478990..80de181d9d 100644
--- a/src/core/iomgr/wakeup_fd_pipe.c
+++ b/src/core/iomgr/wakeup_fd_pipe.c
@@ -44,7 +44,7 @@
#include "src/core/iomgr/socket_utils_posix.h"
#include <grpc/support/log.h>
-static void pipe_create(grpc_wakeup_fd_info *fd_info) {
+static void pipe_init(grpc_wakeup_fd* fd_info) {
int pipefd[2];
/* TODO(klempner): Make this nonfatal */
GPR_ASSERT(0 == pipe(pipefd));
@@ -54,9 +54,9 @@ static void pipe_create(grpc_wakeup_fd_info *fd_info) {
fd_info->write_fd = pipefd[1];
}
-static void pipe_consume(grpc_wakeup_fd_info *fd_info) {
+static void pipe_consume(grpc_wakeup_fd* fd_info) {
char buf[128];
- int r;
+ ssize_t r;
for (;;) {
r = read(fd_info->read_fd, buf, sizeof(buf));
@@ -74,15 +74,15 @@ static void pipe_consume(grpc_wakeup_fd_info *fd_info) {
}
}
-static void pipe_wakeup(grpc_wakeup_fd_info *fd_info) {
+static void pipe_wakeup(grpc_wakeup_fd* fd_info) {
char c = 0;
while (write(fd_info->write_fd, &c, 1) != 1 && errno == EINTR)
;
}
-static void pipe_destroy(grpc_wakeup_fd_info *fd_info) {
- close(fd_info->read_fd);
- close(fd_info->write_fd);
+static void pipe_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd != 0) close(fd_info->read_fd);
+ if (fd_info->write_fd != 0) close(fd_info->write_fd);
}
static int pipe_check_availability(void) {
@@ -91,7 +91,7 @@ static int pipe_check_availability(void) {
}
const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {
- pipe_create, pipe_consume, pipe_wakeup, pipe_destroy, pipe_check_availability
-};
+ pipe_init, pipe_consume, pipe_wakeup, pipe_destroy,
+ pipe_check_availability};
-#endif /* GPR_POSIX_WAKUP_FD */
+#endif /* GPR_POSIX_WAKUP_FD */
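
The pipe implementation is the classic self-pipe trick used when eventfd is unavailable: write one byte to the write end to make the read end readable (waking a poller), then drain the read end until it would block. A standalone sketch of both halves (not the gRPC vtable functions):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
      int pipefd[2];
      char c = 0, buf[128];
      if (pipe(pipefd) < 0) { perror("pipe"); return 1; }
      /* Both ends non-blocking, as pipe_init() arranges. */
      fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
      fcntl(pipefd[1], F_SETFL, O_NONBLOCK);

      /* wakeup: one byte makes the read end readable */
      while (write(pipefd[1], &c, 1) != 1 && errno == EINTR) {
      }

      /* consume: drain until the pipe would block */
      for (;;) {
        ssize_t r = read(pipefd[0], buf, sizeof(buf));
        if (r > 0) continue;
        if (r < 0 && errno == EINTR) continue;
        break; /* 0 bytes, EAGAIN, or a real error */
      }

      close(pipefd[0]);
      close(pipefd[1]);
      return 0;
    }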
diff --git a/src/core/iomgr/wakeup_fd_pipe.h b/src/core/iomgr/wakeup_fd_pipe.h
index aa8f977ddb..eb3e02b482 100644
--- a/src/core/iomgr/wakeup_fd_pipe.h
+++ b/src/core/iomgr/wakeup_fd_pipe.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_PIPE_H
-#define GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_PIPE_H
+#ifndef GRPC_CORE_IOMGR_WAKEUP_FD_PIPE_H
+#define GRPC_CORE_IOMGR_WAKEUP_FD_PIPE_H
#include "src/core/iomgr/wakeup_fd_posix.h"
extern grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable;
-#endif /* GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_PIPE_H */
+#endif /* GRPC_CORE_IOMGR_WAKEUP_FD_PIPE_H */
diff --git a/src/core/iomgr/wakeup_fd_posix.c b/src/core/iomgr/wakeup_fd_posix.c
index d3cc3ec570..f40be081b0 100644
--- a/src/core/iomgr/wakeup_fd_posix.c
+++ b/src/core/iomgr/wakeup_fd_posix.c
@@ -40,37 +40,33 @@
#include <stddef.h>
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
+int grpc_allow_specialized_wakeup_fd = 1;
void grpc_wakeup_fd_global_init(void) {
- if (grpc_specialized_wakeup_fd_vtable.check_availability()) {
+ if (grpc_allow_specialized_wakeup_fd &&
+ grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
} else {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
}
}
-void grpc_wakeup_fd_global_init_force_fallback(void) {
- wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
-}
-
-void grpc_wakeup_fd_global_destroy(void) {
- wakeup_fd_vtable = NULL;
-}
+void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
-void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info) {
- wakeup_fd_vtable->create(fd_info);
+void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
+ wakeup_fd_vtable->init(fd_info);
}
-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
wakeup_fd_vtable->consume(fd_info);
}
-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
wakeup_fd_vtable->wakeup(fd_info);
}
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
wakeup_fd_vtable->destroy(fd_info);
}
-#endif /* GPR_POSIX_WAKEUP_FD */
+#endif /* GPR_POSIX_WAKEUP_FD */
diff --git a/src/core/iomgr/wakeup_fd_posix.h b/src/core/iomgr/wakeup_fd_posix.h
index 1b0ff70c7f..d7e3cf4673 100644
--- a/src/core/iomgr/wakeup_fd_posix.h
+++ b/src/core/iomgr/wakeup_fd_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,8 +59,8 @@
* 2. If the polling thread was awakened by a wakeup_fd event, call
* grpc_wakeup_fd_consume_wakeup() on it.
*/
-#ifndef GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_POSIX_H
+#ifndef GRPC_CORE_IOMGR_WAKEUP_FD_POSIX_H
+#define GRPC_CORE_IOMGR_WAKEUP_FD_POSIX_H
void grpc_wakeup_fd_global_init(void);
void grpc_wakeup_fd_global_destroy(void);
@@ -69,31 +69,33 @@ void grpc_wakeup_fd_global_destroy(void);
* purposes only.*/
void grpc_wakeup_fd_global_init_force_fallback(void);
-typedef struct grpc_wakeup_fd_info grpc_wakeup_fd_info;
+typedef struct grpc_wakeup_fd grpc_wakeup_fd;
typedef struct grpc_wakeup_fd_vtable {
- void (*create)(grpc_wakeup_fd_info *fd_info);
- void (*consume)(grpc_wakeup_fd_info *fd_info);
- void (*wakeup)(grpc_wakeup_fd_info *fd_info);
- void (*destroy)(grpc_wakeup_fd_info *fd_info);
+ void (*init)(grpc_wakeup_fd* fd_info);
+ void (*consume)(grpc_wakeup_fd* fd_info);
+ void (*wakeup)(grpc_wakeup_fd* fd_info);
+ void (*destroy)(grpc_wakeup_fd* fd_info);
/* Must be called before calling any other functions */
int (*check_availability)(void);
} grpc_wakeup_fd_vtable;
-struct grpc_wakeup_fd_info {
+struct grpc_wakeup_fd {
int read_fd;
int write_fd;
};
+extern int grpc_allow_specialized_wakeup_fd;
+
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
-void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info);
+void grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info);
/* Defined in some specialized implementation's .c file, or by
* wakeup_fd_nospecial.c if no such implementation exists. */
extern const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable;
-#endif /* GRPC_INTERNAL_CORE_IOMGR_WAKEUP_FD_POSIX_H */
+#endif /* GRPC_CORE_IOMGR_WAKEUP_FD_POSIX_H */
diff --git a/src/core/iomgr/workqueue.h b/src/core/iomgr/workqueue.h
new file mode 100644
index 0000000000..2ba1e5d9a2
--- /dev/null
+++ b/src/core/iomgr/workqueue.h
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_WORKQUEUE_H
+#define GRPC_CORE_IOMGR_WORKQUEUE_H
+
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/closure.h"
+#include "src/core/iomgr/exec_ctx.h"
+
+#ifdef GPR_POSIX_SOCKET
+#include "src/core/iomgr/workqueue_posix.h"
+#endif
+
+#ifdef GPR_WIN32
+#include "src/core/iomgr/workqueue_windows.h"
+#endif
+
+/* grpc_workqueue is forward declared in exec_ctx.h */
+
+/** Create a work queue */
+grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx);
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+
+#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#define GRPC_WORKQUEUE_REF(p, r) \
+ grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_WORKQUEUE_UNREF(cl, p, r) \
+ grpc_workqueue_unref((cl), (p), __FILE__, __LINE__, (r))
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+ const char *reason);
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason);
+#else
+#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
+#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
+void grpc_workqueue_ref(grpc_workqueue *workqueue);
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+#endif
+
+/** Bind this workqueue to a pollset */
+void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue,
+ grpc_pollset *pollset);
+
+/** Add a work item to a workqueue */
+void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
+ int success);
+
+#endif /* GRPC_CORE_IOMGR_WORKQUEUE_H */
diff --git a/src/core/iomgr/workqueue_posix.c b/src/core/iomgr/workqueue_posix.c
new file mode 100644
index 0000000000..2b42e6d4fb
--- /dev/null
+++ b/src/core/iomgr/workqueue_posix.c
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
+#include "src/core/iomgr/workqueue.h"
+
+#include <stdio.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/iomgr/pollset_posix.h"
+
+static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success);
+
+grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx) {
+ char name[32];
+ grpc_workqueue *workqueue = gpr_malloc(sizeof(grpc_workqueue));
+ gpr_ref_init(&workqueue->refs, 1);
+ gpr_mu_init(&workqueue->mu);
+ workqueue->closure_list.head = workqueue->closure_list.tail = NULL;
+ grpc_wakeup_fd_init(&workqueue->wakeup_fd);
+ sprintf(name, "workqueue:%p", (void *)workqueue);
+ workqueue->wakeup_read_fd =
+ grpc_fd_create(GRPC_WAKEUP_FD_GET_READ_FD(&workqueue->wakeup_fd), name);
+ grpc_closure_init(&workqueue->read_closure, on_readable, workqueue);
+ grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
+ &workqueue->read_closure);
+ return workqueue;
+}
+
+static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {
+ GPR_ASSERT(grpc_closure_list_empty(workqueue->closure_list));
+ grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
+}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+ const char *reason) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
+ workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
+ reason);
+#else
+void grpc_workqueue_ref(grpc_workqueue *workqueue) {
+#endif
+ gpr_ref(&workqueue->refs);
+}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
+ workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
+ reason);
+#else
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+#endif
+ if (gpr_unref(&workqueue->refs)) {
+ workqueue_destroy(exec_ctx, workqueue);
+ }
+}
+
+void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue,
+ grpc_pollset *pollset) {
+ grpc_pollset_add_fd(exec_ctx, pollset, workqueue->wakeup_read_fd);
+}
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+ gpr_mu_lock(&workqueue->mu);
+ if (grpc_closure_list_empty(workqueue->closure_list)) {
+ grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
+ }
+ grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
+ gpr_mu_unlock(&workqueue->mu);
+}
+
+static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_workqueue *workqueue = arg;
+
+ if (!success) {
+ gpr_mu_destroy(&workqueue->mu);
+ /* HACK: let wakeup_fd code know that we stole the fd */
+ workqueue->wakeup_fd.read_fd = 0;
+ grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
+ grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
+ gpr_free(workqueue);
+ } else {
+ gpr_mu_lock(&workqueue->mu);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
+ grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
+ gpr_mu_unlock(&workqueue->mu);
+ grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
+ &workqueue->read_closure);
+ }
+}
+
+void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
+ int success) {
+ gpr_mu_lock(&workqueue->mu);
+ if (grpc_closure_list_empty(workqueue->closure_list)) {
+ grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
+ }
+ grpc_closure_list_add(&workqueue->closure_list, closure, success);
+ gpr_mu_unlock(&workqueue->mu);
+}
+
+#endif /* GPR_POSIX_SOCKET */
diff --git a/src/core/iomgr/workqueue_posix.h b/src/core/iomgr/workqueue_posix.h
new file mode 100644
index 0000000000..89937b1ea8
--- /dev/null
+++ b/src/core/iomgr/workqueue_posix.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_WORKQUEUE_POSIX_H
+#define GRPC_CORE_IOMGR_WORKQUEUE_POSIX_H
+
+#include "src/core/iomgr/wakeup_fd_posix.h"
+
+struct grpc_fd;
+
+struct grpc_workqueue {
+ gpr_refcount refs;
+
+ gpr_mu mu;
+ grpc_closure_list closure_list;
+
+ grpc_wakeup_fd wakeup_fd;
+ struct grpc_fd *wakeup_read_fd;
+
+ grpc_closure read_closure;
+};
+
+#endif /* GRPC_CORE_IOMGR_WORKQUEUE_POSIX_H */
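Usage sketch (not part of the diff): the new workqueue files pair a per-workqueue closure list with a wakeup fd that is kicked only when the list transitions from empty to non-empty. The sketch below assumes the contemporary exec-ctx helpers (GRPC_EXEC_CTX_INIT, grpc_exec_ctx_finish) and a caller-supplied pollset; the helper names do_work and example_schedule are illustrative.

/* Editorial sketch, not part of the diff: enqueue a closure on a workqueue
 * bound to an existing pollset, using only the signatures shown above. */
#include "src/core/iomgr/workqueue.h"

static void do_work(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  /* Runs when a thread polling the bound pollset drains the workqueue. */
}

static void example_schedule(grpc_pollset *pollset) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; /* assumed helper macro */
  grpc_closure work;
  grpc_workqueue *wq = grpc_workqueue_create(&exec_ctx);

  /* Make the workqueue's wakeup fd visible to the poller. */
  grpc_workqueue_add_to_pollset(&exec_ctx, wq, pollset);

  /* Push a closure; the wakeup fd is only kicked if the list was empty. */
  grpc_closure_init(&work, do_work, NULL);
  grpc_workqueue_push(wq, &work, 1 /* success */);

  grpc_workqueue_unref(&exec_ctx, wq); /* non-debug signature shown above */
  grpc_exec_ctx_finish(&exec_ctx);     /* assumed helper */
}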
diff --git a/src/core/surface/server_create.c b/src/core/iomgr/workqueue_windows.c
index b7390675ad..f9ca57557b 100644
--- a/src/core/surface/server_create.c
+++ b/src/core/iomgr/workqueue_windows.c
@@ -31,10 +31,10 @@
*
*/
-#include <grpc/grpc.h>
-#include "src/core/surface/completion_queue.h"
-#include "src/core/surface/server.h"
+#include <grpc/support/port_platform.h>
-grpc_server *grpc_server_create(const grpc_channel_args *args) {
- return grpc_server_create_from_filters(NULL, 0, args);
-}
+#ifdef GPR_WIN32
+
+#include "src/core/iomgr/workqueue.h"
+
+#endif /* GPR_WIN32 */
diff --git a/src/core/iomgr/workqueue_windows.h b/src/core/iomgr/workqueue_windows.h
new file mode 100644
index 0000000000..7e8186921e
--- /dev/null
+++ b/src/core/iomgr/workqueue_windows.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_IOMGR_WORKQUEUE_WINDOWS_H
+#define GRPC_CORE_IOMGR_WORKQUEUE_WINDOWS_H
+
+#endif /* GRPC_CORE_IOMGR_WORKQUEUE_WINDOWS_H */
diff --git a/src/core/json/json.h b/src/core/json/json.h
index b78b42a5b2..aea9d5dadb 100644
--- a/src/core/json/json.h
+++ b/src/core/json/json.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_JSON_JSON_H
-#define GRPC_INTERNAL_CORE_JSON_JSON_H
+#ifndef GRPC_CORE_JSON_JSON_H
+#define GRPC_CORE_JSON_JSON_H
#include <stdlib.h>
@@ -42,18 +42,18 @@
* are not owned by it.
*/
typedef struct grpc_json {
- struct grpc_json* next;
- struct grpc_json* prev;
- struct grpc_json* child;
- struct grpc_json* parent;
+ struct grpc_json *next;
+ struct grpc_json *prev;
+ struct grpc_json *child;
+ struct grpc_json *parent;
grpc_json_type type;
- const char* key;
- const char* value;
+ const char *key;
+ const char *value;
} grpc_json;
/* The next two functions are going to parse the input string, and
- * destroy it in the process, in order to use its space to store
+ * modify it in the process, in order to use its space to store
* all of the keys and values for the returned object tree.
*
* They assume UTF-8 input stream, and will output UTF-8 encoded
@@ -65,8 +65,8 @@ typedef struct grpc_json {
*
* Delete the allocated tree afterward using grpc_json_destroy().
*/
-grpc_json* grpc_json_parse_string_with_len(char* input, size_t size);
-grpc_json* grpc_json_parse_string(char* input);
+grpc_json *grpc_json_parse_string_with_len(char *input, size_t size);
+grpc_json *grpc_json_parse_string(char *input);
/* This function will create a new string using gpr_realloc, and will
* deserialize the grpc_json tree into it. It'll be zero-terminated,
@@ -76,13 +76,13 @@ grpc_json* grpc_json_parse_string(char* input);
* If indent is 0, then newlines will be suppressed as well, and the
 * output will be as condensed as possible.
*/
-char* grpc_json_dump_to_string(grpc_json* json, int indent);
+char *grpc_json_dump_to_string(grpc_json *json, int indent);
/* Use these to create or delete a grpc_json object.
* Deletion is recursive. We will not attempt to free any of the strings
* in any of the objects of that tree.
*/
-grpc_json* grpc_json_create(grpc_json_type type);
-void grpc_json_destroy(grpc_json* json);
+grpc_json *grpc_json_create(grpc_json_type type);
+void grpc_json_destroy(grpc_json *json);
-#endif /* GRPC_INTERNAL_CORE_JSON_JSON_H */
+#endif /* GRPC_CORE_JSON_JSON_H */
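Usage sketch (not part of the diff): the updated comments above note that the parser now modifies its input in place rather than destroying it, and that the dump buffer comes from gpr_realloc. A minimal round trip, assuming gpr_free is the matching deallocator and that parse failures are reported as a NULL return:

/* Editorial sketch, not part of the diff. */
#include <grpc/support/alloc.h>
#include "src/core/json/json.h"

static void json_round_trip(void) {
  char buf[] = "{\"name\": \"grpc\", \"ok\": true}"; /* must be writable */
  grpc_json *json = grpc_json_parse_string(buf);
  if (json != NULL) {
    char *text = grpc_json_dump_to_string(json, 2 /* indent */);
    /* ... use text ... */
    gpr_free(text); /* dump buffer is gpr_realloc-allocated per the comment above */
    grpc_json_destroy(json);
  }
}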
diff --git a/src/core/json/json_common.h b/src/core/json/json_common.h
index 84bf375916..7205a94685 100644
--- a/src/core/json/json_common.h
+++ b/src/core/json/json_common.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_JSON_JSON_COMMON_H
-#define GRPC_INTERNAL_CORE_JSON_JSON_COMMON_H
+#ifndef GRPC_CORE_JSON_JSON_COMMON_H
+#define GRPC_CORE_JSON_JSON_COMMON_H
/* The various json types. */
typedef enum {
@@ -46,4 +46,4 @@ typedef enum {
GRPC_JSON_TOP_LEVEL
} grpc_json_type;
-#endif /* GRPC_INTERNAL_CORE_JSON_JSON_COMMON_H */
+#endif /* GRPC_CORE_JSON_JSON_COMMON_H */
diff --git a/src/core/json/json_reader.c b/src/core/json/json_reader.c
index c14094c290..30da6f28f3 100644
--- a/src/core/json/json_reader.c
+++ b/src/core/json/json_reader.c
@@ -35,64 +35,64 @@
#include <grpc/support/port_platform.h>
+#include <grpc/support/log.h>
+
#include "src/core/json/json_reader.h"
-static void json_reader_string_clear(grpc_json_reader* reader) {
+static void json_reader_string_clear(grpc_json_reader *reader) {
reader->vtable->string_clear(reader->userdata);
}
-static void json_reader_string_add_char(grpc_json_reader* reader,
- gpr_uint32 c) {
+static void json_reader_string_add_char(grpc_json_reader *reader, uint32_t c) {
reader->vtable->string_add_char(reader->userdata, c);
}
-static void json_reader_string_add_utf32(grpc_json_reader* reader,
- gpr_uint32 utf32) {
+static void json_reader_string_add_utf32(grpc_json_reader *reader,
+ uint32_t utf32) {
reader->vtable->string_add_utf32(reader->userdata, utf32);
}
-static gpr_uint32
- grpc_json_reader_read_char(grpc_json_reader* reader) {
+static uint32_t grpc_json_reader_read_char(grpc_json_reader *reader) {
return reader->vtable->read_char(reader->userdata);
}
-static void json_reader_container_begins(grpc_json_reader* reader,
- grpc_json_type type) {
+static void json_reader_container_begins(grpc_json_reader *reader,
+ grpc_json_type type) {
reader->vtable->container_begins(reader->userdata, type);
}
-static grpc_json_type
- grpc_json_reader_container_ends(grpc_json_reader* reader) {
+static grpc_json_type grpc_json_reader_container_ends(
+ grpc_json_reader *reader) {
return reader->vtable->container_ends(reader->userdata);
}
-static void json_reader_set_key(grpc_json_reader* reader) {
+static void json_reader_set_key(grpc_json_reader *reader) {
reader->vtable->set_key(reader->userdata);
}
-static void json_reader_set_string(grpc_json_reader* reader) {
+static void json_reader_set_string(grpc_json_reader *reader) {
reader->vtable->set_string(reader->userdata);
}
-static int json_reader_set_number(grpc_json_reader* reader) {
+static int json_reader_set_number(grpc_json_reader *reader) {
return reader->vtable->set_number(reader->userdata);
}
-static void json_reader_set_true(grpc_json_reader* reader) {
+static void json_reader_set_true(grpc_json_reader *reader) {
reader->vtable->set_true(reader->userdata);
}
-static void json_reader_set_false(grpc_json_reader* reader) {
+static void json_reader_set_false(grpc_json_reader *reader) {
reader->vtable->set_false(reader->userdata);
}
-static void json_reader_set_null(grpc_json_reader* reader) {
+static void json_reader_set_null(grpc_json_reader *reader) {
reader->vtable->set_null(reader->userdata);
}
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader* reader,
- grpc_json_reader_vtable* vtable, void* userdata) {
+void grpc_json_reader_init(grpc_json_reader *reader,
+ grpc_json_reader_vtable *vtable, void *userdata) {
memset(reader, 0, sizeof(*reader));
reader->vtable = vtable;
reader->userdata = userdata;
@@ -100,13 +100,14 @@ void grpc_json_reader_init(grpc_json_reader* reader,
reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
}
-int grpc_json_reader_is_complete(grpc_json_reader* reader) {
- return ((reader->depth == 0) && ((reader->state == GRPC_JSON_STATE_END) ||
- (reader->state == GRPC_JSON_STATE_VALUE_END)));
+int grpc_json_reader_is_complete(grpc_json_reader *reader) {
+ return ((reader->depth == 0) &&
+ ((reader->state == GRPC_JSON_STATE_END) ||
+ (reader->state == GRPC_JSON_STATE_VALUE_END)));
}
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
- gpr_uint32 c, success;
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
+ uint32_t c, success;
/* This state-machine is a strict implementation of ECMA-404 */
for (;;) {
@@ -143,7 +144,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_OBJECT_KEY_STRING:
case GRPC_JSON_STATE_VALUE_STRING:
if (c != ' ') return GRPC_JSON_PARSE_ERROR;
- if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+ if (reader->unicode_high_surrogate != 0)
+ return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, c);
break;
@@ -151,7 +153,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
- success = (gpr_uint32)json_reader_set_number(reader);
+ success = (uint32_t)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@@ -169,7 +171,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
switch (reader->state) {
case GRPC_JSON_STATE_OBJECT_KEY_STRING:
case GRPC_JSON_STATE_VALUE_STRING:
- if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+ if (reader->unicode_high_surrogate != 0)
+ return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, c);
break;
@@ -177,7 +180,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
- success = (gpr_uint32)json_reader_set_number(reader);
+ success = (uint32_t)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@@ -222,13 +225,13 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
reader->in_array = 1;
break;
case GRPC_JSON_TOP_LEVEL:
- if (reader->depth != 0) return GRPC_JSON_INTERNAL_ERROR;
+ GPR_ASSERT(reader->depth == 0);
reader->in_object = 0;
reader->in_array = 0;
reader->state = GRPC_JSON_STATE_END;
break;
default:
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
}
break;
@@ -253,7 +256,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
/* This is the \\ case. */
case GRPC_JSON_STATE_STRING_ESCAPE:
- if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+ if (reader->unicode_high_surrogate != 0)
+ return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, '\\');
if (reader->escaped_string_was_key) {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
@@ -276,7 +280,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
break;
case GRPC_JSON_STATE_OBJECT_KEY_STRING:
- if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+ GPR_ASSERT(reader->unicode_high_surrogate == 0);
if (c == '"') {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_END;
json_reader_set_key(reader);
@@ -288,7 +292,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
break;
case GRPC_JSON_STATE_VALUE_STRING:
- if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+ if (reader->unicode_high_surrogate != 0)
+ return GRPC_JSON_PARSE_ERROR;
if (c == '"') {
reader->state = GRPC_JSON_STATE_VALUE_END;
json_reader_set_string(reader);
@@ -410,8 +415,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
} else {
return GRPC_JSON_PARSE_ERROR;
}
- reader->unicode_char = (gpr_uint16)(reader->unicode_char << 4);
- reader->unicode_char = (gpr_uint16)(reader->unicode_char | c);
+ reader->unicode_char = (uint16_t)(reader->unicode_char << 4);
+ reader->unicode_char = (uint16_t)(reader->unicode_char | c);
switch (reader->state) {
case GRPC_JSON_STATE_STRING_ESCAPE_U1:
@@ -434,12 +439,13 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
reader->unicode_high_surrogate = reader->unicode_char;
} else if ((reader->unicode_char & 0xfc00) == 0xdc00) {
/* low surrogate utf-16 */
- gpr_uint32 utf32;
+ uint32_t utf32;
if (reader->unicode_high_surrogate == 0)
return GRPC_JSON_PARSE_ERROR;
utf32 = 0x10000;
- utf32 += (gpr_uint32)((reader->unicode_high_surrogate - 0xd800) * 0x400);
- utf32 += (gpr_uint32)(reader->unicode_char - 0xdc00);
+ utf32 += (uint32_t)(
+ (reader->unicode_high_surrogate - 0xd800) * 0x400);
+ utf32 += (uint32_t)(reader->unicode_char - 0xdc00);
json_reader_string_add_utf32(reader, utf32);
reader->unicode_high_surrogate = 0;
} else {
@@ -455,7 +461,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
}
break;
default:
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
break;
@@ -635,7 +641,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case ',':
case '}':
case ']':
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
break;
default:
@@ -649,5 +655,5 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
}
}
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
diff --git a/src/core/json/json_reader.h b/src/core/json/json_reader.h
index b1a5ace8fb..f25f44b2ef 100644
--- a/src/core/json/json_reader.h
+++ b/src/core/json/json_reader.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_JSON_JSON_READER_H
-#define GRPC_INTERNAL_CORE_JSON_JSON_READER_H
+#ifndef GRPC_CORE_JSON_JSON_READER_H
+#define GRPC_CORE_JSON_JSON_READER_H
#include <grpc/support/port_platform.h>
#include "src/core/json/json_common.h"
@@ -82,27 +82,27 @@ struct grpc_json_reader;
typedef struct grpc_json_reader_vtable {
/* Clears your internal string scratchpad. */
- void (*string_clear)(void* userdata);
+ void (*string_clear)(void *userdata);
/* Adds a char to the string scratchpad. */
- void (*string_add_char)(void* userdata, gpr_uint32 c);
+ void (*string_add_char)(void *userdata, uint32_t c);
/* Adds a utf32 char to the string scratchpad. */
- void (*string_add_utf32)(void* userdata, gpr_uint32 c);
+ void (*string_add_utf32)(void *userdata, uint32_t c);
/* Reads a character from your input. May be utf-8, 16 or 32. */
- gpr_uint32 (*read_char)(void* userdata);
+ uint32_t (*read_char)(void *userdata);
/* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */
- void (*container_begins)(void* userdata, grpc_json_type type);
+ void (*container_begins)(void *userdata, grpc_json_type type);
/* Ends the current container. Must return the type of its parent. */
- grpc_json_type (*container_ends)(void* userdata);
+ grpc_json_type (*container_ends)(void *userdata);
/* Your internal string scratchpad is an object's key. */
- void (*set_key)(void* userdata);
+ void (*set_key)(void *userdata);
/* Your internal string scratchpad is a string value. */
- void (*set_string)(void* userdata);
+ void (*set_string)(void *userdata);
/* Your internal string scratchpad is a numerical value. Return 1 if valid. */
- int (*set_number)(void* userdata);
+ int (*set_number)(void *userdata);
/* Sets the values true, false or null. */
- void (*set_true)(void* userdata);
- void (*set_false)(void* userdata);
- void (*set_null)(void* userdata);
+ void (*set_true)(void *userdata);
+ void (*set_false)(void *userdata);
+ void (*set_null)(void *userdata);
} grpc_json_reader_vtable;
typedef struct grpc_json_reader {
@@ -110,14 +110,14 @@ typedef struct grpc_json_reader {
* The definition is public so you can put it on your stack.
*/
- void* userdata;
- grpc_json_reader_vtable* vtable;
+ void *userdata;
+ grpc_json_reader_vtable *vtable;
int depth;
int in_object;
int in_array;
int escaped_string_was_key;
int container_just_begun;
- gpr_uint16 unicode_char, unicode_high_surrogate;
+ uint16_t unicode_char, unicode_high_surrogate;
grpc_json_reader_state state;
} grpc_json_reader;
@@ -144,17 +144,17 @@ typedef enum {
* . GRPC_JSON_INTERNAL_ERROR if the parser somehow ended into an invalid
* internal state.
*/
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader);
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader);
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader* reader,
- grpc_json_reader_vtable* vtable, void* userdata);
+void grpc_json_reader_init(grpc_json_reader *reader,
+ grpc_json_reader_vtable *vtable, void *userdata);
/* You may call this from the read_char callback if you don't know where the
 * end of your input stream is, and you'd like the json reader to hint that it
 * has finished reading its input, so you can return an EOF to it. Note that
 * there might still be trailing whitespace after that point.
*/
-int grpc_json_reader_is_complete(grpc_json_reader* reader);
+int grpc_json_reader_is_complete(grpc_json_reader *reader);
-#endif /* GRPC_INTERNAL_CORE_JSON_JSON_READER_H */
+#endif /* GRPC_CORE_JSON_JSON_READER_H */
diff --git a/src/core/json/json_string.c b/src/core/json/json_string.c
index 03c1099167..2bc0b513d5 100644
--- a/src/core/json/json_string.c
+++ b/src/core/json/json_string.c
@@ -53,13 +53,13 @@
* input size, and never expands it.
*/
typedef struct {
- grpc_json* top;
- grpc_json* current_container;
- grpc_json* current_value;
- gpr_uint8* input;
- gpr_uint8* key;
- gpr_uint8* string;
- gpr_uint8* string_ptr;
+ grpc_json *top;
+ grpc_json *current_container;
+ grpc_json *current_value;
+ uint8_t *input;
+ uint8_t *key;
+ uint8_t *string;
+ uint8_t *string_ptr;
size_t remaining_input;
} json_reader_userdata;
@@ -67,19 +67,18 @@ typedef struct {
* The point is that we allocate that string in chunks of 256 bytes.
*/
typedef struct {
- char* output;
+ char *output;
size_t free_space;
size_t string_len;
size_t allocated;
} json_writer_userdata;
-
/* This function checks if there's enough space left in the output buffer,
* and will enlarge it if necessary. We're only allocating chunks of 256
* bytes at a time (or multiples thereof).
*/
-static void json_writer_output_check(void* userdata, size_t needed) {
- json_writer_userdata* state = userdata;
+static void json_writer_output_check(void *userdata, size_t needed) {
+ json_writer_userdata *state = userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
@@ -90,24 +89,23 @@ static void json_writer_output_check(void* userdata, size_t needed) {
}
/* These are needed by the writer's implementation. */
-static void json_writer_output_char(void* userdata, char c) {
- json_writer_userdata* state = userdata;
+static void json_writer_output_char(void *userdata, char c) {
+ json_writer_userdata *state = userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
}
-static void json_writer_output_string_with_len(void* userdata,
- const char* str, size_t len) {
- json_writer_userdata* state = userdata;
+static void json_writer_output_string_with_len(void *userdata, const char *str,
+ size_t len) {
+ json_writer_userdata *state = userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
state->free_space -= len;
}
-static void json_writer_output_string(void* userdata,
- const char* str) {
+static void json_writer_output_string(void *userdata, const char *str) {
size_t len = strlen(str);
json_writer_output_string_with_len(userdata, str, len);
}
@@ -115,8 +113,8 @@ static void json_writer_output_string(void* userdata,
/* The reader asks us to clear our scratchpad. In our case, we'll simply mark
* the end of the current string, and advance our output pointer.
*/
-static void json_reader_string_clear(void* userdata) {
- json_reader_userdata* state = userdata;
+static void json_reader_string_clear(void *userdata) {
+ json_reader_userdata *state = userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -124,36 +122,36 @@ static void json_reader_string_clear(void* userdata) {
state->string = state->string_ptr;
}
-static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
- json_reader_userdata* state = userdata;
+static void json_reader_string_add_char(void *userdata, uint32_t c) {
+ json_reader_userdata *state = userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
- *state->string_ptr++ = (gpr_uint8)c;
+ *state->string_ptr++ = (uint8_t)c;
}
/* We are converting a UTF-32 character into UTF-8 here,
* as described by RFC3629.
*/
-static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
+static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
if (c <= 0x7f) {
json_reader_string_add_char(userdata, c);
} else if (c <= 0x7ff) {
- gpr_uint32 b1 = 0xc0 | ((c >> 6) & 0x1f);
- gpr_uint32 b2 = 0x80 | (c & 0x3f);
+ uint32_t b1 = 0xc0 | ((c >> 6) & 0x1f);
+ uint32_t b2 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
} else if (c <= 0xffff) {
- gpr_uint32 b1 = 0xe0 | ((c >> 12) & 0x0f);
- gpr_uint32 b2 = 0x80 | ((c >> 6) & 0x3f);
- gpr_uint32 b3 = 0x80 | (c & 0x3f);
+ uint32_t b1 = 0xe0 | ((c >> 12) & 0x0f);
+ uint32_t b2 = 0x80 | ((c >> 6) & 0x3f);
+ uint32_t b3 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
} else if (c <= 0x1fffff) {
- gpr_uint32 b1 = 0xf0 | ((c >> 18) & 0x07);
- gpr_uint32 b2 = 0x80 | ((c >> 12) & 0x3f);
- gpr_uint32 b3 = 0x80 | ((c >> 6) & 0x3f);
- gpr_uint32 b4 = 0x80 | (c & 0x3f);
+ uint32_t b1 = 0xf0 | ((c >> 18) & 0x07);
+ uint32_t b2 = 0x80 | ((c >> 12) & 0x3f);
+ uint32_t b3 = 0x80 | ((c >> 6) & 0x3f);
+ uint32_t b4 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
@@ -164,9 +162,9 @@ static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
/* We consider that the input may be a zero-terminated string. So we
* can end up hitting eof before the end of the alleged string length.
*/
-static gpr_uint32 json_reader_read_char(void* userdata) {
- gpr_uint32 r;
- json_reader_userdata* state = userdata;
+static uint32_t json_reader_read_char(void *userdata) {
+ uint32_t r;
+ json_reader_userdata *state = userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -184,10 +182,9 @@ static gpr_uint32 json_reader_read_char(void* userdata) {
/* Helper function to create a new grpc_json object and link it into
* our tree-in-progress inside our opaque structure.
*/
-static grpc_json* json_create_and_link(void* userdata,
- grpc_json_type type) {
- json_reader_userdata* state = userdata;
- grpc_json* json = grpc_json_create(type);
+static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
json->prev = state->current_value;
@@ -201,7 +198,7 @@ static grpc_json* json_create_and_link(void* userdata,
json->parent->child = json;
}
if (json->parent->type == GRPC_JSON_OBJECT) {
- json->key = (char*) state->key;
+ json->key = (char *)state->key;
}
}
if (!state->top) {
@@ -211,9 +208,9 @@ static grpc_json* json_create_and_link(void* userdata,
return json;
}
-static void json_reader_container_begins(void* userdata, grpc_json_type type) {
- json_reader_userdata* state = userdata;
- grpc_json* container;
+static void json_reader_container_begins(void *userdata, grpc_json_type type) {
+ json_reader_userdata *state = userdata;
+ grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -231,9 +228,9 @@ static void json_reader_container_begins(void* userdata, grpc_json_type type) {
* Also note that if we're at the top of the tree, and the last container
* ends, we have to return GRPC_JSON_TOP_LEVEL.
*/
-static grpc_json_type json_reader_container_ends(void* userdata) {
+static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata* state = userdata;
+ json_reader_userdata *state = userdata;
GPR_ASSERT(state->current_container);
@@ -253,56 +250,49 @@ static grpc_json_type json_reader_container_ends(void* userdata) {
* Note that in the set_number case, we're not going to try interpreting it.
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
-static void json_reader_set_key(void* userdata) {
- json_reader_userdata* state = userdata;
+static void json_reader_set_key(void *userdata) {
+ json_reader_userdata *state = userdata;
state->key = state->string;
}
-static void json_reader_set_string(void* userdata) {
- json_reader_userdata* state = userdata;
- grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING);
- json->value = (char*) state->string;
+static void json_reader_set_string(void *userdata) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
+ json->value = (char *)state->string;
}
-static int json_reader_set_number(void* userdata) {
- json_reader_userdata* state = userdata;
- grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
- json->value = (char*) state->string;
+static int json_reader_set_number(void *userdata) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
+ json->value = (char *)state->string;
return 1;
}
/* The object types true, false and null are self-sufficient, and don't need
* any more information beside their type.
*/
-static void json_reader_set_true(void* userdata) {
+static void json_reader_set_true(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_TRUE);
}
-static void json_reader_set_false(void* userdata) {
+static void json_reader_set_false(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_FALSE);
}
-static void json_reader_set_null(void* userdata) {
+static void json_reader_set_null(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_NULL);
}
static grpc_json_reader_vtable reader_vtable = {
- json_reader_string_clear,
- json_reader_string_add_char,
- json_reader_string_add_utf32,
- json_reader_read_char,
- json_reader_container_begins,
- json_reader_container_ends,
- json_reader_set_key,
- json_reader_set_string,
- json_reader_set_number,
- json_reader_set_true,
- json_reader_set_false,
- json_reader_set_null
-};
+ json_reader_string_clear, json_reader_string_add_char,
+ json_reader_string_add_utf32, json_reader_read_char,
+ json_reader_container_begins, json_reader_container_ends,
+ json_reader_set_key, json_reader_set_string,
+ json_reader_set_number, json_reader_set_true,
+ json_reader_set_false, json_reader_set_null};
/* And finally, let's define our public API. */
-grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
+grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) {
grpc_json_reader reader;
json_reader_userdata state;
grpc_json *json = NULL;
@@ -312,7 +302,7 @@ grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
state.top = state.current_container = state.current_value = NULL;
state.string = state.key = NULL;
- state.string_ptr = state.input = (gpr_uint8*) input;
+ state.string_ptr = state.input = (uint8_t *)input;
state.remaining_input = size;
grpc_json_reader_init(&reader, &reader_vtable, &state);
@@ -329,12 +319,12 @@ grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
#define UNBOUND_JSON_STRING_LENGTH 0x7fffffff
-grpc_json* grpc_json_parse_string(char* input) {
+grpc_json *grpc_json_parse_string(char *input) {
return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH);
}
-static void json_dump_recursive(grpc_json_writer* writer,
- grpc_json* json, int in_object) {
+static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json,
+ int in_object) {
while (json) {
if (in_object) grpc_json_writer_object_key(writer, json->key);
@@ -363,19 +353,17 @@ static void json_dump_recursive(grpc_json_writer* writer,
grpc_json_writer_value_raw_with_len(writer, "null", 4);
break;
default:
- abort();
+ GPR_UNREACHABLE_CODE(abort());
}
json = json->next;
}
}
static grpc_json_writer_vtable writer_vtable = {
- json_writer_output_char,
- json_writer_output_string,
- json_writer_output_string_with_len
-};
+ json_writer_output_char, json_writer_output_string,
+ json_writer_output_string_with_len};
-char* grpc_json_dump_to_string(grpc_json* json, int indent) {
+char *grpc_json_dump_to_string(grpc_json *json, int indent) {
grpc_json_writer writer;
json_writer_userdata state;
diff --git a/src/core/json/json_writer.c b/src/core/json/json_writer.c
index bed9a9bfa5..326ec2d431 100644
--- a/src/core/json/json_writer.c
+++ b/src/core/json/json_writer.c
@@ -37,20 +37,22 @@
#include "src/core/json/json_writer.h"
-static void json_writer_output_char(grpc_json_writer* writer, char c) {
+static void json_writer_output_char(grpc_json_writer *writer, char c) {
writer->vtable->output_char(writer->userdata, c);
}
-static void json_writer_output_string(grpc_json_writer* writer, const char* str) {
+static void json_writer_output_string(grpc_json_writer *writer,
+ const char *str) {
writer->vtable->output_string(writer->userdata, str);
}
-static void json_writer_output_string_with_len(grpc_json_writer* writer, const char* str, size_t len) {
+static void json_writer_output_string_with_len(grpc_json_writer *writer,
+ const char *str, size_t len) {
writer->vtable->output_string_with_len(writer->userdata, str, len);
}
-void grpc_json_writer_init(grpc_json_writer* writer, int indent,
- grpc_json_writer_vtable* vtable, void* userdata) {
+void grpc_json_writer_init(grpc_json_writer *writer, int indent,
+ grpc_json_writer_vtable *vtable, void *userdata) {
memset(writer, 0, sizeof(*writer));
writer->container_empty = 1;
writer->indent = indent;
@@ -58,8 +60,7 @@ void grpc_json_writer_init(grpc_json_writer* writer, int indent,
writer->userdata = userdata;
}
-static void json_writer_output_indent(
- grpc_json_writer* writer) {
+static void json_writer_output_indent(grpc_json_writer *writer) {
static const char spacesstr[] =
" "
" "
@@ -87,7 +88,7 @@ static void json_writer_output_indent(
writer, spacesstr + sizeof(spacesstr) - 1 - spaces, spaces);
}
-static void json_writer_value_end(grpc_json_writer* writer) {
+static void json_writer_value_end(grpc_json_writer *writer) {
if (writer->container_empty) {
writer->container_empty = 0;
if ((writer->indent == 0) || (writer->depth == 0)) return;
@@ -99,22 +100,22 @@ static void json_writer_value_end(grpc_json_writer* writer) {
}
}
-static void json_writer_escape_utf16(grpc_json_writer* writer, gpr_uint16 utf16) {
+static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) {
static const char hex[] = "0123456789abcdef";
json_writer_output_string_with_len(writer, "\\u", 2);
json_writer_output_char(writer, hex[(utf16 >> 12) & 0x0f]);
json_writer_output_char(writer, hex[(utf16 >> 8) & 0x0f]);
json_writer_output_char(writer, hex[(utf16 >> 4) & 0x0f]);
- json_writer_output_char(writer, hex[(utf16) & 0x0f]);
+ json_writer_output_char(writer, hex[(utf16)&0x0f]);
}
-static void json_writer_escape_string(grpc_json_writer* writer,
- const char* string) {
+static void json_writer_escape_string(grpc_json_writer *writer,
+ const char *string) {
json_writer_output_char(writer, '"');
for (;;) {
- gpr_uint8 c = (gpr_uint8)*string++;
+ uint8_t c = (uint8_t)*string++;
if (c == 0) {
break;
} else if ((c >= 32) && (c <= 126)) {
@@ -142,7 +143,7 @@ static void json_writer_escape_string(grpc_json_writer* writer,
break;
}
} else {
- gpr_uint32 utf32 = 0;
+ uint32_t utf32 = 0;
int extra = 0;
int i;
int valid = 1;
@@ -160,7 +161,7 @@ static void json_writer_escape_string(grpc_json_writer* writer,
}
for (i = 0; i < extra; i++) {
utf32 <<= 6;
- c = (gpr_uint8)(*string++);
+ c = (uint8_t)(*string++);
/* Break out and bail on any invalid UTF-8 sequence, including \0. */
if ((c & 0xc0) != 0x80) {
valid = 0;
@@ -173,8 +174,8 @@ static void json_writer_escape_string(grpc_json_writer* writer,
* Any other range is technically reserved for future usage, so if we
* don't want the software to break in the future, we have to allow
* anything else. The first non-unicode character is 0x110000. */
- if (((utf32 >= 0xd800) && (utf32 <= 0xdfff)) ||
- (utf32 >= 0x110000)) break;
+ if (((utf32 >= 0xd800) && (utf32 <= 0xdfff)) || (utf32 >= 0x110000))
+ break;
if (utf32 >= 0x10000) {
/* If utf32 contains a character that is above 0xffff, it needs to be
* broken down into a utf-16 surrogate pair. A surrogate pair is first
@@ -193,10 +194,10 @@ static void json_writer_escape_string(grpc_json_writer* writer,
* That range is exactly 20 bits.
*/
utf32 -= 0x10000;
- json_writer_escape_utf16(writer, (gpr_uint16)(0xd800 | (utf32 >> 10)));
- json_writer_escape_utf16(writer, (gpr_uint16)(0xdc00 | (utf32 & 0x3ff)));
+ json_writer_escape_utf16(writer, (uint16_t)(0xd800 | (utf32 >> 10)));
+ json_writer_escape_utf16(writer, (uint16_t)(0xdc00 | (utf32 & 0x3ff)));
} else {
- json_writer_escape_utf16(writer, (gpr_uint16)utf32);
+ json_writer_escape_utf16(writer, (uint16_t)utf32);
}
}
}
@@ -204,7 +205,8 @@ static void json_writer_escape_string(grpc_json_writer* writer,
json_writer_output_char(writer, '"');
}
-void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type) {
+void grpc_json_writer_container_begins(grpc_json_writer *writer,
+ grpc_json_type type) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_char(writer, type == GRPC_JSON_OBJECT ? '{' : '[');
@@ -213,7 +215,8 @@ void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type
writer->depth++;
}
-void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type) {
+void grpc_json_writer_container_ends(grpc_json_writer *writer,
+ grpc_json_type type) {
if (writer->indent && !writer->container_empty)
json_writer_output_char(writer, '\n');
writer->depth--;
@@ -223,7 +226,7 @@ void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type ty
writer->got_key = 0;
}
-void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) {
json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
@@ -231,21 +234,23 @@ void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
writer->got_key = 1;
}
-void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string(writer, string);
writer->got_key = 0;
}
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len) {
+void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
+ const char *string, size_t len) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string_with_len(writer, string, len);
writer->got_key = 0;
}
-void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_value_string(grpc_json_writer *writer,
+ const char *string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
diff --git a/src/core/json/json_writer.h b/src/core/json/json_writer.h
index dfa61a5fef..c392126950 100644
--- a/src/core/json/json_writer.h
+++ b/src/core/json/json_writer.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,8 +43,8 @@
* a valid UTF-8 string overall.
*/
-#ifndef GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H
-#define GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H
+#ifndef GRPC_CORE_JSON_JSON_WRITER_H
+#define GRPC_CORE_JSON_JSON_WRITER_H
#include <stdlib.h>
@@ -52,17 +52,17 @@
typedef struct grpc_json_writer_vtable {
/* Adds a character to the output stream. */
- void (*output_char)(void* userdata, char);
+ void (*output_char)(void *userdata, char);
/* Adds a zero-terminated string to the output stream. */
- void (*output_string)(void* userdata, const char* str);
+ void (*output_string)(void *userdata, const char *str);
/* Adds a fixed-length string to the output stream. */
- void (*output_string_with_len)(void* userdata, const char* str, size_t len);
+ void (*output_string_with_len)(void *userdata, const char *str, size_t len);
} grpc_json_writer_vtable;
typedef struct grpc_json_writer {
- void* userdata;
- grpc_json_writer_vtable* vtable;
+ void *userdata;
+ grpc_json_writer_vtable *vtable;
int indent;
int depth;
int container_empty;
@@ -74,20 +74,24 @@ typedef struct grpc_json_writer {
* use indent=0, then the output will not have any newlines either, thus
* emitting a condensed json output.
*/
-void grpc_json_writer_init(grpc_json_writer* writer, int indent,
- grpc_json_writer_vtable* vtable, void* userdata);
+void grpc_json_writer_init(grpc_json_writer *writer, int indent,
+ grpc_json_writer_vtable *vtable, void *userdata);
/* Signals the beginning of a container. */
-void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type);
+void grpc_json_writer_container_begins(grpc_json_writer *writer,
+ grpc_json_type type);
/* Signals the end of a container. */
-void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type);
+void grpc_json_writer_container_ends(grpc_json_writer *writer,
+ grpc_json_type type);
/* Writes down an object key for the next value. */
-void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string);
/* Sets a raw value. Useful for numbers. */
-void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string);
/* Sets a raw value with its length. Useful for values like true or false. */
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len);
+void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
+ const char *string, size_t len);
/* Sets a string value. It'll be escaped, and utf-8 validated. */
-void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_value_string(grpc_json_writer *writer,
+ const char *string);
-#endif /* GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H */
+#endif /* GRPC_CORE_JSON_JSON_WRITER_H */
diff --git a/src/core/profiling/basic_timers.c b/src/core/profiling/basic_timers.c
index ae37f584eb..df32472d1c 100644
--- a/src/core/profiling/basic_timers.c
+++ b/src/core/profiling/basic_timers.c
@@ -36,7 +36,6 @@
#ifdef GRPC_BASIC_PROFILER
#include "src/core/profiling/timers.h"
-#include "src/core/profiling/timers_preciseclock.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -45,98 +44,231 @@
#include <grpc/support/thd.h>
#include <stdio.h>
-typedef enum {
- BEGIN = '{',
- END = '}',
- MARK = '.',
- IMPORTANT = '!'
-} marker_type;
-
-typedef struct grpc_timer_entry {
- grpc_precise_clock tm;
- int tag;
- const char* tagstr;
- marker_type type;
- void* id;
- const char* file;
- int line;
-} grpc_timer_entry;
-
-#define MAX_COUNT (1024 * 1024 / sizeof(grpc_timer_entry))
-
-static __thread grpc_timer_entry log[MAX_COUNT];
-static __thread int count;
-
-static void log_report() {
- int i;
- for (i = 0; i < count; i++) {
- grpc_timer_entry* entry = &(log[i]);
- printf("GRPC_LAT_PROF " GRPC_PRECISE_CLOCK_FORMAT
- " %p %c %d(%s) %p %s %d\n",
- GRPC_PRECISE_CLOCK_PRINTF_ARGS(&entry->tm),
- (void*)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tag,
- entry->tagstr, entry->id, entry->file, entry->line);
- }
+typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
+
+typedef struct gpr_timer_entry {
+ gpr_timespec tm;
+ const char *tagstr;
+ const char *file;
+ short line;
+ char type;
+ uint8_t important;
+ int thd;
+} gpr_timer_entry;
+
+#define MAX_COUNT 1000000
+
+typedef struct gpr_timer_log {
+ size_t num_entries;
+ struct gpr_timer_log *next;
+ struct gpr_timer_log *prev;
+ gpr_timer_entry log[MAX_COUNT];
+} gpr_timer_log;
+
+typedef struct gpr_timer_log_list {
+ gpr_timer_log *head;
+ /* valid iff head!=NULL */
+ gpr_timer_log *tail;
+} gpr_timer_log_list;
+
+static __thread gpr_timer_log *g_thread_log;
+static gpr_once g_once_init = GPR_ONCE_INIT;
+static FILE *output_file;
+static const char *output_filename = "latency_trace.txt";
+static pthread_mutex_t g_mu;
+static pthread_cond_t g_cv;
+static gpr_timer_log_list g_in_progress_logs;
+static gpr_timer_log_list g_done_logs;
+static int g_shutdown;
+static gpr_thd_id g_writing_thread;
+static __thread int g_thread_id;
+static int g_next_thread_id;
- /* Now clear out the log */
- count = 0;
+static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
+ if (list->head == NULL) {
+ list->head = list->tail = log;
+ log->next = log->prev = NULL;
+ return 1;
+ } else {
+ log->prev = list->tail;
+ log->next = NULL;
+ list->tail->next = log;
+ list->tail = log;
+ return 0;
+ }
}
-static void grpc_timers_log_add(int tag, const char* tagstr, marker_type type,
- void* id, const char* file, int line) {
- grpc_timer_entry* entry;
+static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) {
+ gpr_timer_log *out = list->head;
+ if (out != NULL) {
+ list->head = out->next;
+ if (list->head != NULL) {
+ list->head->prev = NULL;
+ } else {
+ list->tail = NULL;
+ }
+ }
+ return out;
+}
- /* TODO (vpai) : Improve concurrency */
- if (count == MAX_COUNT) {
- log_report();
+static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) {
+ if (log->prev == NULL) {
+ list->head = log->next;
+ if (list->head != NULL) {
+ list->head->prev = NULL;
+ }
+ } else {
+ log->prev->next = log->next;
}
+ if (log->next == NULL) {
+ list->tail = log->prev;
+ if (list->tail != NULL) {
+ list->tail->next = NULL;
+ }
+ } else {
+ log->next->prev = log->prev;
+ }
+}
- entry = &log[count++];
+static void write_log(gpr_timer_log *log) {
+ size_t i;
+ if (output_file == NULL) {
+ output_file = fopen(output_filename, "w");
+ }
+ for (i = 0; i < log->num_entries; i++) {
+ gpr_timer_entry *entry = &(log->log[i]);
+ if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) {
+ entry->tm = gpr_time_0(entry->tm.clock_type);
+ }
+ fprintf(output_file,
+ "{\"t\": %lld.%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
+ "\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
+ (long long)entry->tm.tv_sec, (int)entry->tm.tv_nsec, entry->thd,
+ entry->type, entry->tagstr, entry->file, entry->line,
+ entry->important);
+ }
+}
- grpc_precise_clock_now(&entry->tm);
- entry->tag = tag;
- entry->tagstr = tagstr;
- entry->type = type;
- entry->id = id;
- entry->file = file;
- entry->line = line;
+static void writing_thread(void *unused) {
+ gpr_timer_log *log;
+ pthread_mutex_lock(&g_mu);
+ for (;;) {
+ while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) {
+ pthread_cond_wait(&g_cv, &g_mu);
+ }
+ if (log != NULL) {
+ pthread_mutex_unlock(&g_mu);
+ write_log(log);
+ free(log);
+ pthread_mutex_lock(&g_mu);
+ }
+ if (g_shutdown) {
+ pthread_mutex_unlock(&g_mu);
+ return;
+ }
+ }
}
-/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, MARK, id, file, line);
+static void flush_logs(gpr_timer_log_list *list) {
+ gpr_timer_log *log;
+ while ((log = timer_log_pop_front(list)) != NULL) {
+ write_log(log);
+ free(log);
}
}
-void grpc_timer_add_important_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, IMPORTANT, id, file, line);
+static void finish_writing() {
+ pthread_mutex_lock(&g_mu);
+ g_shutdown = 1;
+ pthread_cond_signal(&g_cv);
+ pthread_mutex_unlock(&g_mu);
+ gpr_thd_join(g_writing_thread);
+
+ gpr_log(GPR_INFO, "flushing logs");
+
+ pthread_mutex_lock(&g_mu);
+ flush_logs(&g_done_logs);
+ flush_logs(&g_in_progress_logs);
+ pthread_mutex_unlock(&g_mu);
+
+ if (output_file) {
+ fclose(output_file);
}
}
-void grpc_timer_begin(int tag, const char* tagstr, void* id, const char* file,
- int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, BEGIN, id, file, line);
+void gpr_timers_set_log_filename(const char *filename) {
+ output_filename = filename;
+}
+
+static void init_output() {
+ gpr_thd_options options = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&options);
+ gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options);
+ atexit(finish_writing);
+}
+
+static void rotate_log() {
+ gpr_timer_log *new = malloc(sizeof(*new));
+ gpr_once_init(&g_once_init, init_output);
+ new->num_entries = 0;
+ pthread_mutex_lock(&g_mu);
+ if (g_thread_log != NULL) {
+ timer_log_remove(&g_in_progress_logs, g_thread_log);
+ if (timer_log_push_back(&g_done_logs, g_thread_log)) {
+ pthread_cond_signal(&g_cv);
+ }
+ } else {
+ g_thread_id = g_next_thread_id++;
}
+ timer_log_push_back(&g_in_progress_logs, new);
+ pthread_mutex_unlock(&g_mu);
+ g_thread_log = new;
}
-void grpc_timer_end(int tag, const char* tagstr, void* id, const char* file,
- int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, END, id, file, line);
+static void gpr_timers_log_add(const char *tagstr, marker_type type,
+ int important, const char *file, int line) {
+ gpr_timer_entry *entry;
+
+ if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
+ rotate_log();
}
+
+ entry = &g_thread_log->log[g_thread_log->num_entries++];
+
+ entry->tm = gpr_now(GPR_CLOCK_PRECISE);
+ entry->tagstr = tagstr;
+ entry->type = type;
+ entry->file = file;
+ entry->line = (short)line;
+ entry->important = important != 0;
+ entry->thd = g_thread_id;
+}
+
+/* Latency profiler API implementation. */
+void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, MARK, important, file, line);
+}
+
+void gpr_timer_begin(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, BEGIN, important, file, line);
+}
+
+void gpr_timer_end(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, END, important, file, line);
}
/* Basic profiler specific API functions. */
-void grpc_timers_global_init(void) {}
+void gpr_timers_global_init(void) {}
-void grpc_timers_global_destroy(void) {}
+void gpr_timers_global_destroy(void) {}
#else /* !GRPC_BASIC_PROFILER */
-void grpc_timers_global_init(void) {}
-void grpc_timers_global_destroy(void) {}
+void gpr_timers_global_init(void) {}
+
+void gpr_timers_global_destroy(void) {}
+
+void gpr_timers_set_log_filename(const char *filename) {}
#endif /* GRPC_BASIC_PROFILER */
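Usage sketch (not part of the diff): with GRPC_BASIC_PROFILER defined, the rewritten profiler buffers entries per thread and a background thread writes them out one JSON object per line; the output file is opened lazily, so the filename should be set before the first entry is recorded. The tag and path below are illustrative values.

/* Editorial sketch, not part of the diff: record one timed span via the
 * function-level API declared in timers.h. */
#include "src/core/profiling/timers.h"

static void example_timed_section(void) {
  gpr_timers_set_log_filename("/tmp/latency_trace.txt");
  gpr_timer_begin("example_op", 0 /* important */, __FILE__, __LINE__);
  /* ... timed work ... */
  gpr_timer_end("example_op", 0, __FILE__, __LINE__);
}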
diff --git a/src/core/profiling/stap_timers.c b/src/core/profiling/stap_timers.c
index 99975163f9..efcd1af4a1 100644
--- a/src/core/profiling/stap_timers.c
+++ b/src/core/profiling/stap_timers.c
@@ -42,23 +42,23 @@
#include "src/core/profiling/stap_probes.h"
/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void gpr_timer_add_mark(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_ADD_MARK(tag);
}
-void grpc_timer_add_important_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void gpr_timer_add_important_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
-void grpc_timer_begin(int tag, const char* tagstr, void* id, const char* file,
- int line) {
+void gpr_timer_begin(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
-void grpc_timer_end(int tag, const char* tagstr, void* id, const char* file,
- int line) {
+void gpr_timer_end(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_TIMING_NS_END(tag);
}
diff --git a/src/core/profiling/timers.h b/src/core/profiling/timers.h
index 036d02f187..6a188dc566 100644
--- a/src/core/profiling/timers.h
+++ b/src/core/profiling/timers.h
@@ -38,65 +38,30 @@
extern "C" {
#endif
-void grpc_timers_global_init(void);
-void grpc_timers_global_destroy(void);
-
-void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
- const char *file, int line);
-void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
- const char *file, int line);
-void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
- int line);
-void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
- int line);
-
-enum grpc_profiling_tags {
- /* Any GRPC_PTAG_* >= than the threshold won't generate any profiling mark. */
- GRPC_PTAG_IGNORE_THRESHOLD = 1000000,
-
- /* Re. Protos. */
- GRPC_PTAG_PROTO_SERIALIZE = 100 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_PROTO_DESERIALIZE = 101 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* Re. sockets. */
- GRPC_PTAG_HANDLE_READ = 200 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_SENDMSG = 201 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_RECVMSG = 202 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_POLL_FINISHED = 203 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_TCP_CB_WRITE = 204 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_TCP_WRITE = 205 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_CALL_ON_DONE_RECV = 206 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* C++ */
- GRPC_PTAG_CPP_CALL_CREATED = 300 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_CPP_PERFORM_OPS = 301 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* Transports */
- GRPC_PTAG_HTTP2_UNLOCK = 401 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_HTTP2_UNLOCK_CLEANUP = 402 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* > 1024 Unassigned reserved. For any miscellaneous use.
- * Use addition to generate tags from this base or take advantage of the 10
- * zero'd bits for OR-ing. */
- GRPC_PTAG_OTHER_BASE = 1024
-};
+void gpr_timers_global_init(void);
+void gpr_timers_global_destroy(void);
+
+void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+ int line);
+void gpr_timer_begin(const char *tagstr, int important, const char *file,
+ int line);
+void gpr_timer_end(const char *tagstr, int important, const char *file,
+ int line);
+
+void gpr_timers_set_log_filename(const char *filename);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
-#define GRPC_TIMER_MARK(tag, id) \
- do { \
- } while (0)
-
-#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
- do { \
+#define GPR_TIMER_MARK(tag, important) \
+ do { \
} while (0)
-#define GRPC_TIMER_BEGIN(tag, id) \
- do { \
+#define GPR_TIMER_BEGIN(tag, important) \
+ do { \
} while (0)
-#define GRPC_TIMER_END(tag, id) \
- do { \
+#define GPR_TIMER_END(tag, important) \
+ do { \
} while (0)
#else /* at least one profiler requested... */
@@ -106,28 +71,14 @@ enum grpc_profiling_tags {
#endif
/* Generic profiling interface. */
-#define GRPC_TIMER_MARK(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_add_mark(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
- __LINE__); \
- }
+#define GPR_TIMER_MARK(tag, important) \
+ gpr_timer_add_mark(tag, important, __FILE__, __LINE__);
-#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_add_important_mark(tag, #tag, ((void *)(gpr_intptr)(id)), \
- __FILE__, __LINE__); \
- }
+#define GPR_TIMER_BEGIN(tag, important) \
+ gpr_timer_begin(tag, important, __FILE__, __LINE__);
-#define GRPC_TIMER_BEGIN(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_begin(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
- __LINE__); \
- }
-
-#define GRPC_TIMER_END(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_end(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, __LINE__); \
- }
+#define GPR_TIMER_END(tag, important) \
+ gpr_timer_end(tag, important, __FILE__, __LINE__);
#ifdef GRPC_STAP_PROFILER
/* Empty placeholder for now. */
@@ -141,6 +92,28 @@ enum grpc_profiling_tags {
#ifdef __cplusplus
}
+
+#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+namespace grpc {
+class ProfileScope {
+ public:
+ ProfileScope(const char *desc, bool important) : desc_(desc) {
+ GPR_TIMER_BEGIN(desc_, important ? 1 : 0);
+ }
+ ~ProfileScope() { GPR_TIMER_END(desc_, 0); }
+
+ private:
+ const char *const desc_;
+};
+}
+
+#define GPR_TIMER_SCOPE(tag, important) \
+ ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important))
+#else
+#define GPR_TIMER_SCOPE(tag, important) \
+ do { \
+ } while (false)
+#endif
#endif
#endif /* GRPC_CORE_PROFILING_TIMERS_H */
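The timers.h hunk above replaces the enum-tagged GRPC_TIMER_* macros with string-tagged gpr_timer_* entry points plus an "important" flag, and (under __cplusplus) adds an RAII GPR_TIMER_SCOPE helper. A minimal usage sketch against the macros as declared above; the surrounding function and the tag strings are hypothetical:

#include "src/core/profiling/timers.h"

/* Hypothetical worker, used only to illustrate the new string-tagged API. */
static void process_request(void) {
  GPR_TIMER_BEGIN("process_request", 0); /* 0 = not marked "important" */
  /* ... first phase of work ... */
  GPR_TIMER_MARK("process_request.midpoint", 0);
  /* ... second phase of work ... */
  GPR_TIMER_END("process_request", 0);
}

In C++ translation units the same span can be written as GPR_TIMER_SCOPE("process_request", 0); and the ProfileScope destructor emits the end event. When neither GRPC_BASIC_PROFILER nor GRPC_STAP_PROFILER is defined, all of these expand to the empty do { } while (0) bodies shown above.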
diff --git a/src/core/proto/grpc/lb/v0/load_balancer.pb.c b/src/core/proto/grpc/lb/v0/load_balancer.pb.c
new file mode 100644
index 0000000000..59aae30cff
--- /dev/null
+++ b/src/core/proto/grpc/lb/v0/load_balancer.pb.c
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.3.5-dev */
+
+#include "src/core/proto/grpc/lb/v0/load_balancer.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+
+
+const pb_field_t grpc_lb_v0_Duration_fields[3] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, grpc_lb_v0_Duration, seconds, seconds, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Duration, nanos, seconds, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_LoadBalanceRequest_fields[3] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_lb_v0_LoadBalanceRequest, initial_request, initial_request, &grpc_lb_v0_InitialLoadBalanceRequest_fields),
+ PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_LoadBalanceRequest, client_stats, initial_request, &grpc_lb_v0_ClientStats_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_InitialLoadBalanceRequest_fields[2] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_InitialLoadBalanceRequest, name, name, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_ClientStats_fields[4] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, grpc_lb_v0_ClientStats, total_requests, total_requests, 0),
+ PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ClientStats, client_rpc_errors, total_requests, 0),
+ PB_FIELD( 3, INT64 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ClientStats, dropped_requests, client_rpc_errors, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_LoadBalanceResponse_fields[3] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_lb_v0_LoadBalanceResponse, initial_response, initial_response, &grpc_lb_v0_InitialLoadBalanceResponse_fields),
+ PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_LoadBalanceResponse, server_list, initial_response, &grpc_lb_v0_ServerList_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_InitialLoadBalanceResponse_fields[4] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_InitialLoadBalanceResponse, client_config, client_config, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, grpc_lb_v0_InitialLoadBalanceResponse, load_balancer_delegate, client_config, 0),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval, load_balancer_delegate, &grpc_lb_v0_Duration_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_ServerList_fields[3] = {
+ PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, grpc_lb_v0_ServerList, servers, servers, &grpc_lb_v0_Server_fields),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ServerList, expiration_interval, servers, &grpc_lb_v0_Duration_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_Server_fields[5] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_Server, ip_address, ip_address, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, port, ip_address, 0),
+ PB_FIELD( 3, BYTES , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, load_balance_token, port, 0),
+ PB_FIELD( 4, BOOL , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, drop_request, load_balance_token, 0),
+ PB_LAST_FIELD
+};
+
+
+/* Check that field information fits in pb_field_t */
+#if !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_32BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in 8 or 16 bit
+ * field descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v0_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v0_ServerList, servers) < 65536 && pb_membersize(grpc_lb_v0_ServerList, expiration_interval) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v0_Duration_grpc_lb_v0_LoadBalanceRequest_grpc_lb_v0_InitialLoadBalanceRequest_grpc_lb_v0_ClientStats_grpc_lb_v0_LoadBalanceResponse_grpc_lb_v0_InitialLoadBalanceResponse_grpc_lb_v0_ServerList_grpc_lb_v0_Server)
+#endif
+
+#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_16BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in the default
+ * 8 bit descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v0_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v0_ServerList, servers) < 256 && pb_membersize(grpc_lb_v0_ServerList, expiration_interval) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v0_Duration_grpc_lb_v0_LoadBalanceRequest_grpc_lb_v0_InitialLoadBalanceRequest_grpc_lb_v0_ClientStats_grpc_lb_v0_LoadBalanceResponse_grpc_lb_v0_InitialLoadBalanceResponse_grpc_lb_v0_ServerList_grpc_lb_v0_Server)
+#endif
+
+
diff --git a/src/core/proto/grpc/lb/v0/load_balancer.pb.h b/src/core/proto/grpc/lb/v0/load_balancer.pb.h
new file mode 100644
index 0000000000..3599f881bb
--- /dev/null
+++ b/src/core/proto/grpc/lb/v0/load_balancer.pb.h
@@ -0,0 +1,182 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.3.5-dev */
+
+#ifndef PB_LOAD_BALANCER_PB_H_INCLUDED
+#define PB_LOAD_BALANCER_PB_H_INCLUDED
+#include "third_party/nanopb/pb.h"
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Struct definitions */
+typedef struct _grpc_lb_v0_ClientStats {
+ bool has_total_requests;
+ int64_t total_requests;
+ bool has_client_rpc_errors;
+ int64_t client_rpc_errors;
+ bool has_dropped_requests;
+ int64_t dropped_requests;
+} grpc_lb_v0_ClientStats;
+
+typedef struct _grpc_lb_v0_Duration {
+ bool has_seconds;
+ int64_t seconds;
+ bool has_nanos;
+ int32_t nanos;
+} grpc_lb_v0_Duration;
+
+typedef struct _grpc_lb_v0_InitialLoadBalanceRequest {
+ bool has_name;
+ char name[128];
+} grpc_lb_v0_InitialLoadBalanceRequest;
+
+typedef PB_BYTES_ARRAY_T(64) grpc_lb_v0_Server_load_balance_token_t;
+typedef struct _grpc_lb_v0_Server {
+ bool has_ip_address;
+ char ip_address[46];
+ bool has_port;
+ int32_t port;
+ bool has_load_balance_token;
+ grpc_lb_v0_Server_load_balance_token_t load_balance_token;
+ bool has_drop_request;
+ bool drop_request;
+} grpc_lb_v0_Server;
+
+typedef struct _grpc_lb_v0_InitialLoadBalanceResponse {
+ bool has_client_config;
+ char client_config[64];
+ bool has_load_balancer_delegate;
+ char load_balancer_delegate[64];
+ bool has_client_stats_report_interval;
+ grpc_lb_v0_Duration client_stats_report_interval;
+} grpc_lb_v0_InitialLoadBalanceResponse;
+
+typedef struct _grpc_lb_v0_LoadBalanceRequest {
+ bool has_initial_request;
+ grpc_lb_v0_InitialLoadBalanceRequest initial_request;
+ bool has_client_stats;
+ grpc_lb_v0_ClientStats client_stats;
+} grpc_lb_v0_LoadBalanceRequest;
+
+typedef struct _grpc_lb_v0_ServerList {
+ pb_callback_t servers;
+ bool has_expiration_interval;
+ grpc_lb_v0_Duration expiration_interval;
+} grpc_lb_v0_ServerList;
+
+typedef struct _grpc_lb_v0_LoadBalanceResponse {
+ bool has_initial_response;
+ grpc_lb_v0_InitialLoadBalanceResponse initial_response;
+ bool has_server_list;
+ grpc_lb_v0_ServerList server_list;
+} grpc_lb_v0_LoadBalanceResponse;
+
+/* Default values for struct fields */
+
+/* Initializer values for message structs */
+#define grpc_lb_v0_Duration_init_default {false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceRequest_init_default {false, grpc_lb_v0_InitialLoadBalanceRequest_init_default, false, grpc_lb_v0_ClientStats_init_default}
+#define grpc_lb_v0_InitialLoadBalanceRequest_init_default {false, ""}
+#define grpc_lb_v0_ClientStats_init_default {false, 0, false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceResponse_init_default {false, grpc_lb_v0_InitialLoadBalanceResponse_init_default, false, grpc_lb_v0_ServerList_init_default}
+#define grpc_lb_v0_InitialLoadBalanceResponse_init_default {false, "", false, "", false, grpc_lb_v0_Duration_init_default}
+#define grpc_lb_v0_ServerList_init_default {{{NULL}, NULL}, false, grpc_lb_v0_Duration_init_default}
+#define grpc_lb_v0_Server_init_default {false, "", false, 0, false, {0, {0}}, false, 0}
+#define grpc_lb_v0_Duration_init_zero {false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceRequest_init_zero {false, grpc_lb_v0_InitialLoadBalanceRequest_init_zero, false, grpc_lb_v0_ClientStats_init_zero}
+#define grpc_lb_v0_InitialLoadBalanceRequest_init_zero {false, ""}
+#define grpc_lb_v0_ClientStats_init_zero {false, 0, false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceResponse_init_zero {false, grpc_lb_v0_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v0_ServerList_init_zero}
+#define grpc_lb_v0_InitialLoadBalanceResponse_init_zero {false, "", false, "", false, grpc_lb_v0_Duration_init_zero}
+#define grpc_lb_v0_ServerList_init_zero {{{NULL}, NULL}, false, grpc_lb_v0_Duration_init_zero}
+#define grpc_lb_v0_Server_init_zero {false, "", false, 0, false, {0, {0}}, false, 0}
+
+/* Field tags (for use in manual encoding/decoding) */
+#define grpc_lb_v0_ClientStats_total_requests_tag 1
+#define grpc_lb_v0_ClientStats_client_rpc_errors_tag 2
+#define grpc_lb_v0_ClientStats_dropped_requests_tag 3
+#define grpc_lb_v0_Duration_seconds_tag 1
+#define grpc_lb_v0_Duration_nanos_tag 2
+#define grpc_lb_v0_InitialLoadBalanceRequest_name_tag 1
+#define grpc_lb_v0_Server_ip_address_tag 1
+#define grpc_lb_v0_Server_port_tag 2
+#define grpc_lb_v0_Server_load_balance_token_tag 3
+#define grpc_lb_v0_Server_drop_request_tag 4
+#define grpc_lb_v0_InitialLoadBalanceResponse_client_config_tag 1
+#define grpc_lb_v0_InitialLoadBalanceResponse_load_balancer_delegate_tag 2
+#define grpc_lb_v0_InitialLoadBalanceResponse_client_stats_report_interval_tag 3
+#define grpc_lb_v0_LoadBalanceRequest_initial_request_tag 1
+#define grpc_lb_v0_LoadBalanceRequest_client_stats_tag 2
+#define grpc_lb_v0_ServerList_servers_tag 1
+#define grpc_lb_v0_ServerList_expiration_interval_tag 3
+#define grpc_lb_v0_LoadBalanceResponse_initial_response_tag 1
+#define grpc_lb_v0_LoadBalanceResponse_server_list_tag 2
+
+/* Struct field encoding specification for nanopb */
+extern const pb_field_t grpc_lb_v0_Duration_fields[3];
+extern const pb_field_t grpc_lb_v0_LoadBalanceRequest_fields[3];
+extern const pb_field_t grpc_lb_v0_InitialLoadBalanceRequest_fields[2];
+extern const pb_field_t grpc_lb_v0_ClientStats_fields[4];
+extern const pb_field_t grpc_lb_v0_LoadBalanceResponse_fields[3];
+extern const pb_field_t grpc_lb_v0_InitialLoadBalanceResponse_fields[4];
+extern const pb_field_t grpc_lb_v0_ServerList_fields[3];
+extern const pb_field_t grpc_lb_v0_Server_fields[5];
+
+/* Maximum encoded size of messages (where known) */
+#define grpc_lb_v0_Duration_size 22
+#define grpc_lb_v0_LoadBalanceRequest_size 169
+#define grpc_lb_v0_InitialLoadBalanceRequest_size 131
+#define grpc_lb_v0_ClientStats_size 33
+#define grpc_lb_v0_LoadBalanceResponse_size (165 + grpc_lb_v0_ServerList_size)
+#define grpc_lb_v0_InitialLoadBalanceResponse_size 156
+#define grpc_lb_v0_Server_size 127
+
+/* Message IDs (where set with "msgid" option) */
+#ifdef PB_MSGID
+
+#define LOAD_BALANCER_MESSAGES \
+
+
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
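The generated header above pairs plain C structs with *_fields descriptors that nanopb's encoder and decoder walk at runtime. A minimal sketch of serializing an initial LoadBalanceRequest with nanopb's pb_ostream_from_buffer()/pb_encode(); the include path for pb_encode.h, the buffer handling, and the helper name are assumptions for illustration:

#include <stdbool.h>
#include <string.h>
#include "pb_encode.h" /* nanopb runtime; the in-tree include path may differ */
#include "src/core/proto/grpc/lb/v0/load_balancer.pb.h"

/* Sketch: encode a request carrying only initial_request.name.
   Returns the encoded length, or 0 on failure. */
static size_t encode_initial_request(const char *lb_service_name,
                                     uint8_t *buf, size_t buf_len) {
  grpc_lb_v0_LoadBalanceRequest req = grpc_lb_v0_LoadBalanceRequest_init_zero;
  req.has_initial_request = true;
  req.initial_request.has_name = true;
  strncpy(req.initial_request.name, lb_service_name,
          sizeof(req.initial_request.name) - 1);
  pb_ostream_t stream = pb_ostream_from_buffer(buf, buf_len);
  if (!pb_encode(&stream, grpc_lb_v0_LoadBalanceRequest_fields, &req)) {
    return 0; /* stream.errmsg describes the failure when error strings are
                 compiled in */
  }
  return stream.bytes_written;
}

Decoding is symmetric (pb_istream_from_buffer()/pb_decode()), with one wrinkle visible above: grpc_lb_v0_ServerList.servers is a REPEATED CALLBACK field, so a pb_callback_t decode function has to be supplied to receive the individual Server entries.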
diff --git a/src/core/security/auth_filters.h b/src/core/security/auth_filters.h
index ff921690e0..1154a1d914 100644
--- a/src/core/security/auth_filters.h
+++ b/src/core/security/auth_filters.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,12 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H
-#define GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H
+#ifndef GRPC_CORE_SECURITY_AUTH_FILTERS_H
+#define GRPC_CORE_SECURITY_AUTH_FILTERS_H
#include "src/core/channel/channel_stack.h"
extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
-#endif /* GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H */
+#endif /* GRPC_CORE_SECURITY_AUTH_FILTERS_H */
diff --git a/src/core/security/base64.c b/src/core/security/b64.c
index 8dfaef846f..c40b528e2f 100644
--- a/src/core/security/base64.c
+++ b/src/core/security/b64.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,9 @@
*
*/
-#include "src/core/security/base64.h"
+#include "src/core/security/b64.h"
+#include <stdint.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -41,7 +42,7 @@
/* --- Constants. --- */
-static const char base64_bytes[] = {
+static const int8_t base64_bytes[] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -114,7 +115,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
}
GPR_ASSERT(current >= result);
- GPR_ASSERT((gpr_uintptr)(current - result) < result_projected_size);
+ GPR_ASSERT((uintptr_t)(current - result) < result_projected_size);
result[current - result] = '\0';
return result;
}
@@ -125,13 +126,14 @@ gpr_slice grpc_base64_decode(const char *b64, int url_safe) {
static void decode_one_char(const unsigned char *codes, unsigned char *result,
size_t *result_offset) {
- gpr_uint32 packed = (codes[0] << 2) | (codes[1] >> 4);
+ uint32_t packed = ((uint32_t)codes[0] << 2) | ((uint32_t)codes[1] >> 4);
result[(*result_offset)++] = (unsigned char)packed;
}
static void decode_two_chars(const unsigned char *codes, unsigned char *result,
size_t *result_offset) {
- gpr_uint32 packed = (codes[0] << 10) | (codes[1] << 4) | (codes[2] >> 2);
+ uint32_t packed = ((uint32_t)codes[0] << 10) | ((uint32_t)codes[1] << 4) |
+ ((uint32_t)codes[2] >> 2);
result[(*result_offset)++] = (unsigned char)(packed >> 8);
result[(*result_offset)++] = (unsigned char)(packed);
}
@@ -171,8 +173,8 @@ static int decode_group(const unsigned char *codes, size_t num_codes,
decode_two_chars(codes, result, result_offset);
} else {
/* No padding. */
- gpr_uint32 packed =
- (codes[0] << 18) | (codes[1] << 12) | (codes[2] << 6) | codes[3];
+ uint32_t packed = ((uint32_t)codes[0] << 18) | ((uint32_t)codes[1] << 12) |
+ ((uint32_t)codes[2] << 6) | codes[3];
result[(*result_offset)++] = (unsigned char)(packed >> 16);
result[(*result_offset)++] = (unsigned char)(packed >> 8);
result[(*result_offset)++] = (unsigned char)(packed);
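The casts added in decode_one_char/decode_two_chars/decode_group make the 6-bit-to-8-bit repacking explicit: four base64 symbols carry 24 bits, which the no-padding branch reassembles into three output bytes. A worked example using the standard alphabet values (not taken from this file): for "TWFu", T=19, W=22, F=5, u=46, so packed = (19 << 18) | (22 << 12) | (5 << 6) | 46 = 0x4D616E, and the three emitted bytes are 0x4D 'M', 0x61 'a', 0x6E 'n'.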
diff --git a/src/core/security/base64.h b/src/core/security/b64.h
index b9abc07b52..d18f69563d 100644
--- a/src/core/security/base64.h
+++ b/src/core/security/b64.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_BASE64_H
-#define GRPC_INTERNAL_CORE_SECURITY_BASE64_H
+#ifndef GRPC_CORE_SECURITY_B64_H
+#define GRPC_CORE_SECURITY_B64_H
#include <grpc/support/slice.h>
@@ -49,4 +49,4 @@ gpr_slice grpc_base64_decode(const char *b64, int url_safe);
gpr_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
int url_safe);
-#endif /* GRPC_INTERNAL_CORE_SECURITY_BASE64_H */
+#endif /* GRPC_CORE_SECURITY_B64_H */
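A round-trip check against the renamed header's public entry points; only the decode signatures are fully visible above, so the slice accessor and unref call here are assumptions about the gpr_slice API:

#include <grpc/support/slice.h>
#include "src/core/security/b64.h"

/* Sketch: decode a base64 string and return the number of decoded bytes. */
static size_t decoded_length(const char *b64) {
  gpr_slice s = grpc_base64_decode(b64, 0 /* url_safe */);
  size_t len = GPR_SLICE_LENGTH(s); /* e.g. "TWFu" decodes to 3 bytes */
  gpr_slice_unref(s);
  return len;
}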
diff --git a/src/core/security/client_auth_filter.c b/src/core/security/client_auth_filter.c
index e9bd45db68..332d4259d2 100644
--- a/src/core/security/client_auth_filter.c
+++ b/src/core/security/client_auth_filter.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,18 +39,19 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/support/string.h"
#include "src/core/channel/channel_stack.h"
-#include "src/core/security/security_context.h"
-#include "src/core/security/security_connector.h"
#include "src/core/security/credentials.h"
+#include "src/core/security/security_connector.h"
+#include "src/core/security/security_context.h"
+#include "src/core/support/string.h"
#include "src/core/surface/call.h"
+#include "src/core/transport/static_metadata.h"
#define MAX_CREDENTIALS_METADATA_COUNT 4
/* We can have per-call credentials. */
typedef struct {
- grpc_credentials *creds;
+ grpc_call_credentials *creds;
grpc_mdstr *host;
grpc_mdstr *method;
/* pollset bound to this call; if we need to make external
@@ -58,62 +59,77 @@ typedef struct {
so that work can progress when this call wants work to
progress */
grpc_pollset *pollset;
- grpc_transport_op op;
- size_t op_md_idx;
- int sent_initial_metadata;
+ grpc_transport_stream_op op;
+ uint8_t security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
+ grpc_auth_metadata_context auth_md_context;
} call_data;
/* We can have per-channel credentials. */
typedef struct {
grpc_channel_security_connector *security_connector;
- grpc_mdctx *md_ctx;
- grpc_mdstr *authority_string;
- grpc_mdstr *path_string;
- grpc_mdstr *error_msg_key;
- grpc_mdstr *status_key;
+ grpc_auth_context *auth_context;
} channel_data;
-static void bubble_up_error(grpc_call_element *elem, const char *error_msg) {
+static void reset_auth_metadata_context(
+ grpc_auth_metadata_context *auth_md_context) {
+ if (auth_md_context->service_url != NULL) {
+ gpr_free((char *)auth_md_context->service_url);
+ auth_md_context->service_url = NULL;
+ }
+ if (auth_md_context->method_name != NULL) {
+ gpr_free((char *)auth_md_context->method_name);
+ auth_md_context->method_name = NULL;
+ }
+ GRPC_AUTH_CONTEXT_UNREF(
+ (grpc_auth_context *)auth_md_context->channel_auth_context,
+ "grpc_auth_metadata_context");
+ auth_md_context->channel_auth_context = NULL;
+}
+
+static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_status_code status, const char *error_msg) {
call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_transport_op_add_cancellation(
- &calld->op, GRPC_STATUS_UNAUTHENTICATED,
- grpc_mdstr_from_string(chand->md_ctx, error_msg));
- grpc_call_next_op(elem, &calld->op);
+ gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
+ grpc_transport_stream_op_add_cancellation(&calld->op, status);
+ grpc_call_next_op(exec_ctx, elem, &calld->op);
}
-static void on_credentials_metadata(void *user_data,
+static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_transport_op *op = &calld->op;
+ grpc_transport_stream_op *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
+ reset_auth_metadata_context(&calld->auth_md_context);
if (status != GRPC_CREDENTIALS_OK) {
- bubble_up_error(elem, "Credentials failed to get metadata.");
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
+ "Credentials failed to get metadata.");
return;
}
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
- GPR_ASSERT(op->send_ops && op->send_ops->nops > calld->op_md_idx &&
- op->send_ops->ops[calld->op_md_idx].type == GRPC_OP_METADATA);
- mdb = &op->send_ops->ops[calld->op_md_idx].data.metadata;
+ GPR_ASSERT(op->send_initial_metadata != NULL);
+ mdb = op->send_initial_metadata;
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
- grpc_mdelem_from_slices(chand->md_ctx, gpr_slice_ref(md_elems[i].key),
+ grpc_mdelem_from_slices(gpr_slice_ref(md_elems[i].key),
gpr_slice_ref(md_elems[i].value)));
}
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
-static char *build_service_url(const char *url_scheme, call_data *calld) {
- char *service_url;
+void build_auth_metadata_context(grpc_security_connector *sc,
+ grpc_auth_context *auth_context,
+ call_data *calld) {
char *service = gpr_strdup(grpc_mdstr_as_c_string(calld->method));
char *last_slash = strrchr(service, '/');
+ char *method_name = NULL;
+ char *service_url = NULL;
+ reset_auth_metadata_context(&calld->auth_md_context);
if (last_slash == NULL) {
gpr_log(GPR_ERROR, "No '/' found in fully qualified method name");
service[0] = '\0';
@@ -122,67 +138,70 @@ static char *build_service_url(const char *url_scheme, call_data *calld) {
service[1] = '\0';
} else {
*last_slash = '\0';
+ method_name = gpr_strdup(last_slash + 1);
}
- if (url_scheme == NULL) url_scheme = "";
- gpr_asprintf(&service_url, "%s://%s%s", url_scheme,
+ if (method_name == NULL) method_name = gpr_strdup("");
+ gpr_asprintf(&service_url, "%s://%s%s",
+ sc->url_scheme == NULL ? "" : sc->url_scheme,
grpc_mdstr_as_c_string(calld->host), service);
+ calld->auth_md_context.service_url = service_url;
+ calld->auth_md_context.method_name = method_name;
+ calld->auth_md_context.channel_auth_context =
+ GRPC_AUTH_CONTEXT_REF(auth_context, "grpc_auth_metadata_context");
gpr_free(service);
- return service_url;
}
-static void send_security_metadata(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void send_security_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_client_security_context *ctx =
(grpc_client_security_context *)op->context[GRPC_CONTEXT_SECURITY].value;
- char *service_url = NULL;
- grpc_credentials *channel_creds =
+ grpc_call_credentials *channel_call_creds =
chand->security_connector->request_metadata_creds;
- int channel_creds_has_md =
- (channel_creds != NULL) &&
- grpc_credentials_has_request_metadata(channel_creds);
- int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL) &&
- grpc_credentials_has_request_metadata(ctx->creds);
+ int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL);
- if (!channel_creds_has_md && !call_creds_has_md) {
+ if (channel_call_creds == NULL && !call_creds_has_md) {
/* Skip sending metadata altogether. */
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
return;
}
- if (channel_creds_has_md && call_creds_has_md) {
- calld->creds = grpc_composite_credentials_create(channel_creds, ctx->creds);
+ if (channel_call_creds != NULL && call_creds_has_md) {
+ calld->creds = grpc_composite_call_credentials_create(channel_call_creds,
+ ctx->creds, NULL);
if (calld->creds == NULL) {
- bubble_up_error(elem,
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
"Incompatible credentials set on channel and call.");
return;
}
} else {
- calld->creds =
- grpc_credentials_ref(call_creds_has_md ? ctx->creds : channel_creds);
+ calld->creds = grpc_call_credentials_ref(
+ call_creds_has_md ? ctx->creds : channel_call_creds);
}
- service_url =
- build_service_url(chand->security_connector->base.url_scheme, calld);
+ build_auth_metadata_context(&chand->security_connector->base,
+ chand->auth_context, calld);
calld->op = *op; /* Copy op (originates from the caller's stack). */
GPR_ASSERT(calld->pollset);
- grpc_credentials_get_request_metadata(
- calld->creds, calld->pollset, service_url, on_credentials_metadata, elem);
- gpr_free(service_url);
+ grpc_call_credentials_get_request_metadata(
+ exec_ctx, calld->creds, calld->pollset, calld->auth_md_context,
+ on_credentials_metadata, elem);
}
-static void on_host_checked(void *user_data, grpc_security_status status) {
+static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_security_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
if (status == GRPC_SECURITY_OK) {
- send_security_metadata(elem, &calld->op);
+ send_security_metadata(exec_ctx, elem, &calld->op);
} else {
char *error_msg;
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
grpc_mdstr_as_c_string(calld->host));
- bubble_up_error(elem, error_msg);
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
gpr_free(error_msg);
}
}
@@ -192,148 +211,126 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void auth_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l;
- size_t i;
-
- /* TODO(jboeuf): write the call auth context. */
+ grpc_client_security_context *sec_ctx = NULL;
- if (op->bind_pollset) {
- calld->pollset = op->bind_pollset;
+ if (calld->security_context_set == 0 &&
+ op->cancel_with_status == GRPC_STATUS_OK) {
+ calld->security_context_set = 1;
+ GPR_ASSERT(op->context);
+ if (op->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ op->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ op->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
+ }
+ sec_ctx = op->context[GRPC_CONTEXT_SECURITY].value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
- if (op->send_ops && !calld->sent_initial_metadata) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *sop = &ops[i];
- if (sop->type != GRPC_OP_METADATA) continue;
- calld->op_md_idx = i;
- calld->sent_initial_metadata = 1;
- for (l = sop->data.metadata.list.head; l != NULL; l = l->next) {
- grpc_mdelem *md = l->md;
- /* Pointer comparison is OK for md_elems created from the same context.
- */
- if (md->key == chand->authority_string) {
- if (calld->host != NULL) grpc_mdstr_unref(calld->host);
- calld->host = grpc_mdstr_ref(md->value);
- } else if (md->key == chand->path_string) {
- if (calld->method != NULL) grpc_mdstr_unref(calld->method);
- calld->method = grpc_mdstr_ref(md->value);
- }
- }
- if (calld->host != NULL) {
- grpc_security_status status;
- const char *call_host = grpc_mdstr_as_c_string(calld->host);
- calld->op = *op; /* Copy op (originates from the caller's stack). */
- status = grpc_channel_security_connector_check_call_host(
- chand->security_connector, call_host, on_host_checked, elem);
- if (status != GRPC_SECURITY_OK) {
- if (status == GRPC_SECURITY_ERROR) {
- char *error_msg;
- gpr_asprintf(&error_msg,
- "Invalid host %s set in :authority metadata.",
- call_host);
- bubble_up_error(elem, error_msg);
- gpr_free(error_msg);
- }
- return; /* early exit */
- }
+ if (op->send_initial_metadata != NULL) {
+ for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
+ grpc_mdelem *md = l->md;
+ /* Pointer comparison is OK for md_elems created from the same context.
+ */
+ if (md->key == GRPC_MDSTR_AUTHORITY) {
+ if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
+ calld->host = GRPC_MDSTR_REF(md->value);
+ } else if (md->key == GRPC_MDSTR_PATH) {
+ if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
+ calld->method = GRPC_MDSTR_REF(md->value);
}
- send_security_metadata(elem, op);
+ }
+ if (calld->host != NULL) {
+ const char *call_host = grpc_mdstr_as_c_string(calld->host);
+ calld->op = *op; /* Copy op (originates from the caller's stack). */
+ grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host, chand->auth_context,
+ on_host_checked, elem);
return; /* early exit */
}
}
- /* pass control up or down the stack */
- grpc_call_next_op(elem, op);
-}
-
-/* Called on special channel events, such as disconnection or new incoming
- calls on the server */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- grpc_channel_next_op(elem, op);
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- calld->creds = NULL;
- calld->host = NULL;
- calld->method = NULL;
- calld->pollset = NULL;
- calld->sent_initial_metadata = 0;
+ memset(calld, 0, sizeof(*calld));
+}
- GPR_ASSERT(!initial_op || !initial_op->send_ops);
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = elem->call_data;
- grpc_credentials_unref(calld->creds);
+ grpc_call_credentials_unref(calld->creds);
if (calld->host != NULL) {
- grpc_mdstr_unref(calld->host);
+ GRPC_MDSTR_UNREF(calld->host);
}
if (calld->method != NULL) {
- grpc_mdstr_unref(calld->method);
+ GRPC_MDSTR_UNREF(calld->method);
}
+ reset_auth_metadata_context(&calld->auth_md_context);
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
- grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ grpc_security_connector *sc =
+ grpc_find_security_connector_in_args(args->channel_args);
+ grpc_auth_context *auth_context =
+ grpc_find_auth_context_in_args(args->channel_args);
+
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(!args->is_last);
GPR_ASSERT(sc != NULL);
+ GPR_ASSERT(auth_context != NULL);
/* initialize members */
- GPR_ASSERT(sc->is_client_side);
chand->security_connector =
- (grpc_channel_security_connector *)grpc_security_connector_ref(sc);
- chand->md_ctx = metadata_context;
- chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority");
- chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path");
- chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
- chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status");
+ (grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
+ sc, "client_auth_filter");
+ chand->auth_context =
+ GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter");
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
- grpc_channel_security_connector *ctx = chand->security_connector;
- if (ctx != NULL) grpc_security_connector_unref(&ctx->base);
- if (chand->authority_string != NULL) {
- grpc_mdstr_unref(chand->authority_string);
- }
- if (chand->error_msg_key != NULL) {
- grpc_mdstr_unref(chand->error_msg_key);
- }
- if (chand->status_key != NULL) {
- grpc_mdstr_unref(chand->status_key);
- }
- if (chand->path_string != NULL) {
- grpc_mdstr_unref(chand->path_string);
+ grpc_channel_security_connector *sc = chand->security_connector;
+ if (sc != NULL) {
+ GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "client_auth_filter");
}
+ GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter");
}
const grpc_channel_filter grpc_client_auth_filter = {
- auth_start_transport_op, channel_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, "client-auth"};
+ auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
+ "client-auth"};
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index cf663faf2d..b4fa616fa7 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,16 +33,16 @@
#include "src/core/security/credentials.h"
-#include <string.h>
#include <stdio.h>
+#include <string.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/http_client_filter.h"
-#include "src/core/json/json.h"
#include "src/core/httpcli/httpcli.h"
-#include "src/core/iomgr/iomgr.h"
-#include "src/core/security/json_token.h"
+#include "src/core/iomgr/executor.h"
+#include "src/core/json/json.h"
#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -52,97 +52,121 @@
/* -- Common. -- */
-typedef struct {
- grpc_credentials *creds;
+struct grpc_credentials_metadata_request {
+ grpc_call_credentials *creds;
grpc_credentials_metadata_cb cb;
- grpc_iomgr_closure *on_simulated_token_fetch_done_closure;
void *user_data;
-} grpc_credentials_metadata_request;
+};
static grpc_credentials_metadata_request *
-grpc_credentials_metadata_request_create(grpc_credentials *creds,
+grpc_credentials_metadata_request_create(grpc_call_credentials *creds,
grpc_credentials_metadata_cb cb,
void *user_data) {
grpc_credentials_metadata_request *r =
gpr_malloc(sizeof(grpc_credentials_metadata_request));
- r->creds = grpc_credentials_ref(creds);
+ r->creds = grpc_call_credentials_ref(creds);
r->cb = cb;
- r->on_simulated_token_fetch_done_closure =
- gpr_malloc(sizeof(grpc_iomgr_closure));
r->user_data = user_data;
return r;
}
static void grpc_credentials_metadata_request_destroy(
grpc_credentials_metadata_request *r) {
- grpc_credentials_unref(r->creds);
- gpr_free(r->on_simulated_token_fetch_done_closure);
+ grpc_call_credentials_unref(r->creds);
gpr_free(r);
}
-grpc_credentials *grpc_credentials_ref(grpc_credentials *creds) {
+grpc_channel_credentials *grpc_channel_credentials_ref(
+ grpc_channel_credentials *creds) {
if (creds == NULL) return NULL;
gpr_ref(&creds->refcount);
return creds;
}
-void grpc_credentials_unref(grpc_credentials *creds) {
+void grpc_channel_credentials_unref(grpc_channel_credentials *creds) {
if (creds == NULL) return;
- if (gpr_unref(&creds->refcount)) creds->vtable->destroy(creds);
+ if (gpr_unref(&creds->refcount)) {
+ if (creds->vtable->destruct != NULL) creds->vtable->destruct(creds);
+ gpr_free(creds);
+ }
}
-void grpc_credentials_release(grpc_credentials *creds) {
- grpc_credentials_unref(creds);
+void grpc_channel_credentials_release(grpc_channel_credentials *creds) {
+ GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds));
+ grpc_channel_credentials_unref(creds);
}
-int grpc_credentials_has_request_metadata(grpc_credentials *creds) {
- if (creds == NULL) return 0;
- return creds->vtable->has_request_metadata(creds);
+grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds) {
+ if (creds == NULL) return NULL;
+ gpr_ref(&creds->refcount);
+ return creds;
+}
+
+void grpc_call_credentials_unref(grpc_call_credentials *creds) {
+ if (creds == NULL) return;
+ if (gpr_unref(&creds->refcount)) {
+ if (creds->vtable->destruct != NULL) creds->vtable->destruct(creds);
+ gpr_free(creds);
+ }
}
-int grpc_credentials_has_request_metadata_only(grpc_credentials *creds) {
- if (creds == NULL) return 0;
- return creds->vtable->has_request_metadata_only(creds);
+void grpc_call_credentials_release(grpc_call_credentials *creds) {
+ GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds));
+ grpc_call_credentials_unref(creds);
}
-void grpc_credentials_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
- if (creds == NULL || !grpc_credentials_has_request_metadata(creds) ||
- creds->vtable->get_request_metadata == NULL) {
+void grpc_call_credentials_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_credentials_metadata_cb cb, void *user_data) {
+ if (creds == NULL || creds->vtable->get_request_metadata == NULL) {
if (cb != NULL) {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
}
return;
}
- creds->vtable->get_request_metadata(creds, pollset, service_url, cb,
+ creds->vtable->get_request_metadata(exec_ctx, creds, pollset, context, cb,
user_data);
}
-grpc_security_status grpc_credentials_create_security_connector(
- grpc_credentials *creds, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
+grpc_security_status grpc_channel_credentials_create_security_connector(
+ grpc_channel_credentials *channel_creds, const char *target,
+ const grpc_channel_args *args, grpc_channel_security_connector **sc,
+ grpc_channel_args **new_args) {
*new_args = NULL;
- if (creds == NULL || creds->vtable->create_security_connector == NULL ||
- grpc_credentials_has_request_metadata_only(creds)) {
- gpr_log(GPR_ERROR,
- "Invalid credentials for creating a security connector.");
+ if (channel_creds == NULL) {
return GRPC_SECURITY_ERROR;
}
- return creds->vtable->create_security_connector(
- creds, target, args, request_metadata_creds, sc, new_args);
+ GPR_ASSERT(channel_creds->vtable->create_security_connector != NULL);
+ return channel_creds->vtable->create_security_connector(
+ channel_creds, NULL, target, args, sc, new_args);
}
-void grpc_server_credentials_release(grpc_server_credentials *creds) {
+grpc_server_credentials *grpc_server_credentials_ref(
+ grpc_server_credentials *creds) {
+ if (creds == NULL) return NULL;
+ gpr_ref(&creds->refcount);
+ return creds;
+}
+
+void grpc_server_credentials_unref(grpc_server_credentials *creds) {
if (creds == NULL) return;
- creds->vtable->destroy(creds);
+ if (gpr_unref(&creds->refcount)) {
+ if (creds->vtable->destruct != NULL) creds->vtable->destruct(creds);
+ if (creds->processor.destroy != NULL && creds->processor.state != NULL) {
+ creds->processor.destroy(creds->processor.state);
+ }
+ gpr_free(creds);
+ }
+}
+
+void grpc_server_credentials_release(grpc_server_credentials *creds) {
+ GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds));
+ grpc_server_credentials_unref(creds);
}
grpc_security_status grpc_server_credentials_create_security_connector(
- grpc_server_credentials *creds, grpc_security_connector **sc) {
+ grpc_server_credentials *creds, grpc_server_security_connector **sc) {
if (creds == NULL || creds->vtable->create_security_connector == NULL) {
gpr_log(GPR_ERROR, "Server credentials cannot create security context.");
return GRPC_SECURITY_ERROR;
@@ -150,27 +174,78 @@ grpc_security_status grpc_server_credentials_create_security_connector(
return creds->vtable->create_security_connector(creds, sc);
}
-/* -- Ssl credentials. -- */
+void grpc_server_credentials_set_auth_metadata_processor(
+ grpc_server_credentials *creds, grpc_auth_metadata_processor processor) {
+ GRPC_API_TRACE(
+ "grpc_server_credentials_set_auth_metadata_processor("
+ "creds=%p, "
+ "processor=grpc_auth_metadata_processor { process: %p, state: %p })",
+ 3, (creds, (void *)(intptr_t)processor.process, processor.state));
+ if (creds == NULL) return;
+ if (creds->processor.destroy != NULL && creds->processor.state != NULL) {
+ creds->processor.destroy(creds->processor.state);
+ }
+ creds->processor = processor;
+}
-typedef struct {
- grpc_credentials base;
- grpc_ssl_config config;
-} grpc_ssl_credentials;
+static void server_credentials_pointer_arg_destroy(void *p) {
+ grpc_server_credentials_unref(p);
+}
-typedef struct {
- grpc_server_credentials base;
- grpc_ssl_server_config config;
-} grpc_ssl_server_credentials;
+static void *server_credentials_pointer_arg_copy(void *p) {
+ return grpc_server_credentials_ref(p);
+}
+
+static int server_credentials_pointer_cmp(void *a, void *b) {
+ return GPR_ICMP(a, b);
+}
+
+static const grpc_arg_pointer_vtable cred_ptr_vtable = {
+ server_credentials_pointer_arg_copy, server_credentials_pointer_arg_destroy,
+ server_credentials_pointer_cmp};
+
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
+ grpc_arg arg;
+ memset(&arg, 0, sizeof(grpc_arg));
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_SERVER_CREDENTIALS_ARG;
+ arg.value.pointer.p = p;
+ arg.value.pointer.vtable = &cred_ptr_vtable;
+ return arg;
+}
+
+grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) {
+ if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL;
+ if (arg->type != GRPC_ARG_POINTER) {
+ gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
+ GRPC_SERVER_CREDENTIALS_ARG);
+ return NULL;
+ }
+ return arg->value.pointer.p;
+}
-static void ssl_destroy(grpc_credentials *creds) {
+grpc_server_credentials *grpc_find_server_credentials_in_args(
+ const grpc_channel_args *args) {
+ size_t i;
+ if (args == NULL) return NULL;
+ for (i = 0; i < args->num_args; i++) {
+ grpc_server_credentials *p =
+ grpc_server_credentials_from_arg(&args->args[i]);
+ if (p != NULL) return p;
+ }
+ return NULL;
+}
+
+/* -- Ssl credentials. -- */
+
+static void ssl_destruct(grpc_channel_credentials *creds) {
grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
if (c->config.pem_root_certs != NULL) gpr_free(c->config.pem_root_certs);
if (c->config.pem_private_key != NULL) gpr_free(c->config.pem_private_key);
if (c->config.pem_cert_chain != NULL) gpr_free(c->config.pem_cert_chain);
- gpr_free(creds);
}
-static void ssl_server_destroy(grpc_server_credentials *creds) {
+static void ssl_server_destruct(grpc_server_credentials *creds) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
size_t i;
for (i = 0; i < c->config.num_key_cert_pairs; i++) {
@@ -190,24 +265,17 @@ static void ssl_server_destroy(grpc_server_credentials *creds) {
gpr_free(c->config.pem_cert_chains_sizes);
}
if (c->config.pem_root_certs != NULL) gpr_free(c->config.pem_root_certs);
- gpr_free(creds);
-}
-
-static int ssl_has_request_metadata(const grpc_credentials *creds) { return 0; }
-
-static int ssl_has_request_metadata_only(const grpc_credentials *creds) {
- return 0;
}
static grpc_security_status ssl_create_security_connector(
- grpc_credentials *creds, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
+ grpc_channel_credentials *creds, grpc_call_credentials *call_creds,
+ const char *target, const grpc_channel_args *args,
grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
grpc_security_status status = GRPC_SECURITY_OK;
size_t i = 0;
const char *overridden_target_name = NULL;
- grpc_arg arg;
+ grpc_arg new_arg;
for (i = 0; args && i < args->num_args; i++) {
grpc_arg *arg = &args->args[i];
@@ -218,29 +286,28 @@ static grpc_security_status ssl_create_security_connector(
}
}
status = grpc_ssl_channel_security_connector_create(
- request_metadata_creds, &c->config, target, overridden_target_name, sc);
+ call_creds, &c->config, target, overridden_target_name, sc);
if (status != GRPC_SECURITY_OK) {
return status;
}
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_HTTP2_SCHEME;
- arg.value.string = "https";
- *new_args = grpc_channel_args_copy_and_add(args, &arg);
+ new_arg.type = GRPC_ARG_STRING;
+ new_arg.key = GRPC_ARG_HTTP2_SCHEME;
+ new_arg.value.string = "https";
+ *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return status;
}
static grpc_security_status ssl_server_create_security_connector(
- grpc_server_credentials *creds, grpc_security_connector **sc) {
+ grpc_server_credentials *creds, grpc_server_security_connector **sc) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
return grpc_ssl_server_security_connector_create(&c->config, sc);
}
-static grpc_credentials_vtable ssl_vtable = {
- ssl_destroy, ssl_has_request_metadata, ssl_has_request_metadata_only, NULL,
- ssl_create_security_connector};
+static grpc_channel_credentials_vtable ssl_vtable = {
+ ssl_destruct, ssl_create_security_connector};
static grpc_server_credentials_vtable ssl_server_vtable = {
- ssl_server_destroy, ssl_server_create_security_connector};
+ ssl_server_destruct, ssl_server_create_security_connector};
static void ssl_copy_key_material(const char *input, unsigned char **output,
size_t *output_size) {
@@ -270,8 +337,10 @@ static void ssl_build_config(const char *pem_root_certs,
static void ssl_build_server_config(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, grpc_ssl_server_config *config) {
+ size_t num_key_cert_pairs, int force_client_auth,
+ grpc_ssl_server_config *config) {
size_t i;
+ config->force_client_auth = force_client_auth;
if (pem_root_certs != NULL) {
ssl_copy_key_material(pem_root_certs, &config->pem_root_certs,
&config->pem_root_certs_size);
@@ -300,11 +369,18 @@ static void ssl_build_server_config(
}
}
-grpc_credentials *grpc_ssl_credentials_create(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair) {
+grpc_channel_credentials *grpc_ssl_credentials_create(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
+ void *reserved) {
grpc_ssl_credentials *c = gpr_malloc(sizeof(grpc_ssl_credentials));
+ GRPC_API_TRACE(
+ "grpc_ssl_credentials_create(pem_root_certs=%s, "
+ "pem_key_cert_pair=%p, "
+ "reserved=%p)",
+ 3, (pem_root_certs, pem_key_cert_pair, reserved));
+ GPR_ASSERT(reserved == NULL);
memset(c, 0, sizeof(grpc_ssl_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_SSL;
+ c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
c->base.vtable = &ssl_vtable;
gpr_ref_init(&c->base.refcount, 1);
ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config);
@@ -313,36 +389,28 @@ grpc_credentials *grpc_ssl_credentials_create(
grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs) {
+ size_t num_key_cert_pairs, int force_client_auth, void *reserved) {
grpc_ssl_server_credentials *c =
gpr_malloc(sizeof(grpc_ssl_server_credentials));
+ GRPC_API_TRACE(
+ "grpc_ssl_server_credentials_create("
+ "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
+ "force_client_auth=%d, reserved=%p)",
+ 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
+ force_client_auth, reserved));
+ GPR_ASSERT(reserved == NULL);
memset(c, 0, sizeof(grpc_ssl_server_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_SSL;
+ c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
+ gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &ssl_server_vtable;
ssl_build_server_config(pem_root_certs, pem_key_cert_pairs,
- num_key_cert_pairs, &c->config);
+ num_key_cert_pairs, force_client_auth, &c->config);
return &c->base;
}
/* -- Jwt credentials -- */
-typedef struct {
- grpc_credentials base;
-
- /* Have a simple cache for now with just 1 entry. We could have a map based on
- the service_url for a more sophisticated one. */
- gpr_mu cache_mu;
- struct {
- grpc_credentials_md_store *jwt_md;
- char *service_url;
- gpr_timespec jwt_expiration;
- } cached;
-
- grpc_auth_json_key key;
- gpr_timespec jwt_lifetime;
-} grpc_jwt_credentials;
-
-static void jwt_reset_cache(grpc_jwt_credentials *c) {
+static void jwt_reset_cache(grpc_service_account_jwt_access_credentials *c) {
if (c->cached.jwt_md != NULL) {
grpc_credentials_md_store_unref(c->cached.jwt_md);
c->cached.jwt_md = NULL;
@@ -351,40 +419,37 @@ static void jwt_reset_cache(grpc_jwt_credentials *c) {
gpr_free(c->cached.service_url);
c->cached.service_url = NULL;
}
- c->cached.jwt_expiration = gpr_inf_past;
+ c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
}
-static void jwt_destroy(grpc_credentials *creds) {
- grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds;
+static void jwt_destruct(grpc_call_credentials *creds) {
+ grpc_service_account_jwt_access_credentials *c =
+ (grpc_service_account_jwt_access_credentials *)creds;
grpc_auth_json_key_destruct(&c->key);
jwt_reset_cache(c);
gpr_mu_destroy(&c->cache_mu);
- gpr_free(c);
-}
-
-static int jwt_has_request_metadata(const grpc_credentials *creds) { return 1; }
-
-static int jwt_has_request_metadata_only(const grpc_credentials *creds) {
- return 1;
}
-static void jwt_get_request_metadata(grpc_credentials *creds,
+static void jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_credentials *creds,
grpc_pollset *pollset,
- const char *service_url,
+ grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data) {
- grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds;
- gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
- 0};
+ grpc_service_account_jwt_access_credentials *c =
+ (grpc_service_account_jwt_access_credentials *)creds;
+ gpr_timespec refresh_threshold = gpr_time_from_seconds(
+ GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
/* See if we can return a cached jwt. */
grpc_credentials_md_store *jwt_md = NULL;
{
gpr_mu_lock(&c->cache_mu);
if (c->cached.service_url != NULL &&
- strcmp(c->cached.service_url, service_url) == 0 &&
+ strcmp(c->cached.service_url, context.service_url) == 0 &&
c->cached.jwt_md != NULL &&
- (gpr_time_cmp(gpr_time_sub(c->cached.jwt_expiration, gpr_now()),
+ (gpr_time_cmp(gpr_time_sub(c->cached.jwt_expiration,
+ gpr_now(GPR_CLOCK_REALTIME)),
refresh_threshold) > 0)) {
jwt_md = grpc_credentials_md_store_ref(c->cached.jwt_md);
}
@@ -396,13 +461,15 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
/* Generate a new jwt. */
gpr_mu_lock(&c->cache_mu);
jwt_reset_cache(c);
- jwt = grpc_jwt_encode_and_sign(&c->key, service_url, c->jwt_lifetime, NULL);
+ jwt = grpc_jwt_encode_and_sign(&c->key, context.service_url,
+ c->jwt_lifetime, NULL);
if (jwt != NULL) {
char *md_value;
gpr_asprintf(&md_value, "Bearer %s", jwt);
gpr_free(jwt);
- c->cached.jwt_expiration = gpr_time_add(gpr_now(), c->jwt_lifetime);
- c->cached.service_url = gpr_strdup(service_url);
+ c->cached.jwt_expiration =
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c->jwt_lifetime);
+ c->cached.service_url = gpr_strdup(context.service_url);
c->cached.jwt_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
c->cached.jwt_md, GRPC_AUTHORIZATION_METADATA_KEY, md_value);
@@ -413,28 +480,28 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
}
if (jwt_md != NULL) {
- cb(user_data, jwt_md->entries, jwt_md->num_entries, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, jwt_md->entries, jwt_md->num_entries,
+ GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(jwt_md);
} else {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
}
}
-static grpc_credentials_vtable jwt_vtable = {
- jwt_destroy, jwt_has_request_metadata, jwt_has_request_metadata_only,
- jwt_get_request_metadata, NULL};
+static grpc_call_credentials_vtable jwt_vtable = {jwt_destruct,
+ jwt_get_request_metadata};
-grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
- gpr_timespec token_lifetime) {
- grpc_jwt_credentials *c;
- grpc_auth_json_key key = grpc_auth_json_key_create_from_string(json_key);
+grpc_call_credentials *
+grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
+ grpc_auth_json_key key, gpr_timespec token_lifetime) {
+ grpc_service_account_jwt_access_credentials *c;
if (!grpc_auth_json_key_is_valid(&key)) {
gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation");
return NULL;
}
- c = gpr_malloc(sizeof(grpc_jwt_credentials));
- memset(c, 0, sizeof(grpc_jwt_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_JWT;
+ c = gpr_malloc(sizeof(grpc_service_account_jwt_access_credentials));
+ memset(c, 0, sizeof(grpc_service_account_jwt_access_credentials));
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_JWT;
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &jwt_vtable;
c->key = key;
@@ -444,44 +511,30 @@ grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
return &c->base;
}
-/* -- Oauth2TokenFetcher credentials -- */
-
-/* This object is a base for credentials that need to acquire an oauth2 token
- from an http service. */
+grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
+ const char *json_key, gpr_timespec token_lifetime, void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_service_account_jwt_access_credentials_create("
+ "json_key=%s, "
+ "token_lifetime="
+ "gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 5,
+ (json_key, (long long)token_lifetime.tv_sec, (int)token_lifetime.tv_nsec,
+ (int)token_lifetime.clock_type, reserved));
+ GPR_ASSERT(reserved == NULL);
+ return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
+ grpc_auth_json_key_create_from_string(json_key), token_lifetime);
+}
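/* Example (illustrative sketch): JWT access call credentials with a one-hour
   token lifetime; json_key_string is a placeholder for the contents of a
   service account key file. */
grpc_call_credentials *jwt_creds =
    grpc_service_account_jwt_access_credentials_create(
        json_key_string, gpr_time_from_seconds(3600, GPR_TIMESPAN), NULL);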
-typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req,
- grpc_httpcli_context *http_context,
- grpc_pollset *pollset,
- grpc_httpcli_response_cb response_cb,
- gpr_timespec deadline);
+/* -- Oauth2TokenFetcher credentials -- */
-typedef struct {
- grpc_credentials base;
- gpr_mu mu;
- grpc_credentials_md_store *access_token_md;
- gpr_timespec token_expiration;
- grpc_httpcli_context httpcli_context;
- grpc_pollset_set pollset_set;
- grpc_fetch_oauth2_func fetch_func;
-} grpc_oauth2_token_fetcher_credentials;
-
-static void oauth2_token_fetcher_destroy(grpc_credentials *creds) {
+static void oauth2_token_fetcher_destruct(grpc_call_credentials *creds) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
grpc_credentials_md_store_unref(c->access_token_md);
gpr_mu_destroy(&c->mu);
grpc_httpcli_context_destroy(&c->httpcli_context);
- gpr_free(c);
-}
-
-static int oauth2_token_fetcher_has_request_metadata(
- const grpc_credentials *creds) {
- return 1;
-}
-
-static int oauth2_token_fetcher_has_request_metadata_only(
- const grpc_credentials *creds) {
- return 1;
}
grpc_credentials_status
@@ -555,6 +608,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
access_token->value);
token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
token_lifetime->tv_nsec = 0;
+ token_lifetime->clock_type = GPR_TIMESPAN;
if (*token_md != NULL) grpc_credentials_md_store_unref(*token_md);
*token_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
@@ -574,7 +628,8 @@ end:
}
static void on_oauth2_token_fetcher_http_response(
- void *user_data, const grpc_httpcli_response *response) {
+ grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_oauth2_token_fetcher_credentials *c =
@@ -586,67 +641,69 @@ static void on_oauth2_token_fetcher_http_response(
status = grpc_oauth2_token_fetcher_credentials_parse_server_response(
response, &c->access_token_md, &token_lifetime);
if (status == GRPC_CREDENTIALS_OK) {
- c->token_expiration = gpr_time_add(gpr_now(), token_lifetime);
- r->cb(r->user_data, c->access_token_md->entries,
+ c->token_expiration =
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime);
+ r->cb(exec_ctx, r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, status);
} else {
- c->token_expiration = gpr_inf_past;
- r->cb(r->user_data, NULL, 0, status);
+ c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
+ r->cb(exec_ctx, r->user_data, NULL, 0, status);
}
gpr_mu_unlock(&c->mu);
grpc_credentials_metadata_request_destroy(r);
}
static void oauth2_token_fetcher_get_request_metadata(
- grpc_credentials *creds, grpc_pollset *pollset, const char *service_url,
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
- gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
- 0};
+ gpr_timespec refresh_threshold = gpr_time_from_seconds(
+ GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
grpc_credentials_md_store *cached_access_token_md = NULL;
{
gpr_mu_lock(&c->mu);
if (c->access_token_md != NULL &&
- (gpr_time_cmp(gpr_time_sub(c->token_expiration, gpr_now()),
- refresh_threshold) > 0)) {
+ (gpr_time_cmp(
+ gpr_time_sub(c->token_expiration, gpr_now(GPR_CLOCK_REALTIME)),
+ refresh_threshold) > 0)) {
cached_access_token_md =
grpc_credentials_md_store_ref(c->access_token_md);
}
gpr_mu_unlock(&c->mu);
}
if (cached_access_token_md != NULL) {
- cb(user_data, cached_access_token_md->entries,
+ cb(exec_ctx, user_data, cached_access_token_md->entries,
cached_access_token_md->num_entries, GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(cached_access_token_md);
} else {
c->fetch_func(
+ exec_ctx,
grpc_credentials_metadata_request_create(creds, cb, user_data),
&c->httpcli_context, pollset, on_oauth2_token_fetcher_http_response,
- gpr_time_add(gpr_now(), refresh_threshold));
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), refresh_threshold));
}
}
static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
grpc_fetch_oauth2_func fetch_func) {
memset(c, 0, sizeof(grpc_oauth2_token_fetcher_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
- c->token_expiration = gpr_inf_past;
+ c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
c->fetch_func = fetch_func;
- grpc_pollset_set_init(&c->pollset_set);
+ grpc_httpcli_context_init(&c->httpcli_context);
}
-/* -- ComputeEngine credentials. -- */
+/* -- GoogleComputeEngine credentials. -- */
-static grpc_credentials_vtable compute_engine_vtable = {
- oauth2_token_fetcher_destroy, oauth2_token_fetcher_has_request_metadata,
- oauth2_token_fetcher_has_request_metadata_only,
- oauth2_token_fetcher_get_request_metadata, NULL};
+static grpc_call_credentials_vtable compute_engine_vtable = {
+ oauth2_token_fetcher_destruct, oauth2_token_fetcher_get_request_metadata};
static void compute_engine_fetch_oauth2(
- grpc_credentials_metadata_request *metadata_req,
+ grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_httpcli_header header = {"Metadata-Flavor", "Google"};
@@ -656,119 +713,40 @@ static void compute_engine_fetch_oauth2(
request.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.hdr_count = 1;
request.hdrs = &header;
- grpc_httpcli_get(httpcli_context, pollset, &request, deadline, response_cb,
- metadata_req);
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollset, &request, deadline,
+ response_cb, metadata_req);
}
-grpc_credentials *grpc_compute_engine_credentials_create(void) {
+grpc_call_credentials *grpc_google_compute_engine_credentials_create(
+ void *reserved) {
grpc_oauth2_token_fetcher_credentials *c =
gpr_malloc(sizeof(grpc_oauth2_token_fetcher_credentials));
+  GRPC_API_TRACE("grpc_google_compute_engine_credentials_create(reserved=%p)", 1,
+ (reserved));
+ GPR_ASSERT(reserved == NULL);
init_oauth2_token_fetcher(c, compute_engine_fetch_oauth2);
c->base.vtable = &compute_engine_vtable;
return &c->base;
}
-/* -- ServiceAccount credentials. -- */
-
-typedef struct {
- grpc_oauth2_token_fetcher_credentials base;
- grpc_auth_json_key key;
- char *scope;
- gpr_timespec token_lifetime;
-} grpc_service_account_credentials;
+/* -- GoogleRefreshToken credentials. -- */
-static void service_account_destroy(grpc_credentials *creds) {
- grpc_service_account_credentials *c =
- (grpc_service_account_credentials *)creds;
- if (c->scope != NULL) gpr_free(c->scope);
- grpc_auth_json_key_destruct(&c->key);
- oauth2_token_fetcher_destroy(&c->base.base);
-}
-
-static grpc_credentials_vtable service_account_vtable = {
- service_account_destroy, oauth2_token_fetcher_has_request_metadata,
- oauth2_token_fetcher_has_request_metadata_only,
- oauth2_token_fetcher_get_request_metadata, NULL};
-
-static void service_account_fetch_oauth2(
- grpc_credentials_metadata_request *metadata_req,
- grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
- grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
- grpc_service_account_credentials *c =
- (grpc_service_account_credentials *)metadata_req->creds;
- grpc_httpcli_header header = {"Content-Type",
- "application/x-www-form-urlencoded"};
- grpc_httpcli_request request;
- char *body = NULL;
- char *jwt = grpc_jwt_encode_and_sign(&c->key, GRPC_JWT_OAUTH2_AUDIENCE,
- c->token_lifetime, c->scope);
- if (jwt == NULL) {
- grpc_httpcli_response response;
- memset(&response, 0, sizeof(grpc_httpcli_response));
- response.status = 400; /* Invalid request. */
- gpr_log(GPR_ERROR, "Could not create signed jwt.");
- /* Do not even send the request, just call the response callback. */
- response_cb(metadata_req, &response);
- return;
- }
- gpr_asprintf(&body, "%s%s", GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX, jwt);
- memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
- request.path = GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
- request.hdr_count = 1;
- request.hdrs = &header;
- request.use_ssl = 1;
- grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body),
- deadline, response_cb, metadata_req);
- gpr_free(body);
- gpr_free(jwt);
-}
-
-grpc_credentials *grpc_service_account_credentials_create(
- const char *json_key, const char *scope, gpr_timespec token_lifetime) {
- grpc_service_account_credentials *c;
- grpc_auth_json_key key = grpc_auth_json_key_create_from_string(json_key);
-
- if (scope == NULL || (strlen(scope) == 0) ||
- !grpc_auth_json_key_is_valid(&key)) {
- gpr_log(GPR_ERROR,
- "Invalid input for service account credentials creation");
- return NULL;
- }
- c = gpr_malloc(sizeof(grpc_service_account_credentials));
- memset(c, 0, sizeof(grpc_service_account_credentials));
- init_oauth2_token_fetcher(&c->base, service_account_fetch_oauth2);
- c->base.base.vtable = &service_account_vtable;
- c->scope = gpr_strdup(scope);
- c->key = key;
- c->token_lifetime = token_lifetime;
- return &c->base.base;
-}
-
-/* -- RefreshToken credentials. -- */
-
-typedef struct {
- grpc_oauth2_token_fetcher_credentials base;
- grpc_auth_refresh_token refresh_token;
-} grpc_refresh_token_credentials;
-
-static void refresh_token_destroy(grpc_credentials *creds) {
- grpc_refresh_token_credentials *c = (grpc_refresh_token_credentials *)creds;
+static void refresh_token_destruct(grpc_call_credentials *creds) {
+ grpc_google_refresh_token_credentials *c =
+ (grpc_google_refresh_token_credentials *)creds;
grpc_auth_refresh_token_destruct(&c->refresh_token);
- oauth2_token_fetcher_destroy(&c->base.base);
+ oauth2_token_fetcher_destruct(&c->base.base);
}
-static grpc_credentials_vtable refresh_token_vtable = {
- refresh_token_destroy, oauth2_token_fetcher_has_request_metadata,
- oauth2_token_fetcher_has_request_metadata_only,
- oauth2_token_fetcher_get_request_metadata, NULL};
+static grpc_call_credentials_vtable refresh_token_vtable = {
+ refresh_token_destruct, oauth2_token_fetcher_get_request_metadata};
static void refresh_token_fetch_oauth2(
- grpc_credentials_metadata_request *metadata_req,
+ grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
- grpc_refresh_token_credentials *c =
- (grpc_refresh_token_credentials *)metadata_req->creds;
+ grpc_google_refresh_token_credentials *c =
+ (grpc_google_refresh_token_credentials *)metadata_req->creds;
grpc_httpcli_header header = {"Content-Type",
"application/x-www-form-urlencoded"};
grpc_httpcli_request request;
@@ -781,153 +759,159 @@ static void refresh_token_fetch_oauth2(
request.path = GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
request.hdr_count = 1;
request.hdrs = &header;
- request.use_ssl = 1;
- grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body),
- deadline, response_cb, metadata_req);
+ request.handshaker = &grpc_httpcli_ssl;
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollset, &request, body,
+ strlen(body), deadline, response_cb, metadata_req);
gpr_free(body);
}
-grpc_credentials *grpc_refresh_token_credentials_create(
- const char *json_refresh_token) {
- grpc_refresh_token_credentials *c;
- grpc_auth_refresh_token refresh_token =
- grpc_auth_refresh_token_create_from_string(json_refresh_token);
-
+grpc_call_credentials *
+grpc_refresh_token_credentials_create_from_auth_refresh_token(
+ grpc_auth_refresh_token refresh_token) {
+ grpc_google_refresh_token_credentials *c;
if (!grpc_auth_refresh_token_is_valid(&refresh_token)) {
gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation");
return NULL;
}
- c = gpr_malloc(sizeof(grpc_refresh_token_credentials));
- memset(c, 0, sizeof(grpc_refresh_token_credentials));
+ c = gpr_malloc(sizeof(grpc_google_refresh_token_credentials));
+ memset(c, 0, sizeof(grpc_google_refresh_token_credentials));
init_oauth2_token_fetcher(&c->base, refresh_token_fetch_oauth2);
c->base.base.vtable = &refresh_token_vtable;
c->refresh_token = refresh_token;
return &c->base.base;
}
-/* -- Fake Oauth2 credentials. -- */
-
-typedef struct {
- grpc_credentials base;
- grpc_credentials_md_store *access_token_md;
- int is_async;
-} grpc_fake_oauth2_credentials;
-
-static void fake_oauth2_destroy(grpc_credentials *creds) {
- grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;
- grpc_credentials_md_store_unref(c->access_token_md);
- gpr_free(c);
+grpc_call_credentials *grpc_google_refresh_token_credentials_create(
+ const char *json_refresh_token, void *reserved) {
+  GRPC_API_TRACE(
+      "grpc_google_refresh_token_credentials_create(json_refresh_token=%s, "
+      "reserved=%p)",
+      2, (json_refresh_token, reserved));
+ GPR_ASSERT(reserved == NULL);
+ return grpc_refresh_token_credentials_create_from_auth_refresh_token(
+ grpc_auth_refresh_token_create_from_string(json_refresh_token));
}
-static int fake_oauth2_has_request_metadata(const grpc_credentials *creds) {
- return 1;
-}
+/* -- Metadata-only credentials. -- */
-static int fake_oauth2_has_request_metadata_only(
- const grpc_credentials *creds) {
- return 1;
+static void md_only_test_destruct(grpc_call_credentials *creds) {
+ grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
+ grpc_credentials_md_store_unref(c->md_store);
}
-void on_simulated_token_fetch_done(void *user_data, int success) {
+static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
+ void *user_data, bool success) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
- grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)r->creds;
- GPR_ASSERT(success);
- r->cb(r->user_data, c->access_token_md->entries,
- c->access_token_md->num_entries, GRPC_CREDENTIALS_OK);
+ grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
+ r->cb(exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
+ GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
}
-static void fake_oauth2_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
- grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;
+static void md_only_test_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_credentials_metadata_cb cb, void *user_data) {
+ grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
if (c->is_async) {
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
- grpc_iomgr_closure_init(cb_arg->on_simulated_token_fetch_done_closure,
- on_simulated_token_fetch_done, cb_arg);
- grpc_iomgr_add_callback(cb_arg->on_simulated_token_fetch_done_closure);
+ grpc_executor_enqueue(
+ grpc_closure_create(on_simulated_token_fetch_done, cb_arg), true);
} else {
- cb(user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}
}
-static grpc_credentials_vtable fake_oauth2_vtable = {
- fake_oauth2_destroy, fake_oauth2_has_request_metadata,
- fake_oauth2_has_request_metadata_only, fake_oauth2_get_request_metadata,
- NULL};
+static grpc_call_credentials_vtable md_only_test_vtable = {
+ md_only_test_destruct, md_only_test_get_request_metadata};
-grpc_credentials *grpc_fake_oauth2_credentials_create(
- const char *token_md_value, int is_async) {
- grpc_fake_oauth2_credentials *c =
- gpr_malloc(sizeof(grpc_fake_oauth2_credentials));
- memset(c, 0, sizeof(grpc_fake_oauth2_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
- c->base.vtable = &fake_oauth2_vtable;
+grpc_call_credentials *grpc_md_only_test_credentials_create(
+ const char *md_key, const char *md_value, int is_async) {
+ grpc_md_only_test_credentials *c =
+ gpr_malloc(sizeof(grpc_md_only_test_credentials));
+ memset(c, 0, sizeof(grpc_md_only_test_credentials));
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
+ c->base.vtable = &md_only_test_vtable;
gpr_ref_init(&c->base.refcount, 1);
- c->access_token_md = grpc_credentials_md_store_create(1);
- grpc_credentials_md_store_add_cstrings(
- c->access_token_md, GRPC_AUTHORIZATION_METADATA_KEY, token_md_value);
+ c->md_store = grpc_credentials_md_store_create(1);
+ grpc_credentials_md_store_add_cstrings(c->md_store, md_key, md_value);
c->is_async = is_async;
return &c->base;
}
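/* Example (illustrative sketch, test-only): credentials that attach the single
   metadata element "test-key: test-value"; with is_async != 0 the callback is
   bounced through the executor to simulate an asynchronous fetch. */
grpc_call_credentials *test_creds =
    grpc_md_only_test_credentials_create("test-key", "test-value", 1);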
-/* -- Fake transport security credentials. -- */
-
-static void fake_transport_security_credentials_destroy(
- grpc_credentials *creds) {
- gpr_free(creds);
-}
+/* -- Oauth2 Access Token credentials. -- */
-static void fake_transport_security_server_credentials_destroy(
- grpc_server_credentials *creds) {
- gpr_free(creds);
+static void access_token_destruct(grpc_call_credentials *creds) {
+ grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
+ grpc_credentials_md_store_unref(c->access_token_md);
}
-static int fake_transport_security_has_request_metadata(
- const grpc_credentials *creds) {
- return 0;
+static void access_token_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_credentials_metadata_cb cb, void *user_data) {
+ grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
+ cb(exec_ctx, user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
+}
+
+static grpc_call_credentials_vtable access_token_vtable = {
+ access_token_destruct, access_token_get_request_metadata};
+
+grpc_call_credentials *grpc_access_token_credentials_create(
+ const char *access_token, void *reserved) {
+ grpc_access_token_credentials *c =
+ gpr_malloc(sizeof(grpc_access_token_credentials));
+ char *token_md_value;
+ GRPC_API_TRACE(
+ "grpc_access_token_credentials_create(access_token=%s, "
+ "reserved=%p)",
+ 2, (access_token, reserved));
+ GPR_ASSERT(reserved == NULL);
+ memset(c, 0, sizeof(grpc_access_token_credentials));
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
+ c->base.vtable = &access_token_vtable;
+ gpr_ref_init(&c->base.refcount, 1);
+ c->access_token_md = grpc_credentials_md_store_create(1);
+ gpr_asprintf(&token_md_value, "Bearer %s", access_token);
+ grpc_credentials_md_store_add_cstrings(
+ c->access_token_md, GRPC_AUTHORIZATION_METADATA_KEY, token_md_value);
+ gpr_free(token_md_value);
+ return &c->base;
}
-static int fake_transport_security_has_request_metadata_only(
- const grpc_credentials *creds) {
- return 0;
-}
+/* -- Fake transport security credentials. -- */
static grpc_security_status fake_transport_security_create_security_connector(
- grpc_credentials *c, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
+ grpc_channel_credentials *c, grpc_call_credentials *call_creds,
+ const char *target, const grpc_channel_args *args,
grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
- *sc = grpc_fake_channel_security_connector_create(request_metadata_creds, 1);
+ *sc = grpc_fake_channel_security_connector_create(call_creds);
return GRPC_SECURITY_OK;
}
static grpc_security_status
fake_transport_security_server_create_security_connector(
- grpc_server_credentials *c, grpc_security_connector **sc) {
+ grpc_server_credentials *c, grpc_server_security_connector **sc) {
*sc = grpc_fake_server_security_connector_create();
return GRPC_SECURITY_OK;
}
-static grpc_credentials_vtable fake_transport_security_credentials_vtable = {
- fake_transport_security_credentials_destroy,
- fake_transport_security_has_request_metadata,
- fake_transport_security_has_request_metadata_only, NULL,
- fake_transport_security_create_security_connector};
+static grpc_channel_credentials_vtable
+ fake_transport_security_credentials_vtable = {
+ NULL, fake_transport_security_create_security_connector};
static grpc_server_credentials_vtable
fake_transport_security_server_credentials_vtable = {
- fake_transport_security_server_credentials_destroy,
- fake_transport_security_server_create_security_connector};
+ NULL, fake_transport_security_server_create_security_connector};
-grpc_credentials *grpc_fake_transport_security_credentials_create(void) {
- grpc_credentials *c = gpr_malloc(sizeof(grpc_credentials));
- memset(c, 0, sizeof(grpc_credentials));
- c->type = GRPC_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
+grpc_channel_credentials *grpc_fake_transport_security_credentials_create(
+ void) {
+ grpc_channel_credentials *c = gpr_malloc(sizeof(grpc_channel_credentials));
+ memset(c, 0, sizeof(grpc_channel_credentials));
+ c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
c->vtable = &fake_transport_security_credentials_vtable;
gpr_ref_init(&c->refcount, 1);
return c;
@@ -937,77 +921,47 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
void) {
grpc_server_credentials *c = gpr_malloc(sizeof(grpc_server_credentials));
memset(c, 0, sizeof(grpc_server_credentials));
- c->type = GRPC_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
+ c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
+ gpr_ref_init(&c->refcount, 1);
c->vtable = &fake_transport_security_server_credentials_vtable;
return c;
}
-/* -- Composite credentials. -- */
-
-typedef struct {
- grpc_credentials base;
- grpc_credentials_array inner;
- grpc_credentials *connector_creds;
-} grpc_composite_credentials;
+/* -- Composite call credentials. -- */
typedef struct {
- grpc_composite_credentials *composite_creds;
+ grpc_composite_call_credentials *composite_creds;
size_t creds_index;
grpc_credentials_md_store *md_elems;
- char *service_url;
+ grpc_auth_metadata_context auth_md_context;
void *user_data;
grpc_pollset *pollset;
grpc_credentials_metadata_cb cb;
-} grpc_composite_credentials_metadata_context;
+} grpc_composite_call_credentials_metadata_context;
-static void composite_destroy(grpc_credentials *creds) {
- grpc_composite_credentials *c = (grpc_composite_credentials *)creds;
+static void composite_call_destruct(grpc_call_credentials *creds) {
+ grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
size_t i;
for (i = 0; i < c->inner.num_creds; i++) {
- grpc_credentials_unref(c->inner.creds_array[i]);
+ grpc_call_credentials_unref(c->inner.creds_array[i]);
}
gpr_free(c->inner.creds_array);
- gpr_free(creds);
}
-static int composite_has_request_metadata(const grpc_credentials *creds) {
- const grpc_composite_credentials *c =
- (const grpc_composite_credentials *)creds;
- size_t i;
- for (i = 0; i < c->inner.num_creds; i++) {
- if (grpc_credentials_has_request_metadata(c->inner.creds_array[i])) {
- return 1;
- }
- }
- return 0;
-}
-
-static int composite_has_request_metadata_only(const grpc_credentials *creds) {
- const grpc_composite_credentials *c =
- (const grpc_composite_credentials *)creds;
- size_t i;
- for (i = 0; i < c->inner.num_creds; i++) {
- if (!grpc_credentials_has_request_metadata_only(c->inner.creds_array[i])) {
- return 0;
- }
- }
- return 1;
-}
-
-static void composite_md_context_destroy(
- grpc_composite_credentials_metadata_context *ctx) {
+static void composite_call_md_context_destroy(
+ grpc_composite_call_credentials_metadata_context *ctx) {
grpc_credentials_md_store_unref(ctx->md_elems);
- if (ctx->service_url != NULL) gpr_free(ctx->service_url);
gpr_free(ctx);
}
-static void composite_metadata_cb(void *user_data,
- grpc_credentials_md *md_elems, size_t num_md,
- grpc_credentials_status status) {
- grpc_composite_credentials_metadata_context *ctx =
- (grpc_composite_credentials_metadata_context *)user_data;
+static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_credentials_md *md_elems,
+ size_t num_md,
+ grpc_credentials_status status) {
+ grpc_composite_call_credentials_metadata_context *ctx =
+ (grpc_composite_call_credentials_metadata_context *)user_data;
if (status != GRPC_CREDENTIALS_OK) {
- ctx->cb(ctx->user_data, NULL, 0, status);
+ ctx->cb(exec_ctx, ctx->user_data, NULL, 0, status);
return;
}
@@ -1021,153 +975,112 @@ static void composite_metadata_cb(void *user_data,
}
/* See if we need to get some more metadata. */
- while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
- grpc_credentials *inner_creds =
+ if (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
+ grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
- if (grpc_credentials_has_request_metadata(inner_creds)) {
- grpc_credentials_get_request_metadata(inner_creds, ctx->pollset,
- ctx->service_url,
- composite_metadata_cb, ctx);
- return;
- }
+ grpc_call_credentials_get_request_metadata(
+ exec_ctx, inner_creds, ctx->pollset, ctx->auth_md_context,
+ composite_call_metadata_cb, ctx);
+ return;
}
/* We're done. */
- ctx->cb(ctx->user_data, ctx->md_elems->entries, ctx->md_elems->num_entries,
- GRPC_CREDENTIALS_OK);
- composite_md_context_destroy(ctx);
-}
-
-static void composite_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
- grpc_composite_credentials *c = (grpc_composite_credentials *)creds;
- grpc_composite_credentials_metadata_context *ctx;
- if (!grpc_credentials_has_request_metadata(creds)) {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_OK);
- return;
- }
- ctx = gpr_malloc(sizeof(grpc_composite_credentials_metadata_context));
- memset(ctx, 0, sizeof(grpc_composite_credentials_metadata_context));
- ctx->service_url = gpr_strdup(service_url);
+ ctx->cb(exec_ctx, ctx->user_data, ctx->md_elems->entries,
+ ctx->md_elems->num_entries, GRPC_CREDENTIALS_OK);
+ composite_call_md_context_destroy(ctx);
+}
+
+static void composite_call_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context auth_md_context,
+ grpc_credentials_metadata_cb cb, void *user_data) {
+ grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
+ grpc_composite_call_credentials_metadata_context *ctx;
+
+ ctx = gpr_malloc(sizeof(grpc_composite_call_credentials_metadata_context));
+ memset(ctx, 0, sizeof(grpc_composite_call_credentials_metadata_context));
+ ctx->auth_md_context = auth_md_context;
ctx->user_data = user_data;
ctx->cb = cb;
ctx->composite_creds = c;
ctx->pollset = pollset;
ctx->md_elems = grpc_credentials_md_store_create(c->inner.num_creds);
- while (ctx->creds_index < c->inner.num_creds) {
- grpc_credentials *inner_creds = c->inner.creds_array[ctx->creds_index++];
- if (grpc_credentials_has_request_metadata(inner_creds)) {
- grpc_credentials_get_request_metadata(inner_creds, pollset, service_url,
- composite_metadata_cb, ctx);
- return;
- }
- }
- GPR_ASSERT(0); /* Should have exited before. */
+ grpc_call_credentials_get_request_metadata(
+ exec_ctx, c->inner.creds_array[ctx->creds_index++], pollset,
+ auth_md_context, composite_call_metadata_cb, ctx);
}
-static grpc_security_status composite_create_security_connector(
- grpc_credentials *creds, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
- grpc_composite_credentials *c = (grpc_composite_credentials *)creds;
- if (c->connector_creds == NULL) {
- gpr_log(GPR_ERROR,
- "Cannot create security connector, missing connector credentials.");
- return GRPC_SECURITY_ERROR;
- }
- return grpc_credentials_create_security_connector(c->connector_creds, target,
- args, creds, sc, new_args);
-}
+static grpc_call_credentials_vtable composite_call_credentials_vtable = {
+ composite_call_destruct, composite_call_get_request_metadata};
-static grpc_credentials_vtable composite_credentials_vtable = {
- composite_destroy, composite_has_request_metadata,
- composite_has_request_metadata_only, composite_get_request_metadata,
- composite_create_security_connector};
-
-static grpc_credentials_array get_creds_array(grpc_credentials **creds_addr) {
- grpc_credentials_array result;
- grpc_credentials *creds = *creds_addr;
+static grpc_call_credentials_array get_creds_array(
+ grpc_call_credentials **creds_addr) {
+ grpc_call_credentials_array result;
+ grpc_call_credentials *creds = *creds_addr;
result.creds_array = creds_addr;
result.num_creds = 1;
- if (strcmp(creds->type, GRPC_CREDENTIALS_TYPE_COMPOSITE) == 0) {
- result = *grpc_composite_credentials_get_credentials(creds);
+ if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) {
+ result = *grpc_composite_call_credentials_get_credentials(creds);
}
return result;
}
-grpc_credentials *grpc_composite_credentials_create(grpc_credentials *creds1,
- grpc_credentials *creds2) {
+grpc_call_credentials *grpc_composite_call_credentials_create(
+ grpc_call_credentials *creds1, grpc_call_credentials *creds2,
+ void *reserved) {
size_t i;
size_t creds_array_byte_size;
- grpc_credentials_array creds1_array;
- grpc_credentials_array creds2_array;
- grpc_composite_credentials *c;
+ grpc_call_credentials_array creds1_array;
+ grpc_call_credentials_array creds2_array;
+ grpc_composite_call_credentials *c;
+ GRPC_API_TRACE(
+ "grpc_composite_call_credentials_create(creds1=%p, creds2=%p, "
+ "reserved=%p)",
+ 3, (creds1, creds2, reserved));
+ GPR_ASSERT(reserved == NULL);
GPR_ASSERT(creds1 != NULL);
GPR_ASSERT(creds2 != NULL);
- c = gpr_malloc(sizeof(grpc_composite_credentials));
- memset(c, 0, sizeof(grpc_composite_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_COMPOSITE;
- c->base.vtable = &composite_credentials_vtable;
+ c = gpr_malloc(sizeof(grpc_composite_call_credentials));
+ memset(c, 0, sizeof(grpc_composite_call_credentials));
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE;
+ c->base.vtable = &composite_call_credentials_vtable;
gpr_ref_init(&c->base.refcount, 1);
creds1_array = get_creds_array(&creds1);
creds2_array = get_creds_array(&creds2);
c->inner.num_creds = creds1_array.num_creds + creds2_array.num_creds;
- creds_array_byte_size = c->inner.num_creds * sizeof(grpc_credentials *);
+ creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials *);
c->inner.creds_array = gpr_malloc(creds_array_byte_size);
memset(c->inner.creds_array, 0, creds_array_byte_size);
for (i = 0; i < creds1_array.num_creds; i++) {
- grpc_credentials *cur_creds = creds1_array.creds_array[i];
- if (!grpc_credentials_has_request_metadata_only(cur_creds)) {
- if (c->connector_creds == NULL) {
- c->connector_creds = cur_creds;
- } else {
- gpr_log(GPR_ERROR, "Cannot compose multiple connector credentials.");
- goto fail;
- }
- }
- c->inner.creds_array[i] = grpc_credentials_ref(cur_creds);
+ grpc_call_credentials *cur_creds = creds1_array.creds_array[i];
+ c->inner.creds_array[i] = grpc_call_credentials_ref(cur_creds);
}
for (i = 0; i < creds2_array.num_creds; i++) {
- grpc_credentials *cur_creds = creds2_array.creds_array[i];
- if (!grpc_credentials_has_request_metadata_only(cur_creds)) {
- if (c->connector_creds == NULL) {
- c->connector_creds = cur_creds;
- } else {
- gpr_log(GPR_ERROR, "Cannot compose multiple connector credentials.");
- goto fail;
- }
- }
+ grpc_call_credentials *cur_creds = creds2_array.creds_array[i];
c->inner.creds_array[i + creds1_array.num_creds] =
- grpc_credentials_ref(cur_creds);
+ grpc_call_credentials_ref(cur_creds);
}
return &c->base;
-
-fail:
- grpc_credentials_unref(&c->base);
- return NULL;
}
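/* Example (illustrative sketch): composing OAuth2 and IAM call credentials.
   Inner composites are flattened, so the result carries the metadata of both;
   the IAM token and authority selector values are placeholders. */
grpc_call_credentials *gce_creds =
    grpc_google_compute_engine_credentials_create(NULL);
grpc_call_credentials *iam_creds =
    grpc_google_iam_credentials_create("iam-token", "authority", NULL);
grpc_call_credentials *both_creds =
    grpc_composite_call_credentials_create(gce_creds, iam_creds, NULL);
/* The composite takes its own references. */
grpc_call_credentials_unref(gce_creds);
grpc_call_credentials_unref(iam_creds);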
-const grpc_credentials_array *grpc_composite_credentials_get_credentials(
- grpc_credentials *creds) {
- const grpc_composite_credentials *c =
- (const grpc_composite_credentials *)creds;
- GPR_ASSERT(strcmp(creds->type, GRPC_CREDENTIALS_TYPE_COMPOSITE) == 0);
+const grpc_call_credentials_array *
+grpc_composite_call_credentials_get_credentials(grpc_call_credentials *creds) {
+ const grpc_composite_call_credentials *c =
+ (const grpc_composite_call_credentials *)creds;
+ GPR_ASSERT(strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0);
return &c->inner;
}
-grpc_credentials *grpc_credentials_contains_type(
- grpc_credentials *creds, const char *type,
- grpc_credentials **composite_creds) {
+grpc_call_credentials *grpc_credentials_contains_type(
+ grpc_call_credentials *creds, const char *type,
+ grpc_call_credentials **composite_creds) {
size_t i;
if (strcmp(creds->type, type) == 0) {
if (composite_creds != NULL) *composite_creds = NULL;
return creds;
- } else if (strcmp(creds->type, GRPC_CREDENTIALS_TYPE_COMPOSITE) == 0) {
- const grpc_credentials_array *inner_creds_array =
- grpc_composite_credentials_get_credentials(creds);
+ } else if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) {
+ const grpc_call_credentials_array *inner_creds_array =
+ grpc_composite_call_credentials_get_credentials(creds);
for (i = 0; i < inner_creds_array->num_creds; i++) {
if (strcmp(type, inner_creds_array->creds_array[i]->type) == 0) {
if (composite_creds != NULL) *composite_creds = creds;
@@ -1180,45 +1093,38 @@ grpc_credentials *grpc_credentials_contains_type(
/* -- IAM credentials. -- */
-typedef struct {
- grpc_credentials base;
- grpc_credentials_md_store *iam_md;
-} grpc_iam_credentials;
-
-static void iam_destroy(grpc_credentials *creds) {
- grpc_iam_credentials *c = (grpc_iam_credentials *)creds;
+static void iam_destruct(grpc_call_credentials *creds) {
+ grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
grpc_credentials_md_store_unref(c->iam_md);
- gpr_free(c);
-}
-
-static int iam_has_request_metadata(const grpc_credentials *creds) { return 1; }
-
-static int iam_has_request_metadata_only(const grpc_credentials *creds) {
- return 1;
}
-static void iam_get_request_metadata(grpc_credentials *creds,
+static void iam_get_request_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_credentials *creds,
grpc_pollset *pollset,
- const char *service_url,
+ grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data) {
- grpc_iam_credentials *c = (grpc_iam_credentials *)creds;
- cb(user_data, c->iam_md->entries, c->iam_md->num_entries,
+ grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
+ cb(exec_ctx, user_data, c->iam_md->entries, c->iam_md->num_entries,
GRPC_CREDENTIALS_OK);
}
-static grpc_credentials_vtable iam_vtable = {
- iam_destroy, iam_has_request_metadata, iam_has_request_metadata_only,
- iam_get_request_metadata, NULL};
+static grpc_call_credentials_vtable iam_vtable = {iam_destruct,
+ iam_get_request_metadata};
-grpc_credentials *grpc_iam_credentials_create(const char *token,
- const char *authority_selector) {
- grpc_iam_credentials *c;
+grpc_call_credentials *grpc_google_iam_credentials_create(
+ const char *token, const char *authority_selector, void *reserved) {
+ grpc_google_iam_credentials *c;
+  GRPC_API_TRACE(
+      "grpc_google_iam_credentials_create(token=%s, authority_selector=%s, "
+      "reserved=%p)",
+      3, (token, authority_selector, reserved));
+ GPR_ASSERT(reserved == NULL);
GPR_ASSERT(token != NULL);
GPR_ASSERT(authority_selector != NULL);
- c = gpr_malloc(sizeof(grpc_iam_credentials));
- memset(c, 0, sizeof(grpc_iam_credentials));
- c->base.type = GRPC_CREDENTIALS_TYPE_IAM;
+ c = gpr_malloc(sizeof(grpc_google_iam_credentials));
+ memset(c, 0, sizeof(grpc_google_iam_credentials));
+ c->base.type = GRPC_CALL_CREDENTIALS_TYPE_IAM;
c->base.vtable = &iam_vtable;
gpr_ref_init(&c->base.refcount, 1);
c->iam_md = grpc_credentials_md_store_create(2);
@@ -1228,3 +1134,147 @@ grpc_credentials *grpc_iam_credentials_create(const char *token,
c->iam_md, GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, authority_selector);
return &c->base;
}
+
+/* -- Plugin credentials. -- */
+
+typedef struct {
+ void *user_data;
+ grpc_credentials_metadata_cb cb;
+} grpc_metadata_plugin_request;
+
+static void plugin_destruct(grpc_call_credentials *creds) {
+ grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+ if (c->plugin.state != NULL && c->plugin.destroy != NULL) {
+ c->plugin.destroy(c->plugin.state);
+ }
+}
+
+static void plugin_md_request_metadata_ready(void *request,
+ const grpc_metadata *md,
+ size_t num_md,
+ grpc_status_code status,
+ const char *error_details) {
+ /* called from application code */
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_metadata_plugin_request *r = (grpc_metadata_plugin_request *)request;
+ if (status != GRPC_STATUS_OK) {
+ if (error_details != NULL) {
+ gpr_log(GPR_ERROR, "Getting metadata from plugin failed with error: %s",
+ error_details);
+ }
+ r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+ } else {
+ size_t i;
+ grpc_credentials_md *md_array = NULL;
+ if (num_md > 0) {
+ md_array = gpr_malloc(num_md * sizeof(grpc_credentials_md));
+ for (i = 0; i < num_md; i++) {
+ md_array[i].key = gpr_slice_from_copied_string(md[i].key);
+ md_array[i].value =
+ gpr_slice_from_copied_buffer(md[i].value, md[i].value_length);
+ }
+ }
+ r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK);
+ if (md_array != NULL) {
+ for (i = 0; i < num_md; i++) {
+ gpr_slice_unref(md_array[i].key);
+ gpr_slice_unref(md_array[i].value);
+ }
+ gpr_free(md_array);
+ }
+ }
+ gpr_free(r);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_credentials *creds,
+ grpc_pollset *pollset,
+ grpc_auth_metadata_context context,
+ grpc_credentials_metadata_cb cb,
+ void *user_data) {
+ grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+ if (c->plugin.get_metadata != NULL) {
+ grpc_metadata_plugin_request *request = gpr_malloc(sizeof(*request));
+ memset(request, 0, sizeof(*request));
+ request->user_data = user_data;
+ request->cb = cb;
+ c->plugin.get_metadata(c->plugin.state, context,
+ plugin_md_request_metadata_ready, request);
+ } else {
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+ }
+}
+
+static grpc_call_credentials_vtable plugin_vtable = {
+ plugin_destruct, plugin_get_request_metadata};
+
+grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
+ grpc_metadata_credentials_plugin plugin, void *reserved) {
+ grpc_plugin_credentials *c = gpr_malloc(sizeof(*c));
+ GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
+ (reserved));
+ GPR_ASSERT(reserved == NULL);
+ memset(c, 0, sizeof(*c));
+ c->base.type = plugin.type;
+ c->base.vtable = &plugin_vtable;
+ gpr_ref_init(&c->base.refcount, 1);
+ c->plugin = plugin;
+ return &c->base;
+}
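/* Example (illustrative sketch): a minimal application-side metadata plugin.
   The grpc_metadata_credentials_plugin field names and the callback signature
   are assumed from the public grpc_security.h header; the metadata key and
   value are placeholders. */
static void my_plugin_get_metadata(void *state,
                                   grpc_auth_metadata_context context,
                                   grpc_credentials_plugin_metadata_cb cb,
                                   void *user_data) {
  grpc_metadata md;
  memset(&md, 0, sizeof(md));
  md.key = "x-custom-auth";
  md.value = "my-plugin-token";
  md.value_length = strlen(md.value);
  cb(user_data, &md, 1, GRPC_STATUS_OK, NULL);
}

static void my_plugin_destroy(void *state) {}

static grpc_call_credentials *my_plugin_credentials_create(void) {
  grpc_metadata_credentials_plugin plugin;
  plugin.get_metadata = my_plugin_get_metadata;
  plugin.destroy = my_plugin_destroy;
  plugin.state = NULL;
  plugin.type = "my-plugin";
  return grpc_metadata_credentials_create_from_plugin(plugin, NULL);
}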
+
+/* -- Composite channel credentials. -- */
+
+static void composite_channel_destruct(grpc_channel_credentials *creds) {
+ grpc_composite_channel_credentials *c =
+ (grpc_composite_channel_credentials *)creds;
+ grpc_channel_credentials_unref(c->inner_creds);
+ grpc_call_credentials_unref(c->call_creds);
+}
+
+static grpc_security_status composite_channel_create_security_connector(
+ grpc_channel_credentials *creds, grpc_call_credentials *call_creds,
+ const char *target, const grpc_channel_args *args,
+ grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
+ grpc_composite_channel_credentials *c =
+ (grpc_composite_channel_credentials *)creds;
+ grpc_security_status status = GRPC_SECURITY_ERROR;
+
+ GPR_ASSERT(c->inner_creds != NULL && c->call_creds != NULL &&
+ c->inner_creds->vtable != NULL &&
+ c->inner_creds->vtable->create_security_connector != NULL);
+ /* If we are passed a call_creds, create a call composite to pass it
+ downstream. */
+ if (call_creds != NULL) {
+ grpc_call_credentials *composite_call_creds =
+ grpc_composite_call_credentials_create(c->call_creds, call_creds, NULL);
+ status = c->inner_creds->vtable->create_security_connector(
+ c->inner_creds, composite_call_creds, target, args, sc, new_args);
+ grpc_call_credentials_unref(composite_call_creds);
+ } else {
+ status = c->inner_creds->vtable->create_security_connector(
+ c->inner_creds, c->call_creds, target, args, sc, new_args);
+ }
+ return status;
+}
+
+static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
+ composite_channel_destruct, composite_channel_create_security_connector};
+
+grpc_channel_credentials *grpc_composite_channel_credentials_create(
+ grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
+ void *reserved) {
+ grpc_composite_channel_credentials *c = gpr_malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ GPR_ASSERT(channel_creds != NULL && call_creds != NULL && reserved == NULL);
+ GRPC_API_TRACE(
+ "grpc_composite_channel_credentials_create(channel_creds=%p, "
+ "call_creds=%p, reserved=%p)",
+ 3, (channel_creds, call_creds, reserved));
+ c->base.type = channel_creds->type;
+ c->base.vtable = &composite_channel_credentials_vtable;
+ gpr_ref_init(&c->base.refcount, 1);
+ c->inner_creds = grpc_channel_credentials_ref(channel_creds);
+ c->call_creds = grpc_call_credentials_ref(call_creds);
+ return &c->base;
+}
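/* Example (illustrative sketch): attaching call credentials to channel
   credentials. Here the fake transport security channel credentials defined
   above are paired with access-token call credentials; the token value is a
   placeholder. */
grpc_channel_credentials *fake_channel_creds =
    grpc_fake_transport_security_credentials_create();
grpc_call_credentials *token_call_creds =
    grpc_access_token_credentials_create("my-access-token", NULL);
grpc_channel_credentials *combined_creds =
    grpc_composite_channel_credentials_create(fake_channel_creds,
                                              token_call_creds, NULL);
/* The composite takes its own references. */
grpc_channel_credentials_unref(fake_channel_creds);
grpc_call_credentials_unref(token_call_creds);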
diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h
index 75af73a0c6..133aa9d8d9 100644
--- a/src/core/security/credentials.h
+++ b/src/core/security/credentials.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,16 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
-#define GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
+#ifndef GRPC_CORE_SECURITY_CREDENTIALS_H
+#define GRPC_CORE_SECURITY_CREDENTIALS_H
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/sync.h>
+#include "src/core/httpcli/httpcli.h"
+#include "src/core/security/json_token.h"
#include "src/core/security/security_connector.h"
struct grpc_httpcli_response;
@@ -50,14 +52,18 @@ typedef enum {
GRPC_CREDENTIALS_ERROR
} grpc_credentials_status;
-#define GRPC_CREDENTIALS_TYPE_SSL "Ssl"
-#define GRPC_CREDENTIALS_TYPE_OAUTH2 "Oauth2"
-#define GRPC_CREDENTIALS_TYPE_JWT "Jwt"
-#define GRPC_CREDENTIALS_TYPE_IAM "Iam"
-#define GRPC_CREDENTIALS_TYPE_COMPOSITE "Composite"
-#define GRPC_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY "FakeTransportSecurity"
+#define GRPC_FAKE_TRANSPORT_SECURITY_TYPE "fake"
-#define GRPC_AUTHORIZATION_METADATA_KEY "Authorization"
+#define GRPC_CHANNEL_CREDENTIALS_TYPE_SSL "Ssl"
+#define GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY \
+ "FakeTransportSecurity"
+
+#define GRPC_CALL_CREDENTIALS_TYPE_OAUTH2 "Oauth2"
+#define GRPC_CALL_CREDENTIALS_TYPE_JWT "Jwt"
+#define GRPC_CALL_CREDENTIALS_TYPE_IAM "Iam"
+#define GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE "Composite"
+
+#define GRPC_AUTHORIZATION_METADATA_KEY "authorization"
#define GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY \
"x-goog-iam-authorization-token"
#define GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY "x-goog-iam-authority-selector"
@@ -82,6 +88,49 @@ typedef enum {
#define GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING \
"client_id=%s&client_secret=%s&refresh_token=%s&grant_type=refresh_token"
+/* --- Google utils --- */
+
+/* It is the caller's responsibility to gpr_free the result if not NULL. */
+char *grpc_get_well_known_google_credentials_file_path(void);
+
+/* Implementation function for the different platforms. */
+char *grpc_get_well_known_google_credentials_file_path_impl(void);
+
+/* Override for testing only. Not thread-safe. */
+typedef char *(*grpc_well_known_credentials_path_getter)(void);
+void grpc_override_well_known_credentials_path_getter(
+ grpc_well_known_credentials_path_getter getter);
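/* Example (illustrative sketch, test-only): point credential discovery at a
   fixture file instead of the real well-known location; the path is a
   placeholder. */
static char *test_credentials_path(void) {
  return gpr_strdup("/path/to/test/credentials.json");
}

static void install_test_credentials_path(void) {
  grpc_override_well_known_credentials_path_getter(test_credentials_path);
}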
+
+/* --- grpc_channel_credentials. --- */
+
+typedef struct {
+ void (*destruct)(grpc_channel_credentials *c);
+
+ grpc_security_status (*create_security_connector)(
+ grpc_channel_credentials *c, grpc_call_credentials *call_creds,
+ const char *target, const grpc_channel_args *args,
+ grpc_channel_security_connector **sc, grpc_channel_args **new_args);
+} grpc_channel_credentials_vtable;
+
+struct grpc_channel_credentials {
+ const grpc_channel_credentials_vtable *vtable;
+ const char *type;
+ gpr_refcount refcount;
+};
+
+grpc_channel_credentials *grpc_channel_credentials_ref(
+ grpc_channel_credentials *creds);
+void grpc_channel_credentials_unref(grpc_channel_credentials *creds);
+
+/* Creates a security connector for the channel. May also create new channel
+   args to be used for the channel in place of the passed-in const args; if
+   *new_args is returned non-NULL, the caller is responsible for destroying it
+   after channel creation. */
+grpc_security_status grpc_channel_credentials_create_security_connector(
+ grpc_channel_credentials *creds, const char *target,
+ const grpc_channel_args *args, grpc_channel_security_connector **sc,
+ grpc_channel_args **new_args);
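/* Example (illustrative sketch): caller-side handling of new_args. creds,
   target and args are assumed to be in scope; grpc_channel_args_destroy is
   from channel_args.h. */
grpc_channel_security_connector *sc = NULL;
grpc_channel_args *new_args = NULL;
if (grpc_channel_credentials_create_security_connector(
        creds, target, args, &sc, &new_args) == GRPC_SECURITY_OK) {
  /* ... build the channel with new_args != NULL ? new_args : args ... */
  if (new_args != NULL) grpc_channel_args_destroy(new_args);
}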
+
/* --- grpc_credentials_md. --- */
typedef struct {
@@ -108,70 +157,52 @@ grpc_credentials_md_store *grpc_credentials_md_store_ref(
grpc_credentials_md_store *store);
void grpc_credentials_md_store_unref(grpc_credentials_md_store *store);
-/* --- grpc_credentials. --- */
-
-/* It is the caller's responsibility to gpr_free the result if not NULL. */
-char *grpc_get_well_known_google_credentials_file_path(void);
+/* --- grpc_call_credentials. --- */
-typedef void (*grpc_credentials_metadata_cb)(void *user_data,
+typedef void (*grpc_credentials_metadata_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status);
typedef struct {
- void (*destroy)(grpc_credentials *c);
- int (*has_request_metadata)(const grpc_credentials *c);
- int (*has_request_metadata_only)(const grpc_credentials *c);
- void (*get_request_metadata)(grpc_credentials *c, grpc_pollset *pollset,
- const char *service_url,
+ void (*destruct)(grpc_call_credentials *c);
+ void (*get_request_metadata)(grpc_exec_ctx *exec_ctx,
+ grpc_call_credentials *c, grpc_pollset *pollset,
+ grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data);
- grpc_security_status (*create_security_connector)(
- grpc_credentials *c, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args);
-} grpc_credentials_vtable;
+} grpc_call_credentials_vtable;
-struct grpc_credentials {
- const grpc_credentials_vtable *vtable;
+struct grpc_call_credentials {
+ const grpc_call_credentials_vtable *vtable;
const char *type;
gpr_refcount refcount;
};
-grpc_credentials *grpc_credentials_ref(grpc_credentials *creds);
-void grpc_credentials_unref(grpc_credentials *creds);
-int grpc_credentials_has_request_metadata(grpc_credentials *creds);
-int grpc_credentials_has_request_metadata_only(grpc_credentials *creds);
-void grpc_credentials_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data);
-
-/* Creates a security connector for the channel. May also create new channel
- args for the channel to be used in place of the passed in const args if
- returned non NULL. In that case the caller is responsible for destroying
- new_args after channel creation. */
-grpc_security_status grpc_credentials_create_security_connector(
- grpc_credentials *creds, const char *target, const grpc_channel_args *args,
- grpc_credentials *request_metadata_creds,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args);
+grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds);
+void grpc_call_credentials_unref(grpc_call_credentials *creds);
+void grpc_call_credentials_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
+ grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_credentials_metadata_cb cb, void *user_data);
typedef struct {
- grpc_credentials **creds_array;
+ grpc_call_credentials **creds_array;
size_t num_creds;
-} grpc_credentials_array;
+} grpc_call_credentials_array;
-const grpc_credentials_array *grpc_composite_credentials_get_credentials(
- grpc_credentials *composite_creds);
+const grpc_call_credentials_array *
+grpc_composite_call_credentials_get_credentials(
+ grpc_call_credentials *composite_creds);
/* Returns creds if creds is of the specified type or the inner creds of the
specified type (if found), if the creds is of type COMPOSITE.
If composite_creds is not NULL, *composite_creds will point to creds if of
type COMPOSITE in case of success. */
-grpc_credentials *grpc_credentials_contains_type(
- grpc_credentials *creds, const char *type,
- grpc_credentials **composite_creds);
+grpc_call_credentials *grpc_credentials_contains_type(
+ grpc_call_credentials *creds, const char *type,
+ grpc_call_credentials **composite_creds);
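/* Example (illustrative sketch): probing a possibly composite call credentials
   object for an inner OAuth2 member; creds is assumed to be in scope. */
grpc_call_credentials *composite = NULL;
grpc_call_credentials *oauth2 = grpc_credentials_contains_type(
    creds, GRPC_CALL_CREDENTIALS_TYPE_OAUTH2, &composite);
if (oauth2 != NULL && composite != NULL) {
  /* creds is a composite and oauth2 is one of its inner credentials. */
}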
/* Exposed for testing only. */
grpc_credentials_status
@@ -179,24 +210,167 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime);
-/* Simulates an oauth2 token fetch with the specified value for testing. */
-grpc_credentials *grpc_fake_oauth2_credentials_create(
- const char *token_md_value, int is_async);
+void grpc_flush_cached_google_default_credentials(void);
+
+/* Metadata-only credentials with the specified key and value where
+ asynchronicity can be simulated for testing. */
+grpc_call_credentials *grpc_md_only_test_credentials_create(
+ const char *md_key, const char *md_value, int is_async);
+
+/* Private constructor for jwt credentials from an already parsed json key.
+ Takes ownership of the key. */
+grpc_call_credentials *
+grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
+ grpc_auth_json_key key, gpr_timespec token_lifetime);
+
+/* Private constructor for refresh token credentials from an already parsed
+ refresh token. Takes ownership of the refresh token. */
+grpc_call_credentials *
+grpc_refresh_token_credentials_create_from_auth_refresh_token(
+ grpc_auth_refresh_token token);
/* --- grpc_server_credentials. --- */
typedef struct {
- void (*destroy)(grpc_server_credentials *c);
+ void (*destruct)(grpc_server_credentials *c);
grpc_security_status (*create_security_connector)(
- grpc_server_credentials *c, grpc_security_connector **sc);
+ grpc_server_credentials *c, grpc_server_security_connector **sc);
} grpc_server_credentials_vtable;
struct grpc_server_credentials {
const grpc_server_credentials_vtable *vtable;
const char *type;
+ gpr_refcount refcount;
+ grpc_auth_metadata_processor processor;
};
grpc_security_status grpc_server_credentials_create_security_connector(
- grpc_server_credentials *creds, grpc_security_connector **sc);
+ grpc_server_credentials *creds, grpc_server_security_connector **sc);
+
+grpc_server_credentials *grpc_server_credentials_ref(
+ grpc_server_credentials *creds);
+
+void grpc_server_credentials_unref(grpc_server_credentials *creds);
+
+#define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials"
+
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *c);
+grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg);
+grpc_server_credentials *grpc_find_server_credentials_in_args(
+ const grpc_channel_args *args);
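/* Example (illustrative sketch): passing server credentials through channel
   args and recovering them later; server_creds is assumed to be in scope and
   the grpc_channel_args layout {num_args, args} comes from the public grpc.h
   header. */
grpc_arg cred_arg = grpc_server_credentials_to_arg(server_creds);
grpc_channel_args cred_args = {1, &cred_arg};
grpc_server_credentials *found =
    grpc_find_server_credentials_in_args(&cred_args);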
+
+/* -- Fake transport security credentials. -- */
+
+/* Creates a fake transport security credentials object for testing. */
+grpc_channel_credentials *grpc_fake_transport_security_credentials_create(void);
+/* Creates a fake server transport security credentials object for testing. */
+grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
+ void);
+
+/* -- Ssl credentials. -- */
+
+typedef struct {
+ grpc_channel_credentials base;
+ grpc_ssl_config config;
+} grpc_ssl_credentials;
+
+typedef struct {
+ grpc_server_credentials base;
+ grpc_ssl_server_config config;
+} grpc_ssl_server_credentials;
+
+/* -- Channel composite credentials. -- */
+
+typedef struct {
+ grpc_channel_credentials base;
+ grpc_channel_credentials *inner_creds;
+ grpc_call_credentials *call_creds;
+} grpc_composite_channel_credentials;
+
+/* -- Jwt credentials -- */
+
+typedef struct {
+ grpc_call_credentials base;
+
+ /* Have a simple cache for now with just 1 entry. We could have a map based on
+ the service_url for a more sophisticated one. */
+ gpr_mu cache_mu;
+ struct {
+ grpc_credentials_md_store *jwt_md;
+ char *service_url;
+ gpr_timespec jwt_expiration;
+ } cached;
+
+ grpc_auth_json_key key;
+ gpr_timespec jwt_lifetime;
+} grpc_service_account_jwt_access_credentials;
+
+/* -- Oauth2TokenFetcher credentials --
+
+ This object is a base for credentials that need to acquire an oauth2 token
+ from an http service. */
+
+typedef struct grpc_credentials_metadata_request
+ grpc_credentials_metadata_request;
+
+typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
+ grpc_credentials_metadata_request *req,
+ grpc_httpcli_context *http_context,
+ grpc_pollset *pollset,
+ grpc_httpcli_response_cb response_cb,
+ gpr_timespec deadline);
+
+typedef struct {
+ grpc_call_credentials base;
+ gpr_mu mu;
+ grpc_credentials_md_store *access_token_md;
+ gpr_timespec token_expiration;
+ grpc_httpcli_context httpcli_context;
+ grpc_fetch_oauth2_func fetch_func;
+} grpc_oauth2_token_fetcher_credentials;
+
+/* -- GoogleRefreshToken credentials. -- */
+
+typedef struct {
+ grpc_oauth2_token_fetcher_credentials base;
+ grpc_auth_refresh_token refresh_token;
+} grpc_google_refresh_token_credentials;
+
+/* -- Oauth2 Access Token credentials. -- */
+
+typedef struct {
+ grpc_call_credentials base;
+ grpc_credentials_md_store *access_token_md;
+} grpc_access_token_credentials;
+
+/* -- Metadata-only Test credentials. -- */
+
+typedef struct {
+ grpc_call_credentials base;
+ grpc_credentials_md_store *md_store;
+ int is_async;
+} grpc_md_only_test_credentials;
+
+/* -- GoogleIAM credentials. -- */
+
+typedef struct {
+ grpc_call_credentials base;
+ grpc_credentials_md_store *iam_md;
+} grpc_google_iam_credentials;
+
+/* -- Composite credentials. -- */
+
+typedef struct {
+ grpc_call_credentials base;
+ grpc_call_credentials_array inner;
+} grpc_composite_call_credentials;
+
+/* -- Plugin credentials. -- */
+
+typedef struct {
+ grpc_call_credentials base;
+ grpc_metadata_credentials_plugin plugin;
+ grpc_credentials_md_store *plugin_md;
+} grpc_plugin_credentials;
-#endif /* GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H */
+#endif /* GRPC_CORE_SECURITY_CREDENTIALS_H */
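The rewritten credentials.h above turns grpc_server_credentials into a refcounted object with a destruct hook in its vtable plus helpers for carrying credentials through channel args (GRPC_SERVER_CREDENTIALS_ARG, to_arg/from_arg/find_in_args). A minimal, self-contained sketch of that ref/unref-plus-vtable pattern follows; the names (my_creds, fake_vtable) are illustrative, the refcount is a plain int rather than gpr_refcount, and no thread safety is attempted.

/* Sketch of the refcounted vtable pattern used above; not gRPC APIs. */
#include <stdio.h>
#include <stdlib.h>

typedef struct my_creds my_creds;

typedef struct {
  void (*destruct)(my_creds *c); /* frees type-specific state only */
} my_creds_vtable;

struct my_creds {
  const my_creds_vtable *vtable;
  const char *type;
  int refcount; /* stand-in for gpr_refcount */
};

static my_creds *my_creds_ref(my_creds *c) {
  c->refcount++;
  return c;
}

static void my_creds_unref(my_creds *c) {
  if (--c->refcount == 0) {
    c->vtable->destruct(c); /* subtype cleanup */
    free(c);                /* shared base cleanup */
  }
}

static void fake_destruct(my_creds *c) { printf("destructing %s\n", c->type); }
static const my_creds_vtable fake_vtable = {fake_destruct};

int main(void) {
  my_creds *c = malloc(sizeof(*c));
  c->vtable = &fake_vtable;
  c->type = "fake";
  c->refcount = 1;
  my_creds_ref(c);   /* e.g. a channel arg holding a second reference */
  my_creds_unref(c); /* channel arg dropped */
  my_creds_unref(c); /* last owner: destruct runs, memory is freed */
  return 0;
}

The last unref both runs the subtype destructor and releases the shared base, which is the same split the destruct vtable entry expresses.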
diff --git a/src/core/security/credentials_metadata.c b/src/core/security/credentials_metadata.c
index 22c786be56..b8a132f1ea 100644
--- a/src/core/security/credentials_metadata.c
+++ b/src/core/security/credentials_metadata.c
@@ -47,7 +47,8 @@ static void store_ensure_capacity(grpc_credentials_md_store *store) {
grpc_credentials_md_store *grpc_credentials_md_store_create(
size_t initial_capacity) {
- grpc_credentials_md_store *store = gpr_malloc(sizeof(grpc_credentials_md_store));
+ grpc_credentials_md_store *store =
+ gpr_malloc(sizeof(grpc_credentials_md_store));
memset(store, 0, sizeof(grpc_credentials_md_store));
if (initial_capacity > 0) {
store->entries = gpr_malloc(initial_capacity * sizeof(grpc_credentials_md));
@@ -98,4 +99,3 @@ void grpc_credentials_md_store_unref(grpc_credentials_md_store *store) {
gpr_free(store);
}
}
-
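The hunk above is only a formatting cleanup, but for context: grpc_credentials_md_store is a growable array of metadata entries, created with an optional initial capacity and expanded by store_ensure_capacity as entries are appended. A rough stand-alone sketch of that shape follows; md_store/md_entry are made-up names and the doubling growth policy is illustrative rather than taken from the gRPC code.

/* Growable metadata store sketch; illustrative names only. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { const char *key; const char *value; } md_entry;

typedef struct {
  md_entry *entries;
  size_t num_entries;
  size_t allocated;
} md_store;

static md_store *md_store_create(size_t initial_capacity) {
  md_store *s = calloc(1, sizeof(*s));
  if (initial_capacity > 0) {
    s->entries = malloc(initial_capacity * sizeof(md_entry));
    s->allocated = initial_capacity;
  }
  return s;
}

static void md_store_ensure_capacity(md_store *s) {
  if (s->num_entries < s->allocated) return;
  s->allocated = s->allocated == 0 ? 4 : s->allocated * 2;
  s->entries = realloc(s->entries, s->allocated * sizeof(md_entry));
}

static void md_store_add(md_store *s, const char *key, const char *value) {
  md_store_ensure_capacity(s);
  s->entries[s->num_entries].key = key;
  s->entries[s->num_entries].value = value;
  s->num_entries++;
}

int main(void) {
  md_store *s = md_store_create(1);
  md_store_add(s, "authorization", "Bearer <token>");
  md_store_add(s, "x-illustrative-selector", "selector");
  printf("%zu entries, capacity %zu\n", s->num_entries, s->allocated);
  free(s->entries);
  free(s);
  return 0;
}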
diff --git a/src/core/security/credentials_posix.c b/src/core/security/credentials_posix.c
index 20f67a7f14..0c92bd4a96 100644
--- a/src/core/security/credentials_posix.c
+++ b/src/core/security/credentials_posix.c
@@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
-char *grpc_get_well_known_google_credentials_file_path(void) {
+char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *home = gpr_getenv("HOME");
if (home == NULL) {
diff --git a/src/core/security/credentials_win32.c b/src/core/security/credentials_win32.c
index 92dfd9bdfe..8ee9f706a1 100644
--- a/src/core/security/credentials_win32.c
+++ b/src/core/security/credentials_win32.c
@@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
-char *grpc_get_well_known_google_credentials_file_path(void) {
+char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *appdata_path = gpr_getenv("APPDATA");
if (appdata_path == NULL) {
diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c
index 5822ce6337..1f4f3e4aa5 100644
--- a/src/core/security/google_default_credentials.c
+++ b/src/core/security/google_default_credentials.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,30 +41,32 @@
#include "src/core/httpcli/httpcli.h"
#include "src/core/support/env.h"
-#include "src/core/support/file.h"
+#include "src/core/support/load_file.h"
+#include "src/core/surface/api_trace.h"
/* -- Constants. -- */
#define GRPC_COMPUTE_ENGINE_DETECTION_HOST "metadata.google.internal"
-#define GRPC_GOOGLE_CREDENTIALS_ENV_VAR "GOOGLE_APPLICATION_CREDENTIALS"
/* -- Default credentials. -- */
-static grpc_credentials *default_credentials = NULL;
+static grpc_channel_credentials *default_credentials = NULL;
static int compute_engine_detection_done = 0;
-static gpr_mu g_mu;
+static gpr_mu g_state_mu;
+static gpr_mu *g_polling_mu;
static gpr_once g_once = GPR_ONCE_INIT;
-static void init_default_credentials(void) { gpr_mu_init(&g_mu); }
+static void init_default_credentials(void) { gpr_mu_init(&g_state_mu); }
typedef struct {
- grpc_pollset pollset;
+ grpc_pollset *pollset;
int is_done;
int success;
} compute_engine_detector;
static void on_compute_engine_detection_http_response(
- void *user_data, const grpc_httpcli_response *response) {
+ grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
compute_engine_detector *detector = (compute_engine_detector *)user_data;
if (response != NULL && response->status == 200 && response->hdr_count > 0) {
/* Internet providers can return a generic response to all requests, so
@@ -79,22 +81,29 @@ static void on_compute_engine_detection_http_response(
}
}
}
- gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset));
+ gpr_mu_lock(g_polling_mu);
detector->is_done = 1;
- grpc_pollset_kick(&detector->pollset);
- gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
+ grpc_pollset_kick(detector->pollset, NULL);
+ gpr_mu_unlock(g_polling_mu);
+}
+
+static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool s) {
+ grpc_pollset_destroy(p);
}
static int is_stack_running_on_compute_engine(void) {
compute_engine_detector detector;
grpc_httpcli_request request;
grpc_httpcli_context context;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure destroy_closure;
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
- gpr_timespec max_detection_delay = {1, 0};
+ gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
- grpc_pollset_init(&detector.pollset);
+ detector.pollset = gpr_malloc(grpc_pollset_size());
+ grpc_pollset_init(detector.pollset, &g_polling_mu);
detector.is_done = 0;
detector.success = 0;
@@ -104,80 +113,98 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
- grpc_httpcli_get(&context, &detector.pollset, &request,
- gpr_time_add(gpr_now(), max_detection_delay),
- on_compute_engine_detection_http_response, &detector);
+ grpc_httpcli_get(
+ &exec_ctx, &context, detector.pollset, &request,
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
+ on_compute_engine_detection_http_response, &detector);
+
+ grpc_exec_ctx_finish(&exec_ctx);
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */
- gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
+ gpr_mu_lock(g_polling_mu);
while (!detector.is_done) {
- grpc_pollset_work(&detector.pollset, gpr_inf_future);
+ grpc_pollset_worker *worker = NULL;
+ grpc_pollset_work(&exec_ctx, detector.pollset, &worker,
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
- gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
+ gpr_mu_unlock(g_polling_mu);
grpc_httpcli_context_destroy(&context);
- grpc_pollset_destroy(&detector.pollset);
+ grpc_closure_init(&destroy_closure, destroy_pollset, detector.pollset);
+ grpc_pollset_shutdown(&exec_ctx, detector.pollset, &destroy_closure);
+ grpc_exec_ctx_finish(&exec_ctx);
+ g_polling_mu = NULL;
+
+ gpr_free(detector.pollset);
return detector.success;
}
/* Takes ownership of creds_path if not NULL. */
-static grpc_credentials *create_jwt_creds_from_path(char *creds_path) {
- grpc_credentials *result = NULL;
- gpr_slice creds_data;
+static grpc_call_credentials *create_default_creds_from_path(char *creds_path) {
+ grpc_json *json = NULL;
+ grpc_auth_json_key key;
+ grpc_auth_refresh_token token;
+ grpc_call_credentials *result = NULL;
+ gpr_slice creds_data = gpr_empty_slice();
int file_ok = 0;
- if (creds_path == NULL) return NULL;
- creds_data = gpr_load_file(creds_path, 1, &file_ok);
- gpr_free(creds_path);
- if (file_ok) {
- result = grpc_jwt_credentials_create(
- (const char *)GPR_SLICE_START_PTR(creds_data),
- grpc_max_auth_token_lifetime);
- gpr_slice_unref(creds_data);
+ if (creds_path == NULL) goto end;
+ creds_data = gpr_load_file(creds_path, 0, &file_ok);
+ if (!file_ok) goto end;
+ json = grpc_json_parse_string_with_len(
+ (char *)GPR_SLICE_START_PTR(creds_data), GPR_SLICE_LENGTH(creds_data));
+ if (json == NULL) goto end;
+
+ /* First, try an auth json key. */
+ key = grpc_auth_json_key_create_from_json(json);
+ if (grpc_auth_json_key_is_valid(&key)) {
+ result =
+ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
+ key, grpc_max_auth_token_lifetime());
+ goto end;
}
- return result;
-}
-/* Takes ownership of creds_path if not NULL. */
-static grpc_credentials *create_refresh_token_creds_from_path(
- char *creds_path) {
- grpc_credentials *result = NULL;
- gpr_slice creds_data;
- int file_ok = 0;
- if (creds_path == NULL) return NULL;
- creds_data = gpr_load_file(creds_path, 1, &file_ok);
- gpr_free(creds_path);
- if (file_ok) {
- result = grpc_refresh_token_credentials_create(
- (const char *)GPR_SLICE_START_PTR(creds_data));
- gpr_slice_unref(creds_data);
+ /* Then try a refresh token if the auth json key was invalid. */
+ token = grpc_auth_refresh_token_create_from_json(json);
+ if (grpc_auth_refresh_token_is_valid(&token)) {
+ result =
+ grpc_refresh_token_credentials_create_from_auth_refresh_token(token);
+ goto end;
}
+
+end:
+ if (creds_path != NULL) gpr_free(creds_path);
+ gpr_slice_unref(creds_data);
+ if (json != NULL) grpc_json_destroy(json);
return result;
}
-grpc_credentials *grpc_google_default_credentials_create(void) {
- grpc_credentials *result = NULL;
- int serving_cached_credentials = 0;
+grpc_channel_credentials *grpc_google_default_credentials_create(void) {
+ grpc_channel_credentials *result = NULL;
+ grpc_call_credentials *call_creds = NULL;
+
+ GRPC_API_TRACE("grpc_google_default_credentials_create(void)", 0, ());
+
gpr_once_init(&g_once, init_default_credentials);
- gpr_mu_lock(&g_mu);
+ gpr_mu_lock(&g_state_mu);
if (default_credentials != NULL) {
- result = grpc_credentials_ref(default_credentials);
- serving_cached_credentials = 1;
+ result = grpc_channel_credentials_ref(default_credentials);
goto end;
}
/* First, try the environment variable. */
- result =
- create_jwt_creds_from_path(gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR));
- if (result != NULL) goto end;
+ call_creds = create_default_creds_from_path(
+ gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR));
+ if (call_creds != NULL) goto end;
/* Then the well-known file. */
- result = create_refresh_token_creds_from_path(
+ call_creds = create_default_creds_from_path(
grpc_get_well_known_google_credentials_file_path());
- if (result != NULL) goto end;
+ if (call_creds != NULL) goto end;
/* At last try to see if we're on compute engine (do the detection only once
since it requires a network test). */
@@ -185,19 +212,54 @@ grpc_credentials *grpc_google_default_credentials_create(void) {
int need_compute_engine_creds = is_stack_running_on_compute_engine();
compute_engine_detection_done = 1;
if (need_compute_engine_creds) {
- result = grpc_compute_engine_credentials_create();
+ call_creds = grpc_google_compute_engine_credentials_create(NULL);
}
}
end:
- if (!serving_cached_credentials && result != NULL) {
- /* Blend with default ssl credentials and add a global reference so that it
- can be cached and re-served. */
- result = grpc_composite_credentials_create(
- grpc_ssl_credentials_create(NULL, NULL), result);
- GPR_ASSERT(result != NULL);
- default_credentials = grpc_credentials_ref(result);
+ if (result == NULL) {
+ if (call_creds != NULL) {
+ /* Blend with default ssl credentials and add a global reference so that
+ it
+ can be cached and re-served. */
+ grpc_channel_credentials *ssl_creds =
+ grpc_ssl_credentials_create(NULL, NULL, NULL);
+ default_credentials = grpc_channel_credentials_ref(
+ grpc_composite_channel_credentials_create(ssl_creds, call_creds,
+ NULL));
+ GPR_ASSERT(default_credentials != NULL);
+ grpc_channel_credentials_unref(ssl_creds);
+ grpc_call_credentials_unref(call_creds);
+ result = default_credentials;
+ } else {
+ gpr_log(GPR_ERROR, "Could not create google default credentials.");
+ }
}
- gpr_mu_unlock(&g_mu);
+ gpr_mu_unlock(&g_state_mu);
return result;
}
+
+void grpc_flush_cached_google_default_credentials(void) {
+ gpr_once_init(&g_once, init_default_credentials);
+ gpr_mu_lock(&g_state_mu);
+ if (default_credentials != NULL) {
+ grpc_channel_credentials_unref(default_credentials);
+ default_credentials = NULL;
+ }
+ compute_engine_detection_done = 0;
+ gpr_mu_unlock(&g_state_mu);
+}
+
+/* -- Well known credentials path. -- */
+
+static grpc_well_known_credentials_path_getter creds_path_getter = NULL;
+
+char *grpc_get_well_known_google_credentials_file_path(void) {
+ if (creds_path_getter != NULL) return creds_path_getter();
+ return grpc_get_well_known_google_credentials_file_path_impl();
+}
+
+void grpc_override_well_known_credentials_path_getter(
+ grpc_well_known_credentials_path_getter getter) {
+ creds_path_getter = getter;
+}
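grpc_google_default_credentials_create() above now tries, in order, the GRPC_GOOGLE_CREDENTIALS_ENV_VAR path, the well-known credentials file, and finally a one-time Compute Engine network probe, then blends the resulting call credentials with SSL channel credentials and caches the composite until grpc_flush_cached_google_default_credentials() resets it. Below is a compact sketch of that "ordered providers plus cached singleton" flow; everything in it (creds_t, provider_fn, the MY_APP_CREDENTIALS variable, the provider bodies) is hypothetical and the real locking is omitted.

/* Sketch: try credential sources in order, cache the first hit. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { char source[64]; } creds_t;
typedef creds_t *(*provider_fn)(void);

static creds_t *make(const char *src) {
  creds_t *c = malloc(sizeof(*c));
  snprintf(c->source, sizeof(c->source), "%s", src);
  return c;
}

/* 1. environment variable pointing at a key/refresh-token file */
static creds_t *from_env(void) {
  return getenv("MY_APP_CREDENTIALS") ? make("env") : NULL;
}
/* 2. well-known file in the user's home/appdata directory */
static creds_t *from_well_known_file(void) { return NULL; /* pretend missing */ }
/* 3. network probe (expensive, so real code remembers the answer) */
static creds_t *from_metadata_server(void) { return make("metadata-server"); }

static creds_t *g_cached = NULL; /* guarded by a mutex in real code */

static creds_t *default_creds_create(void) {
  static const provider_fn providers[] = {from_env, from_well_known_file,
                                          from_metadata_server};
  if (g_cached != NULL) return g_cached;
  for (size_t i = 0; i < sizeof(providers) / sizeof(providers[0]); i++) {
    creds_t *c = providers[i]();
    if (c != NULL) {
      g_cached = c; /* cache so later calls are cheap */
      return c;
    }
  }
  return NULL;
}

/* counterpart of grpc_flush_cached_google_default_credentials, for tests */
static void flush_cached_default_creds(void) {
  free(g_cached);
  g_cached = NULL;
}

int main(void) {
  creds_t *c = default_creds_create();
  printf("credentials from: %s\n", c ? c->source : "(none)");
  flush_cached_default_creds();
  return 0;
}

The real code additionally remembers a negative probe result via compute_engine_detection_done so the network test runs at most once per process.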
diff --git a/src/core/security/handshake.c b/src/core/security/handshake.c
new file mode 100644
index 0000000000..b5bb6667a7
--- /dev/null
+++ b/src/core/security/handshake.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/security/handshake.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include "src/core/security/security_context.h"
+#include "src/core/security/secure_endpoint.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+
+#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
+
+typedef struct {
+ grpc_security_connector *connector;
+ tsi_handshaker *handshaker;
+ bool is_client_side;
+ unsigned char *handshake_buffer;
+ size_t handshake_buffer_size;
+ grpc_endpoint *wrapped_endpoint;
+ grpc_endpoint *secure_endpoint;
+ gpr_slice_buffer left_overs;
+ gpr_slice_buffer incoming;
+ gpr_slice_buffer outgoing;
+ grpc_security_handshake_done_cb cb;
+ void *user_data;
+ grpc_closure on_handshake_data_sent_to_peer;
+ grpc_closure on_handshake_data_received_from_peer;
+ grpc_auth_context *auth_context;
+} grpc_security_handshake;
+
+static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
+ void *setup, bool success);
+
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
+ bool success);
+
+static void security_connector_remove_handshake(grpc_security_handshake *h) {
+ GPR_ASSERT(!h->is_client_side);
+ grpc_security_connector_handshake_list *node;
+ grpc_security_connector_handshake_list *tmp;
+ grpc_server_security_connector *sc =
+ (grpc_server_security_connector *)h->connector;
+ gpr_mu_lock(&sc->mu);
+ node = sc->handshaking_handshakes;
+ if (node && node->handshake == h) {
+ sc->handshaking_handshakes = node->next;
+ gpr_free(node);
+ gpr_mu_unlock(&sc->mu);
+ return;
+ }
+ while (node) {
+ if (node->next->handshake == h) {
+ tmp = node->next;
+ node->next = node->next->next;
+ gpr_free(tmp);
+ gpr_mu_unlock(&sc->mu);
+ return;
+ }
+ node = node->next;
+ }
+ gpr_mu_unlock(&sc->mu);
+}
+
+static void security_handshake_done(grpc_exec_ctx *exec_ctx,
+ grpc_security_handshake *h,
+ int is_success) {
+ if (!h->is_client_side) {
+ security_connector_remove_handshake(h);
+ }
+ if (is_success) {
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->secure_endpoint,
+ h->auth_context);
+ } else {
+ if (h->secure_endpoint != NULL) {
+ grpc_endpoint_shutdown(exec_ctx, h->secure_endpoint);
+ grpc_endpoint_destroy(exec_ctx, h->secure_endpoint);
+ } else {
+ grpc_endpoint_destroy(exec_ctx, h->wrapped_endpoint);
+ }
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, NULL, NULL);
+ }
+ if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
+ if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
+ gpr_slice_buffer_destroy(&h->left_overs);
+ gpr_slice_buffer_destroy(&h->outgoing);
+ gpr_slice_buffer_destroy(&h->incoming);
+ GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
+ GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
+ gpr_free(h);
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_security_status status,
+ grpc_auth_context *auth_context) {
+ grpc_security_handshake *h = user_data;
+ tsi_frame_protector *protector;
+ tsi_result result;
+ if (status != GRPC_SECURITY_OK) {
+ gpr_log(GPR_ERROR, "Error checking peer.");
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+ h->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "handshake");
+ result =
+ tsi_handshaker_create_frame_protector(h->handshaker, NULL, &protector);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Frame protector creation failed with error %s.",
+ tsi_result_to_string(result));
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+ h->secure_endpoint =
+ grpc_secure_endpoint_create(protector, h->wrapped_endpoint,
+ h->left_overs.slices, h->left_overs.count);
+ h->left_overs.count = 0;
+ h->left_overs.length = 0;
+ security_handshake_done(exec_ctx, h, 1);
+ return;
+}
+
+static void check_peer(grpc_exec_ctx *exec_ctx, grpc_security_handshake *h) {
+ tsi_peer peer;
+ tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer);
+
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Peer extraction failed with error %s",
+ tsi_result_to_string(result));
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+ grpc_security_connector_check_peer(exec_ctx, h->connector, peer,
+ on_peer_checked, h);
+}
+
+static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_handshake *h) {
+ size_t offset = 0;
+ tsi_result result = TSI_OK;
+ gpr_slice to_send;
+
+ do {
+ size_t to_send_size = h->handshake_buffer_size - offset;
+ result = tsi_handshaker_get_bytes_to_send_to_peer(
+ h->handshaker, h->handshake_buffer + offset, &to_send_size);
+ offset += to_send_size;
+ if (result == TSI_INCOMPLETE_DATA) {
+ h->handshake_buffer_size *= 2;
+ h->handshake_buffer =
+ gpr_realloc(h->handshake_buffer, h->handshake_buffer_size);
+ }
+ } while (result == TSI_INCOMPLETE_DATA);
+
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshake failed with error %s",
+ tsi_result_to_string(result));
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+
+ to_send =
+ gpr_slice_from_copied_buffer((const char *)h->handshake_buffer, offset);
+ gpr_slice_buffer_reset_and_unref(&h->outgoing);
+ gpr_slice_buffer_add(&h->outgoing, to_send);
+ /* TODO(klempner,jboeuf): This should probably use the client setup
+ deadline */
+ grpc_endpoint_write(exec_ctx, h->wrapped_endpoint, &h->outgoing,
+ &h->on_handshake_data_sent_to_peer);
+}
+
+static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
+ void *handshake,
+ bool success) {
+ grpc_security_handshake *h = handshake;
+ size_t consumed_slice_size = 0;
+ tsi_result result = TSI_OK;
+ size_t i;
+ size_t num_left_overs;
+ int has_left_overs_in_current_slice = 0;
+
+ if (!success) {
+ gpr_log(GPR_ERROR, "Read failed.");
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+
+ for (i = 0; i < h->incoming.count; i++) {
+ consumed_slice_size = GPR_SLICE_LENGTH(h->incoming.slices[i]);
+ result = tsi_handshaker_process_bytes_from_peer(
+ h->handshaker, GPR_SLICE_START_PTR(h->incoming.slices[i]),
+ &consumed_slice_size);
+ if (!tsi_handshaker_is_in_progress(h->handshaker)) break;
+ }
+
+ if (tsi_handshaker_is_in_progress(h->handshaker)) {
+ /* We may need more data. */
+ if (result == TSI_INCOMPLETE_DATA) {
+ grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
+ &h->on_handshake_data_received_from_peer);
+ return;
+ } else {
+ send_handshake_bytes_to_peer(exec_ctx, h);
+ return;
+ }
+ }
+
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshake failed with error %s",
+ tsi_result_to_string(result));
+ security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+
+  /* Handshake is done and successful at this point. */
+ has_left_overs_in_current_slice =
+ (consumed_slice_size < GPR_SLICE_LENGTH(h->incoming.slices[i]));
+ num_left_overs =
+ (has_left_overs_in_current_slice ? 1 : 0) + h->incoming.count - i - 1;
+ if (num_left_overs == 0) {
+ check_peer(exec_ctx, h);
+ return;
+ }
+
+  /* Put the leftovers in our buffer (ownership transferred). */
+ if (has_left_overs_in_current_slice) {
+ gpr_slice_buffer_add(
+ &h->left_overs,
+ gpr_slice_split_tail(&h->incoming.slices[i], consumed_slice_size));
+ gpr_slice_unref(
+ h->incoming.slices[i]); /* split_tail above increments refcount. */
+ }
+ gpr_slice_buffer_addn(
+ &h->left_overs, &h->incoming.slices[i + 1],
+ num_left_overs - (size_t)has_left_overs_in_current_slice);
+ check_peer(exec_ctx, h);
+}
+
+/* If handshake is NULL, the handshake is done. */
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx,
+ void *handshake, bool success) {
+ grpc_security_handshake *h = handshake;
+
+ /* Make sure that write is OK. */
+ if (!success) {
+ gpr_log(GPR_ERROR, "Write failed.");
+ if (handshake != NULL) security_handshake_done(exec_ctx, h, 0);
+ return;
+ }
+
+ /* We may be done. */
+ if (tsi_handshaker_is_in_progress(h->handshaker)) {
+ /* TODO(klempner,jboeuf): This should probably use the client setup
+ deadline */
+ grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
+ &h->on_handshake_data_received_from_peer);
+ } else {
+ check_peer(exec_ctx, h);
+ }
+}
+
+void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
+ tsi_handshaker *handshaker,
+ grpc_security_connector *connector,
+ bool is_client_side,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
+ grpc_security_connector_handshake_list *handshake_node;
+ grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
+ memset(h, 0, sizeof(grpc_security_handshake));
+ h->handshaker = handshaker;
+ h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
+ h->is_client_side = is_client_side;
+ h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
+ h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ h->wrapped_endpoint = nonsecure_endpoint;
+ h->user_data = user_data;
+ h->cb = cb;
+ grpc_closure_init(&h->on_handshake_data_sent_to_peer,
+ on_handshake_data_sent_to_peer, h);
+ grpc_closure_init(&h->on_handshake_data_received_from_peer,
+ on_handshake_data_received_from_peer, h);
+ gpr_slice_buffer_init(&h->left_overs);
+ gpr_slice_buffer_init(&h->outgoing);
+ gpr_slice_buffer_init(&h->incoming);
+ if (!is_client_side) {
+ grpc_server_security_connector *server_connector =
+ (grpc_server_security_connector *)connector;
+ handshake_node = gpr_malloc(sizeof(grpc_security_connector_handshake_list));
+ handshake_node->handshake = h;
+ gpr_mu_lock(&server_connector->mu);
+ handshake_node->next = server_connector->handshaking_handshakes;
+ server_connector->handshaking_handshakes = handshake_node;
+ gpr_mu_unlock(&server_connector->mu);
+ }
+ send_handshake_bytes_to_peer(exec_ctx, h);
+}
+
+void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx,
+ void *handshake) {
+ grpc_security_handshake *h = handshake;
+ grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
+}
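send_handshake_bytes_to_peer() above pulls bytes out of the TSI handshaker into a buffer that starts at GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE and doubles whenever the handshaker reports TSI_INCOMPLETE_DATA, keeping what was already written. The stand-alone sketch below reproduces just that grow-and-retry loop; fake_get_bytes stands in for tsi_handshaker_get_bytes_to_send_to_peer and the 700-byte payload is arbitrary.

/* Sketch of the grow-and-retry buffer loop from send_handshake_bytes_to_peer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { OK, INCOMPLETE_DATA } result_t;

/* Pretend the handshaker has 700 bytes ready; it writes as much as fits and
   reports INCOMPLETE_DATA when the caller's buffer was too small. */
static result_t fake_get_bytes(unsigned char *buf, size_t *size) {
  static size_t remaining = 700;
  size_t n = remaining < *size ? remaining : *size;
  memset(buf, 0xab, n);
  *size = n;
  remaining -= n;
  return remaining == 0 ? OK : INCOMPLETE_DATA;
}

int main(void) {
  size_t cap = 256; /* GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE in the file above */
  unsigned char *buf = malloc(cap);
  size_t offset = 0;
  result_t r;
  do {
    size_t to_send = cap - offset;
    r = fake_get_bytes(buf + offset, &to_send);
    offset += to_send;
    if (r == INCOMPLETE_DATA) {
      cap *= 2; /* double and keep what was already produced */
      buf = realloc(buf, cap);
    }
  } while (r == INCOMPLETE_DATA);
  printf("produced %zu handshake bytes into a %zu-byte buffer\n", offset, cap);
  free(buf);
  return 0;
}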
diff --git a/src/core/security/secure_transport_setup.h b/src/core/security/handshake.h
index 58701c461d..4872045874 100644
--- a/src/core/security/secure_transport_setup.h
+++ b/src/core/security/handshake.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,23 +31,21 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_SECURE_TRANSPORT_SETUP_H
-#define GRPC_INTERNAL_CORE_SECURITY_SECURE_TRANSPORT_SETUP_H
+#ifndef GRPC_CORE_SECURITY_HANDSHAKE_H
+#define GRPC_CORE_SECURITY_HANDSHAKE_H
#include "src/core/iomgr/endpoint.h"
#include "src/core/security/security_connector.h"
-/* --- Secure transport setup --- */
+/* Calls the callback upon completion. Takes ownership of handshaker. */
+void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
+ tsi_handshaker *handshaker,
+ grpc_security_connector *connector,
+ bool is_client_side,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data);
-/* Ownership of the secure_endpoint is transfered. */
-typedef void (*grpc_secure_transport_setup_done_cb)(
- void *user_data, grpc_security_status status,
- grpc_endpoint *secure_endpoint);
+void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, void *handshake);
-/* Calls the callback upon completion. */
-void grpc_setup_secure_transport(grpc_security_connector *connector,
- grpc_endpoint *nonsecure_endpoint,
- grpc_secure_transport_setup_done_cb cb,
- void *user_data);
-
-#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURE_TRANSPORT_SETUP_H */
+#endif /* GRPC_CORE_SECURITY_HANDSHAKE_H */
diff --git a/src/core/security/json_token.c b/src/core/security/json_token.c
index 6116f1d767..372e5bfc5a 100644
--- a/src/core/security/json_token.c
+++ b/src/core/security/json_token.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,23 +39,23 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/security/base64.h"
+#include "src/core/security/b64.h"
#include "src/core/support/string.h"
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
-#include "src/core/json/json.h"
-
/* --- Constants. --- */
/* 1 hour max. */
-const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0};
-
-#define GRPC_AUTH_JSON_TYPE_INVALID "invalid"
-#define GRPC_AUTH_JSON_TYPE_SERVICE_ACCOUNT "service_account"
-#define GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER "authorized_user"
+gpr_timespec grpc_max_auth_token_lifetime() {
+ gpr_timespec out;
+ out.tv_sec = 3600;
+ out.tv_nsec = 0;
+ out.clock_type = GPR_TIMESPAN;
+ return out;
+}
#define GRPC_JWT_RSA_SHA256_ALGORITHM "RS256"
#define GRPC_JWT_TYPE "JWT"
@@ -66,7 +66,7 @@ static grpc_jwt_encode_and_sign_override g_jwt_encode_and_sign_override = NULL;
/* --- grpc_auth_json_key. --- */
-static const char *json_get_string_property(grpc_json *json,
+static const char *json_get_string_property(const grpc_json *json,
const char *prop_name) {
grpc_json *child;
for (child = json->child; child != NULL; child = child->next) {
@@ -79,7 +79,8 @@ static const char *json_get_string_property(grpc_json *json,
return child->value;
}
-static int set_json_key_string_property(grpc_json *json, const char *prop_name,
+static int set_json_key_string_property(const grpc_json *json,
+ const char *prop_name,
char **json_key_field) {
const char *prop_value = json_get_string_property(json, prop_name);
if (prop_value == NULL) return 0;
@@ -92,11 +93,8 @@ int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key) {
strcmp(json_key->type, GRPC_AUTH_JSON_TYPE_INVALID);
}
-grpc_auth_json_key grpc_auth_json_key_create_from_string(
- const char *json_string) {
+grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) {
grpc_auth_json_key result;
- char *scratchpad = gpr_strdup(json_string);
- grpc_json *json = grpc_json_parse_string(scratchpad);
BIO *bio = NULL;
const char *prop_value;
int success = 0;
@@ -104,7 +102,7 @@ grpc_auth_json_key grpc_auth_json_key_create_from_string(
memset(&result, 0, sizeof(grpc_auth_json_key));
result.type = GRPC_AUTH_JSON_TYPE_INVALID;
if (json == NULL) {
- gpr_log(GPR_ERROR, "Invalid json string %s", json_string);
+ gpr_log(GPR_ERROR, "Invalid json.");
goto end;
}
@@ -142,8 +140,16 @@ grpc_auth_json_key grpc_auth_json_key_create_from_string(
end:
if (bio != NULL) BIO_free(bio);
- if (json != NULL) grpc_json_destroy(json);
if (!success) grpc_auth_json_key_destruct(&result);
+ return result;
+}
+
+grpc_auth_json_key grpc_auth_json_key_create_from_string(
+ const char *json_string) {
+ char *scratchpad = gpr_strdup(json_string);
+ grpc_json *json = grpc_json_parse_string(scratchpad);
+ grpc_auth_json_key result = grpc_auth_json_key_create_from_json(json);
+ if (json != NULL) grpc_json_destroy(json);
gpr_free(scratchpad);
return result;
}
@@ -207,19 +213,19 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
grpc_json *child = NULL;
char *json_str = NULL;
char *result = NULL;
- gpr_timespec now = gpr_now();
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
gpr_timespec expiration = gpr_time_add(now, token_lifetime);
char now_str[GPR_LTOA_MIN_BUFSIZE];
char expiration_str[GPR_LTOA_MIN_BUFSIZE];
- if (gpr_time_cmp(token_lifetime, grpc_max_auth_token_lifetime) > 0) {
+ if (gpr_time_cmp(token_lifetime, grpc_max_auth_token_lifetime()) > 0) {
gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value.");
- expiration = gpr_time_add(now, grpc_max_auth_token_lifetime);
+ expiration = gpr_time_add(now, grpc_max_auth_token_lifetime());
}
- gpr_ltoa(now.tv_sec, now_str);
- gpr_ltoa(expiration.tv_sec, expiration_str);
+ int64_ttoa(now.tv_sec, now_str);
+ int64_ttoa(expiration.tv_sec, expiration_str);
- child = create_child(NULL, json, "iss", json_key->client_email,
- GRPC_JSON_STRING);
+ child =
+ create_child(NULL, json, "iss", json_key->client_email, GRPC_JSON_STRING);
if (scope != NULL) {
child = create_child(child, json, "scope", scope, GRPC_JSON_STRING);
} else {
@@ -251,7 +257,7 @@ static char *dot_concat_and_free_strings(char *str1, char *str2) {
memcpy(current, str2, str2_len);
current += str2_len;
GPR_ASSERT(current >= result);
- GPR_ASSERT((gpr_uintptr)(current - result) == result_len);
+ GPR_ASSERT((uintptr_t)(current - result) == result_len);
*current = '\0';
gpr_free(str1);
gpr_free(str2);
@@ -342,18 +348,16 @@ int grpc_auth_refresh_token_is_valid(
strcmp(refresh_token->type, GRPC_AUTH_JSON_TYPE_INVALID);
}
-grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
- const char *json_string) {
+grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json(
+ const grpc_json *json) {
grpc_auth_refresh_token result;
- char *scratchpad = gpr_strdup(json_string);
- grpc_json *json = grpc_json_parse_string(scratchpad);
const char *prop_value;
int success = 0;
memset(&result, 0, sizeof(grpc_auth_refresh_token));
result.type = GRPC_AUTH_JSON_TYPE_INVALID;
if (json == NULL) {
- gpr_log(GPR_ERROR, "Invalid json string %s", json_string);
+ gpr_log(GPR_ERROR, "Invalid json.");
goto end;
}
@@ -374,8 +378,17 @@ grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
success = 1;
end:
- if (json != NULL) grpc_json_destroy(json);
if (!success) grpc_auth_refresh_token_destruct(&result);
+ return result;
+}
+
+grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
+ const char *json_string) {
+ char *scratchpad = gpr_strdup(json_string);
+ grpc_json *json = grpc_json_parse_string(scratchpad);
+ grpc_auth_refresh_token result =
+ grpc_auth_refresh_token_create_from_json(json);
+ if (json != NULL) grpc_json_destroy(json);
gpr_free(scratchpad);
return result;
}
@@ -396,4 +409,3 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) {
refresh_token->refresh_token = NULL;
}
}
-
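In json_token.c above, grpc_max_auth_token_lifetime becomes a function so the GPR_TIMESPAN clock type can be set explicitly, and encoded_jwt_claim() crops any requested lifetime to that one-hour maximum before writing iat/exp into the claims. Here is a small sketch of just the timestamp arithmetic, using plain time_t instead of gpr_timespec and printing instead of building the JSON.

/* Sketch of the iat/exp computation with the lifetime cap. */
#include <stdio.h>
#include <time.h>

#define MAX_AUTH_TOKEN_LIFETIME_SECS 3600 /* mirrors grpc_max_auth_token_lifetime() */

static void jwt_claim_times(long requested_lifetime_secs, time_t *iat,
                            time_t *exp) {
  if (requested_lifetime_secs > MAX_AUTH_TOKEN_LIFETIME_SECS) {
    fprintf(stderr, "Cropping token lifetime to maximum allowed value.\n");
    requested_lifetime_secs = MAX_AUTH_TOKEN_LIFETIME_SECS;
  }
  *iat = time(NULL);
  *exp = *iat + requested_lifetime_secs;
}

int main(void) {
  time_t iat, exp;
  jwt_claim_times(7200, &iat, &exp); /* asks for 2h, gets cropped to 1h */
  printf("iat=%lld exp=%lld (delta=%lld s)\n", (long long)iat, (long long)exp,
         (long long)(exp - iat));
  return 0;
}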
diff --git a/src/core/security/json_token.h b/src/core/security/json_token.h
index 197796ab4c..d183f9b3a3 100644
--- a/src/core/security/json_token.h
+++ b/src/core/security/json_token.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,16 +31,22 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_JSON_TOKEN_H
-#define GRPC_INTERNAL_CORE_SECURITY_JSON_TOKEN_H
+#ifndef GRPC_CORE_SECURITY_JSON_TOKEN_H
+#define GRPC_CORE_SECURITY_JSON_TOKEN_H
#include <grpc/support/slice.h>
#include <openssl/rsa.h>
+#include "src/core/json/json.h"
+
/* --- Constants. --- */
#define GRPC_JWT_OAUTH2_AUDIENCE "https://www.googleapis.com/oauth2/v3/token"
+#define GRPC_AUTH_JSON_TYPE_INVALID "invalid"
+#define GRPC_AUTH_JSON_TYPE_SERVICE_ACCOUNT "service_account"
+#define GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER "authorized_user"
+
/* --- auth_json_key parsing. --- */
typedef struct {
@@ -59,6 +65,10 @@ int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key);
grpc_auth_json_key grpc_auth_json_key_create_from_string(
const char *json_string);
+/* Creates a json_key object from parsed json. Returns an invalid object if a
+ parsing error has been encountered. */
+grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json);
+
/* Destructs the object. */
void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key);
@@ -97,7 +107,12 @@ int grpc_auth_refresh_token_is_valid(
grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
const char *json_string);
+/* Creates a refresh token object from parsed json. Returns an invalid object if
+ a parsing error has been encountered. */
+grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json(
+ const grpc_json *json);
+
/* Destructs the object. */
void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token);
-#endif /* GRPC_INTERNAL_CORE_SECURITY_JSON_TOKEN_H */
+#endif /* GRPC_CORE_SECURITY_JSON_TOKEN_H */
diff --git a/src/core/security/jwt_verifier.c b/src/core/security/jwt_verifier.c
new file mode 100644
index 0000000000..928c6c148d
--- /dev/null
+++ b/src/core/security/jwt_verifier.c
@@ -0,0 +1,843 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/security/jwt_verifier.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include "src/core/httpcli/httpcli.h"
+#include "src/core/security/b64.h"
+#include "src/core/tsi/ssl_types.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+#include <openssl/pem.h>
+
+/* --- Utils. --- */
+
+const char *grpc_jwt_verifier_status_to_string(
+ grpc_jwt_verifier_status status) {
+ switch (status) {
+ case GRPC_JWT_VERIFIER_OK:
+ return "OK";
+ case GRPC_JWT_VERIFIER_BAD_SIGNATURE:
+ return "BAD_SIGNATURE";
+ case GRPC_JWT_VERIFIER_BAD_FORMAT:
+ return "BAD_FORMAT";
+ case GRPC_JWT_VERIFIER_BAD_AUDIENCE:
+ return "BAD_AUDIENCE";
+ case GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR:
+ return "KEY_RETRIEVAL_ERROR";
+ case GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE:
+ return "TIME_CONSTRAINT_FAILURE";
+ case GRPC_JWT_VERIFIER_GENERIC_ERROR:
+ return "GENERIC_ERROR";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const EVP_MD *evp_md_from_alg(const char *alg) {
+ if (strcmp(alg, "RS256") == 0) {
+ return EVP_sha256();
+ } else if (strcmp(alg, "RS384") == 0) {
+ return EVP_sha384();
+ } else if (strcmp(alg, "RS512") == 0) {
+ return EVP_sha512();
+ } else {
+ return NULL;
+ }
+}
+
+static grpc_json *parse_json_part_from_jwt(const char *str, size_t len,
+ gpr_slice *buffer) {
+ grpc_json *json;
+
+ *buffer = grpc_base64_decode_with_len(str, len, 1);
+ if (GPR_SLICE_IS_EMPTY(*buffer)) {
+ gpr_log(GPR_ERROR, "Invalid base64.");
+ return NULL;
+ }
+ json = grpc_json_parse_string_with_len((char *)GPR_SLICE_START_PTR(*buffer),
+ GPR_SLICE_LENGTH(*buffer));
+ if (json == NULL) {
+ gpr_slice_unref(*buffer);
+ gpr_log(GPR_ERROR, "JSON parsing error.");
+ }
+ return json;
+}
+
+static const char *validate_string_field(const grpc_json *json,
+ const char *key) {
+ if (json->type != GRPC_JSON_STRING) {
+ gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
+ return NULL;
+ }
+ return json->value;
+}
+
+static gpr_timespec validate_time_field(const grpc_json *json,
+ const char *key) {
+ gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME);
+ if (json->type != GRPC_JSON_NUMBER) {
+ gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
+ return result;
+ }
+ result.tv_sec = strtol(json->value, NULL, 10);
+ return result;
+}
+
+/* --- JOSE header. see http://tools.ietf.org/html/rfc7515#section-4 --- */
+
+typedef struct {
+ const char *alg;
+ const char *kid;
+ const char *typ;
+ /* TODO(jboeuf): Add others as needed (jku, jwk, x5u, x5c and so on...). */
+ gpr_slice buffer;
+} jose_header;
+
+static void jose_header_destroy(jose_header *h) {
+ gpr_slice_unref(h->buffer);
+ gpr_free(h);
+}
+
+/* Takes ownership of json and buffer. */
+static jose_header *jose_header_from_json(grpc_json *json, gpr_slice buffer) {
+ grpc_json *cur;
+ jose_header *h = gpr_malloc(sizeof(jose_header));
+ memset(h, 0, sizeof(jose_header));
+ h->buffer = buffer;
+ for (cur = json->child; cur != NULL; cur = cur->next) {
+ if (strcmp(cur->key, "alg") == 0) {
+ /* We only support RSA-1.5 signatures for now.
+ Beware of this if we add HMAC support:
+ https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
+ */
+ if (cur->type != GRPC_JSON_STRING || strncmp(cur->value, "RS", 2) ||
+ evp_md_from_alg(cur->value) == NULL) {
+ gpr_log(GPR_ERROR, "Invalid alg field [%s]", cur->value);
+ goto error;
+ }
+ h->alg = cur->value;
+ } else if (strcmp(cur->key, "typ") == 0) {
+ h->typ = validate_string_field(cur, "typ");
+ if (h->typ == NULL) goto error;
+ } else if (strcmp(cur->key, "kid") == 0) {
+ h->kid = validate_string_field(cur, "kid");
+ if (h->kid == NULL) goto error;
+ }
+ }
+ if (h->alg == NULL) {
+ gpr_log(GPR_ERROR, "Missing alg field.");
+ goto error;
+ }
+ grpc_json_destroy(json);
+ h->buffer = buffer;
+ return h;
+
+error:
+ grpc_json_destroy(json);
+ jose_header_destroy(h);
+ return NULL;
+}
+
+/* --- JWT claims. see http://tools.ietf.org/html/rfc7519#section-4.1 */
+
+struct grpc_jwt_claims {
+ /* Well known properties already parsed. */
+ const char *sub;
+ const char *iss;
+ const char *aud;
+ const char *jti;
+ gpr_timespec iat;
+ gpr_timespec exp;
+ gpr_timespec nbf;
+
+ grpc_json *json;
+ gpr_slice buffer;
+};
+
+void grpc_jwt_claims_destroy(grpc_jwt_claims *claims) {
+ grpc_json_destroy(claims->json);
+ gpr_slice_unref(claims->buffer);
+ gpr_free(claims);
+}
+
+const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return NULL;
+ return claims->json;
+}
+
+const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return NULL;
+ return claims->sub;
+}
+
+const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return NULL;
+ return claims->iss;
+}
+
+const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return NULL;
+ return claims->jti;
+}
+
+const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return NULL;
+ return claims->aud;
+}
+
+gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
+ return claims->iat;
+}
+
+gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME);
+ return claims->exp;
+}
+
+gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
+ if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
+ return claims->nbf;
+}
+
+/* Takes ownership of json and buffer even in case of failure. */
+grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
+ grpc_json *cur;
+ grpc_jwt_claims *claims = gpr_malloc(sizeof(grpc_jwt_claims));
+ memset(claims, 0, sizeof(grpc_jwt_claims));
+ claims->json = json;
+ claims->buffer = buffer;
+ claims->iat = gpr_inf_past(GPR_CLOCK_REALTIME);
+ claims->nbf = gpr_inf_past(GPR_CLOCK_REALTIME);
+ claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME);
+
+ /* Per the spec, all fields are optional. */
+ for (cur = json->child; cur != NULL; cur = cur->next) {
+ if (strcmp(cur->key, "sub") == 0) {
+ claims->sub = validate_string_field(cur, "sub");
+ if (claims->sub == NULL) goto error;
+ } else if (strcmp(cur->key, "iss") == 0) {
+ claims->iss = validate_string_field(cur, "iss");
+ if (claims->iss == NULL) goto error;
+ } else if (strcmp(cur->key, "aud") == 0) {
+ claims->aud = validate_string_field(cur, "aud");
+ if (claims->aud == NULL) goto error;
+ } else if (strcmp(cur->key, "jti") == 0) {
+ claims->jti = validate_string_field(cur, "jti");
+ if (claims->jti == NULL) goto error;
+ } else if (strcmp(cur->key, "iat") == 0) {
+ claims->iat = validate_time_field(cur, "iat");
+ if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
+ goto error;
+ } else if (strcmp(cur->key, "exp") == 0) {
+ claims->exp = validate_time_field(cur, "exp");
+ if (gpr_time_cmp(claims->exp, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
+ goto error;
+ } else if (strcmp(cur->key, "nbf") == 0) {
+ claims->nbf = validate_time_field(cur, "nbf");
+ if (gpr_time_cmp(claims->nbf, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
+ goto error;
+ }
+ }
+ return claims;
+
+error:
+ grpc_jwt_claims_destroy(claims);
+ return NULL;
+}
+
+grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
+ const char *audience) {
+ gpr_timespec skewed_now;
+ int audience_ok;
+
+ GPR_ASSERT(claims != NULL);
+
+ skewed_now =
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_clock_skew);
+ if (gpr_time_cmp(skewed_now, claims->nbf) < 0) {
+ gpr_log(GPR_ERROR, "JWT is not valid yet.");
+ return GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE;
+ }
+ skewed_now =
+ gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_clock_skew);
+ if (gpr_time_cmp(skewed_now, claims->exp) > 0) {
+ gpr_log(GPR_ERROR, "JWT is expired.");
+ return GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE;
+ }
+
+ if (audience == NULL) {
+ audience_ok = claims->aud == NULL;
+ } else {
+ audience_ok = claims->aud != NULL && strcmp(audience, claims->aud) == 0;
+ }
+ if (!audience_ok) {
+ gpr_log(GPR_ERROR, "Audience mismatch: expected %s and found %s.",
+ audience == NULL ? "NULL" : audience,
+ claims->aud == NULL ? "NULL" : claims->aud);
+ return GRPC_JWT_VERIFIER_BAD_AUDIENCE;
+ }
+ return GRPC_JWT_VERIFIER_OK;
+}
+
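grpc_jwt_claims_check() applies the clock skew in both directions: nbf is compared against now plus the skew, exp against now minus it, and then the audience must match exactly (or both sides must be absent). A stand-alone restatement of that check with time_t and hypothetical names, using the same one-minute default skew:

/* Clock-skew-tolerant validity and audience check, mirroring the function above. */
#include <string.h>
#include <time.h>

#define CLOCK_SKEW_SECS 60 /* same one-minute default as grpc_jwt_verifier_clock_skew */

static int claims_time_and_audience_ok(time_t nbf, time_t exp,
                                       const char *claimed_aud,
                                       const char *expected_aud) {
  time_t now = time(NULL);
  if (now + CLOCK_SKEW_SECS < nbf) return 0; /* not valid yet */
  if (now - CLOCK_SKEW_SECS > exp) return 0; /* expired */
  if (expected_aud == NULL) return claimed_aud == NULL;
  return claimed_aud != NULL && strcmp(claimed_aud, expected_aud) == 0;
}

int main(void) {
  time_t now = time(NULL);
  /* token valid from 2 minutes ago until 10 minutes from now */
  return claims_time_and_audience_ok(now - 120, now + 600, "my-service",
                                     "my-service") ? 0 : 1;
}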
+/* --- verifier_cb_ctx object. --- */
+
+typedef struct {
+ grpc_jwt_verifier *verifier;
+ grpc_pollset *pollset;
+ jose_header *header;
+ grpc_jwt_claims *claims;
+ char *audience;
+ gpr_slice signature;
+ gpr_slice signed_data;
+ void *user_data;
+ grpc_jwt_verification_done_cb user_cb;
+} verifier_cb_ctx;
+
+/* Takes ownership of the header, claims and signature. */
+static verifier_cb_ctx *verifier_cb_ctx_create(
+ grpc_jwt_verifier *verifier, grpc_pollset *pollset, jose_header *header,
+ grpc_jwt_claims *claims, const char *audience, gpr_slice signature,
+ const char *signed_jwt, size_t signed_jwt_len, void *user_data,
+ grpc_jwt_verification_done_cb cb) {
+ verifier_cb_ctx *ctx = gpr_malloc(sizeof(verifier_cb_ctx));
+ memset(ctx, 0, sizeof(verifier_cb_ctx));
+ ctx->verifier = verifier;
+ ctx->pollset = pollset;
+ ctx->header = header;
+ ctx->audience = gpr_strdup(audience);
+ ctx->claims = claims;
+ ctx->signature = signature;
+ ctx->signed_data = gpr_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
+ ctx->user_data = user_data;
+ ctx->user_cb = cb;
+ return ctx;
+}
+
+void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) {
+ if (ctx->audience != NULL) gpr_free(ctx->audience);
+ if (ctx->claims != NULL) grpc_jwt_claims_destroy(ctx->claims);
+ gpr_slice_unref(ctx->signature);
+ gpr_slice_unref(ctx->signed_data);
+ jose_header_destroy(ctx->header);
+ /* TODO: see what to do with claims... */
+ gpr_free(ctx);
+}
+
+/* --- grpc_jwt_verifier object. --- */
+
+/* Clock skew defaults to one minute. */
+gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
+
+/* Max delay defaults to one minute. */
+gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN};
+
+typedef struct {
+ char *email_domain;
+ char *key_url_prefix;
+} email_key_mapping;
+
+struct grpc_jwt_verifier {
+ email_key_mapping *mappings;
+ size_t num_mappings; /* Should be very few, linear search ok. */
+ size_t allocated_mappings;
+ grpc_httpcli_context http_ctx;
+};
+
+static grpc_json *json_from_http(const grpc_httpcli_response *response) {
+ grpc_json *json = NULL;
+
+ if (response == NULL) {
+ gpr_log(GPR_ERROR, "HTTP response is NULL.");
+ return NULL;
+ }
+ if (response->status != 200) {
+ gpr_log(GPR_ERROR, "Call to http server failed with error %d.",
+ response->status);
+ return NULL;
+ }
+
+ json = grpc_json_parse_string_with_len(response->body, response->body_length);
+ if (json == NULL) {
+ gpr_log(GPR_ERROR, "Invalid JSON found in response.");
+ }
+ return json;
+}
+
+static const grpc_json *find_property_by_name(const grpc_json *json,
+ const char *name) {
+ const grpc_json *cur;
+ for (cur = json->child; cur != NULL; cur = cur->next) {
+ if (strcmp(cur->key, name) == 0) return cur;
+ }
+ return NULL;
+}
+
+static EVP_PKEY *extract_pkey_from_x509(const char *x509_str) {
+ X509 *x509 = NULL;
+ EVP_PKEY *result = NULL;
+ BIO *bio = BIO_new(BIO_s_mem());
+ size_t len = strlen(x509_str);
+ GPR_ASSERT(len < INT_MAX);
+ BIO_write(bio, x509_str, (int)len);
+ x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL);
+ if (x509 == NULL) {
+ gpr_log(GPR_ERROR, "Unable to parse x509 cert.");
+ goto end;
+ }
+ result = X509_get_pubkey(x509);
+ if (result == NULL) {
+ gpr_log(GPR_ERROR, "Cannot find public key in X509 cert.");
+ }
+
+end:
+ BIO_free(bio);
+ if (x509 != NULL) X509_free(x509);
+ return result;
+}
+
+static BIGNUM *bignum_from_base64(const char *b64) {
+ BIGNUM *result = NULL;
+ gpr_slice bin;
+
+ if (b64 == NULL) return NULL;
+ bin = grpc_base64_decode(b64, 1);
+ if (GPR_SLICE_IS_EMPTY(bin)) {
+ gpr_log(GPR_ERROR, "Invalid base64 for big num.");
+ return NULL;
+ }
+ result = BN_bin2bn(GPR_SLICE_START_PTR(bin),
+ TSI_SIZE_AS_SIZE(GPR_SLICE_LENGTH(bin)), NULL);
+ gpr_slice_unref(bin);
+ return result;
+}
+
+static EVP_PKEY *pkey_from_jwk(const grpc_json *json, const char *kty) {
+ const grpc_json *key_prop;
+ RSA *rsa = NULL;
+ EVP_PKEY *result = NULL;
+
+ GPR_ASSERT(kty != NULL && json != NULL);
+ if (strcmp(kty, "RSA") != 0) {
+ gpr_log(GPR_ERROR, "Unsupported key type %s.", kty);
+ goto end;
+ }
+ rsa = RSA_new();
+ if (rsa == NULL) {
+ gpr_log(GPR_ERROR, "Could not create rsa key.");
+ goto end;
+ }
+ for (key_prop = json->child; key_prop != NULL; key_prop = key_prop->next) {
+ if (strcmp(key_prop->key, "n") == 0) {
+ rsa->n = bignum_from_base64(validate_string_field(key_prop, "n"));
+ if (rsa->n == NULL) goto end;
+ } else if (strcmp(key_prop->key, "e") == 0) {
+ rsa->e = bignum_from_base64(validate_string_field(key_prop, "e"));
+ if (rsa->e == NULL) goto end;
+ }
+ }
+ if (rsa->e == NULL || rsa->n == NULL) {
+ gpr_log(GPR_ERROR, "Missing RSA public key field.");
+ goto end;
+ }
+ result = EVP_PKEY_new();
+ EVP_PKEY_set1_RSA(result, rsa); /* uprefs rsa. */
+
+end:
+ if (rsa != NULL) RSA_free(rsa);
+ return result;
+}
+
+static EVP_PKEY *find_verification_key(const grpc_json *json,
+ const char *header_alg,
+ const char *header_kid) {
+ const grpc_json *jkey;
+ const grpc_json *jwk_keys;
+ /* Try to parse the json as a JWK set:
+ https://tools.ietf.org/html/rfc7517#section-5. */
+ jwk_keys = find_property_by_name(json, "keys");
+ if (jwk_keys == NULL) {
+ /* Use the google proprietary format which is:
+ { <kid1>: <x5091>, <kid2>: <x5092>, ... } */
+ const grpc_json *cur = find_property_by_name(json, header_kid);
+ if (cur == NULL) return NULL;
+ return extract_pkey_from_x509(cur->value);
+ }
+
+ if (jwk_keys->type != GRPC_JSON_ARRAY) {
+ gpr_log(GPR_ERROR,
+ "Unexpected value type of keys property in jwks key set.");
+ return NULL;
+ }
+ /* Key format is specified in:
+ https://tools.ietf.org/html/rfc7518#section-6. */
+ for (jkey = jwk_keys->child; jkey != NULL; jkey = jkey->next) {
+ grpc_json *key_prop;
+ const char *alg = NULL;
+ const char *kid = NULL;
+ const char *kty = NULL;
+
+ if (jkey->type != GRPC_JSON_OBJECT) continue;
+ for (key_prop = jkey->child; key_prop != NULL; key_prop = key_prop->next) {
+ if (strcmp(key_prop->key, "alg") == 0 &&
+ key_prop->type == GRPC_JSON_STRING) {
+ alg = key_prop->value;
+ } else if (strcmp(key_prop->key, "kid") == 0 &&
+ key_prop->type == GRPC_JSON_STRING) {
+ kid = key_prop->value;
+ } else if (strcmp(key_prop->key, "kty") == 0 &&
+ key_prop->type == GRPC_JSON_STRING) {
+ kty = key_prop->value;
+ }
+ }
+ if (alg != NULL && kid != NULL && kty != NULL &&
+ strcmp(kid, header_kid) == 0 && strcmp(alg, header_alg) == 0) {
+ return pkey_from_jwk(jkey, kty);
+ }
+ }
+ gpr_log(GPR_ERROR,
+ "Could not find matching key in key set for kid=%s and alg=%s",
+ header_kid, header_alg);
+ return NULL;
+}
+
+static int verify_jwt_signature(EVP_PKEY *key, const char *alg,
+ gpr_slice signature, gpr_slice signed_data) {
+ EVP_MD_CTX *md_ctx = EVP_MD_CTX_create();
+ const EVP_MD *md = evp_md_from_alg(alg);
+ int result = 0;
+
+ GPR_ASSERT(md != NULL); /* Checked before. */
+ if (md_ctx == NULL) {
+ gpr_log(GPR_ERROR, "Could not create EVP_MD_CTX.");
+ goto end;
+ }
+ if (EVP_DigestVerifyInit(md_ctx, NULL, md, NULL, key) != 1) {
+ gpr_log(GPR_ERROR, "EVP_DigestVerifyInit failed.");
+ goto end;
+ }
+ if (EVP_DigestVerifyUpdate(md_ctx, GPR_SLICE_START_PTR(signed_data),
+ GPR_SLICE_LENGTH(signed_data)) != 1) {
+ gpr_log(GPR_ERROR, "EVP_DigestVerifyUpdate failed.");
+ goto end;
+ }
+ if (EVP_DigestVerifyFinal(md_ctx, GPR_SLICE_START_PTR(signature),
+ GPR_SLICE_LENGTH(signature)) != 1) {
+ gpr_log(GPR_ERROR, "JWT signature verification failed.");
+ goto end;
+ }
+ result = 1;
+
+end:
+ if (md_ctx != NULL) EVP_MD_CTX_destroy(md_ctx);
+ return result;
+}
+
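verify_jwt_signature() above is the standard OpenSSL EVP_DigestVerify{Init,Update,Final} sequence, with the digest chosen from the JOSE alg field. The sketch below exercises the same verify calls end to end; the throwaway RSA key generation and the EVP_DigestSign* signing step are not part of the verifier, they are just scaffolding so the example is self-contained (build with -lcrypto, error handling reduced to asserts).

/* Sign-then-verify roundtrip illustrating the EVP_DigestVerify* flow. */
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/rsa.h>

int main(void) {
  const unsigned char msg[] = "header.payload"; /* the signed_data part of a JWT */
  unsigned char sig[512];
  size_t sig_len = sizeof(sig);

  /* Throwaway 2048-bit RSA key (a real verifier extracts it from a JWK or x509). */
  EVP_PKEY *key = NULL;
  EVP_PKEY_CTX *kctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
  assert(EVP_PKEY_keygen_init(kctx) == 1);
  assert(EVP_PKEY_CTX_set_rsa_keygen_bits(kctx, 2048) == 1);
  assert(EVP_PKEY_keygen(kctx, &key) == 1);
  EVP_PKEY_CTX_free(kctx);

  /* Sign (RS256-style: PKCS#1 v1.5 with SHA-256). */
  EVP_MD_CTX *sctx = EVP_MD_CTX_create();
  assert(EVP_DigestSignInit(sctx, NULL, EVP_sha256(), NULL, key) == 1);
  assert(EVP_DigestSignUpdate(sctx, msg, strlen((const char *)msg)) == 1);
  assert(EVP_DigestSignFinal(sctx, sig, &sig_len) == 1);
  EVP_MD_CTX_destroy(sctx);

  /* Verify, mirroring verify_jwt_signature. */
  EVP_MD_CTX *vctx = EVP_MD_CTX_create();
  int ok = EVP_DigestVerifyInit(vctx, NULL, EVP_sha256(), NULL, key) == 1 &&
           EVP_DigestVerifyUpdate(vctx, msg, strlen((const char *)msg)) == 1 &&
           EVP_DigestVerifyFinal(vctx, sig, sig_len) == 1;
  EVP_MD_CTX_destroy(vctx);
  EVP_PKEY_free(key);

  printf("signature %s\n", ok ? "verified" : "rejected");
  return ok ? 0 : 1;
}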
+static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
+ grpc_json *json = json_from_http(response);
+ verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
+ EVP_PKEY *verification_key = NULL;
+ grpc_jwt_verifier_status status = GRPC_JWT_VERIFIER_GENERIC_ERROR;
+ grpc_jwt_claims *claims = NULL;
+
+ if (json == NULL) {
+ status = GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR;
+ goto end;
+ }
+ verification_key =
+ find_verification_key(json, ctx->header->alg, ctx->header->kid);
+ if (verification_key == NULL) {
+ gpr_log(GPR_ERROR, "Could not find verification key with kid %s.",
+ ctx->header->kid);
+ status = GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR;
+ goto end;
+ }
+
+ if (!verify_jwt_signature(verification_key, ctx->header->alg, ctx->signature,
+ ctx->signed_data)) {
+ status = GRPC_JWT_VERIFIER_BAD_SIGNATURE;
+ goto end;
+ }
+
+ status = grpc_jwt_claims_check(ctx->claims, ctx->audience);
+ if (status == GRPC_JWT_VERIFIER_OK) {
+ /* Pass ownership. */
+ claims = ctx->claims;
+ ctx->claims = NULL;
+ }
+
+end:
+ if (json != NULL) grpc_json_destroy(json);
+ if (verification_key != NULL) EVP_PKEY_free(verification_key);
+ ctx->user_cb(ctx->user_data, status, claims);
+ verifier_cb_ctx_destroy(ctx);
+}
+
+static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
+ const grpc_json *cur;
+ grpc_json *json = json_from_http(response);
+ verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
+ grpc_httpcli_request req;
+ const char *jwks_uri;
+
+ /* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. */
+ if (json == NULL) goto error;
+ cur = find_property_by_name(json, "jwks_uri");
+ if (cur == NULL) {
+ gpr_log(GPR_ERROR, "Could not find jwks_uri in openid config.");
+ goto error;
+ }
+ jwks_uri = validate_string_field(cur, "jwks_uri");
+ if (jwks_uri == NULL) goto error;
+ if (strstr(jwks_uri, "https://") != jwks_uri) {
+ gpr_log(GPR_ERROR, "Invalid non https jwks_uri: %s.", jwks_uri);
+ goto error;
+ }
+ jwks_uri += 8;
+ req.handshaker = &grpc_httpcli_ssl;
+ req.host = gpr_strdup(jwks_uri);
+ req.path = strchr(jwks_uri, '/');
+ if (req.path == NULL) {
+ req.path = "";
+ } else {
+ *(req.host + (req.path - jwks_uri)) = '\0';
+ }
+ grpc_httpcli_get(
+ exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
+ on_keys_retrieved, ctx);
+ grpc_json_destroy(json);
+ gpr_free(req.host);
+ return;
+
+error:
+ if (json != NULL) grpc_json_destroy(json);
+ ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, NULL);
+ verifier_cb_ctx_destroy(ctx);
+}
+
+static email_key_mapping *verifier_get_mapping(grpc_jwt_verifier *v,
+ const char *email_domain) {
+ size_t i;
+ if (v->mappings == NULL) return NULL;
+ for (i = 0; i < v->num_mappings; i++) {
+ if (strcmp(email_domain, v->mappings[i].email_domain) == 0) {
+ return &v->mappings[i];
+ }
+ }
+ return NULL;
+}
+
+static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain,
+ const char *key_url_prefix) {
+ email_key_mapping *mapping = verifier_get_mapping(v, email_domain);
+ GPR_ASSERT(v->num_mappings < v->allocated_mappings);
+ if (mapping != NULL) {
+ gpr_free(mapping->key_url_prefix);
+ mapping->key_url_prefix = gpr_strdup(key_url_prefix);
+ return;
+ }
+ v->mappings[v->num_mappings].email_domain = gpr_strdup(email_domain);
+ v->mappings[v->num_mappings].key_url_prefix = gpr_strdup(key_url_prefix);
+ v->num_mappings++;
+ GPR_ASSERT(v->num_mappings <= v->allocated_mappings);
+}
+
+/* Takes ownership of ctx. */
+static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
+ verifier_cb_ctx *ctx) {
+ const char *at_sign;
+ grpc_httpcli_response_cb http_cb;
+ char *path_prefix = NULL;
+ const char *iss;
+ grpc_httpcli_request req;
+ memset(&req, 0, sizeof(grpc_httpcli_request));
+ req.handshaker = &grpc_httpcli_ssl;
+
+ GPR_ASSERT(ctx != NULL && ctx->header != NULL && ctx->claims != NULL);
+ iss = ctx->claims->iss;
+ if (ctx->header->kid == NULL) {
+ gpr_log(GPR_ERROR, "Missing kid in jose header.");
+ goto error;
+ }
+ if (iss == NULL) {
+ gpr_log(GPR_ERROR, "Missing iss in claims.");
+ goto error;
+ }
+
+ /* This code relies on:
+ https://openid.net/specs/openid-connect-discovery-1_0.html
+   Nobody seems to implement the account/email/webfinger part 2 of the spec,
+   so we will rely instead on email/url mappings if we detect such an issuer.
+   Part 4, on the other hand, is implemented by both Google and Salesforce. */
+
+ /* Very non-sophisticated way to detect an email address. Should be good
+ enough for now... */
+ at_sign = strchr(iss, '@');
+ if (at_sign != NULL) {
+ email_key_mapping *mapping;
+ const char *email_domain = at_sign + 1;
+ GPR_ASSERT(ctx->verifier != NULL);
+ mapping = verifier_get_mapping(ctx->verifier, email_domain);
+ if (mapping == NULL) {
+ gpr_log(GPR_ERROR, "Missing mapping for issuer email.");
+ goto error;
+ }
+ req.host = gpr_strdup(mapping->key_url_prefix);
+ path_prefix = strchr(req.host, '/');
+ if (path_prefix == NULL) {
+ gpr_asprintf(&req.path, "/%s", iss);
+ } else {
+ *(path_prefix++) = '\0';
+ gpr_asprintf(&req.path, "/%s/%s", path_prefix, iss);
+ }
+ http_cb = on_keys_retrieved;
+ } else {
+ req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
+ path_prefix = strchr(req.host, '/');
+ if (path_prefix == NULL) {
+ req.path = gpr_strdup(GRPC_OPENID_CONFIG_URL_SUFFIX);
+ } else {
+ *(path_prefix++) = 0;
+ gpr_asprintf(&req.path, "/%s%s", path_prefix,
+ GRPC_OPENID_CONFIG_URL_SUFFIX);
+ }
+ http_cb = on_openid_config_retrieved;
+ }
+
+ grpc_httpcli_get(
+ exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
+ http_cb, ctx);
+ gpr_free(req.host);
+ gpr_free(req.path);
+ return;
+
+error:
+ ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, NULL);
+ verifier_cb_ctx_destroy(ctx);
+}
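As a worked illustration of the email branch (the issuer below is hypothetical; the prefix is the built-in Google mapping declared in jwt_verifier.h), the domain after '@' selects a mapping and the full issuer email becomes the last path component, whereas a URL issuer is sent to <issuer host>/.well-known/openid-configuration instead:

#include <stdio.h>
#include <string.h>

int main(void) {
  const char *iss = "someaccount@developer.gserviceaccount.com"; /* hypothetical */
  const char *key_url_prefix = "www.googleapis.com/robot/v1/metadata/x509";
  const char *at_sign = strchr(iss, '@');
  if (at_sign == NULL) return 1;             /* not an email issuer */
  printf("email domain: %s\n", at_sign + 1); /* picks the mapping */
  printf("key fetch   : https://%s/%s\n", key_url_prefix, iss);
  return 0;
}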
+
+void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
+ grpc_jwt_verifier *verifier,
+ grpc_pollset *pollset, const char *jwt,
+ const char *audience,
+ grpc_jwt_verification_done_cb cb,
+ void *user_data) {
+ const char *dot = NULL;
+ grpc_json *json;
+ jose_header *header = NULL;
+ grpc_jwt_claims *claims = NULL;
+ gpr_slice header_buffer;
+ gpr_slice claims_buffer;
+ gpr_slice signature;
+ size_t signed_jwt_len;
+ const char *cur = jwt;
+
+ GPR_ASSERT(verifier != NULL && jwt != NULL && audience != NULL && cb != NULL);
+ dot = strchr(cur, '.');
+ if (dot == NULL) goto error;
+ json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &header_buffer);
+ if (json == NULL) goto error;
+ header = jose_header_from_json(json, header_buffer);
+ if (header == NULL) goto error;
+
+ cur = dot + 1;
+ dot = strchr(cur, '.');
+ if (dot == NULL) goto error;
+ json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &claims_buffer);
+ if (json == NULL) goto error;
+ claims = grpc_jwt_claims_from_json(json, claims_buffer);
+ if (claims == NULL) goto error;
+
+ signed_jwt_len = (size_t)(dot - jwt);
+ cur = dot + 1;
+ signature = grpc_base64_decode(cur, 1);
+ if (GPR_SLICE_IS_EMPTY(signature)) goto error;
+ retrieve_key_and_verify(
+ exec_ctx,
+ verifier_cb_ctx_create(verifier, pollset, header, claims, audience,
+ signature, jwt, signed_jwt_len, user_data, cb));
+ return;
+
+error:
+ if (header != NULL) jose_header_destroy(header);
+ if (claims != NULL) grpc_jwt_claims_destroy(claims);
+ cb(user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, NULL);
+}
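A minimal illustration, with a placeholder token rather than real base64 parts, of how the function cuts the input at the two dots and which bytes signed_jwt_len covers:

#include <stdio.h>
#include <string.h>

int main(void) {
  const char *jwt = "HEADER.CLAIMS.SIGNATURE";           /* placeholder parts */
  const char *dot1 = strchr(jwt, '.');                   /* end of header part */
  const char *dot2 = strchr(dot1 + 1, '.');              /* end of claims part */
  printf("header     : %.*s\n", (int)(dot1 - jwt), jwt);
  printf("claims     : %.*s\n", (int)(dot2 - dot1 - 1), dot1 + 1);
  printf("signed part: %.*s\n", (int)(dot2 - jwt), jwt); /* signed_jwt_len bytes */
  printf("signature  : %s\n", dot2 + 1);                 /* fed to grpc_base64_decode */
  return 0;
}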
+
+grpc_jwt_verifier *grpc_jwt_verifier_create(
+ const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
+ size_t num_mappings) {
+ grpc_jwt_verifier *v = gpr_malloc(sizeof(grpc_jwt_verifier));
+ memset(v, 0, sizeof(grpc_jwt_verifier));
+ grpc_httpcli_context_init(&v->http_ctx);
+
+  /* We know of at least one mapping. */
+ v->allocated_mappings = 1 + num_mappings;
+ v->mappings = gpr_malloc(v->allocated_mappings * sizeof(email_key_mapping));
+ verifier_put_mapping(v, GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN,
+ GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX);
+  /* User-provided mappings. */
+ if (mappings != NULL) {
+ size_t i;
+ for (i = 0; i < num_mappings; i++) {
+ verifier_put_mapping(v, mappings[i].email_domain,
+ mappings[i].key_url_prefix);
+ }
+ }
+ return v;
+}
+
+void grpc_jwt_verifier_destroy(grpc_jwt_verifier *v) {
+ size_t i;
+ if (v == NULL) return;
+ grpc_httpcli_context_destroy(&v->http_ctx);
+ if (v->mappings != NULL) {
+ for (i = 0; i < v->num_mappings; i++) {
+ gpr_free(v->mappings[i].email_domain);
+ gpr_free(v->mappings[i].key_url_prefix);
+ }
+ gpr_free(v->mappings);
+ }
+ gpr_free(v);
+}
diff --git a/src/core/security/jwt_verifier.h b/src/core/security/jwt_verifier.h
new file mode 100644
index 0000000000..d898d2193f
--- /dev/null
+++ b/src/core/security/jwt_verifier.h
@@ -0,0 +1,136 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SECURITY_JWT_VERIFIER_H
+#define GRPC_CORE_SECURITY_JWT_VERIFIER_H
+
+#include "src/core/iomgr/pollset.h"
+#include "src/core/json/json.h"
+
+#include <grpc/support/slice.h>
+#include <grpc/support/time.h>
+
+/* --- Constants. --- */
+
+#define GRPC_OPENID_CONFIG_URL_SUFFIX "/.well-known/openid-configuration"
+#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN \
+ "developer.gserviceaccount.com"
+#define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \
+ "www.googleapis.com/robot/v1/metadata/x509"
+
+/* --- grpc_jwt_verifier_status. --- */
+
+typedef enum {
+ GRPC_JWT_VERIFIER_OK = 0,
+ GRPC_JWT_VERIFIER_BAD_SIGNATURE,
+ GRPC_JWT_VERIFIER_BAD_FORMAT,
+ GRPC_JWT_VERIFIER_BAD_AUDIENCE,
+ GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR,
+ GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE,
+ GRPC_JWT_VERIFIER_GENERIC_ERROR
+} grpc_jwt_verifier_status;
+
+const char *grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status);
+
+/* --- grpc_jwt_claims. --- */
+
+typedef struct grpc_jwt_claims grpc_jwt_claims;
+
+void grpc_jwt_claims_destroy(grpc_jwt_claims *claims);
+
+/* Returns the whole JSON tree of the claims. */
+const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims);
+
+/* Access to registered claims in https://tools.ietf.org/html/rfc7519#page-9 */
+const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims);
+const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims);
+const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims);
+const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims);
+gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims);
+gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims);
+gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims);
+
+/* --- grpc_jwt_verifier. --- */
+
+typedef struct grpc_jwt_verifier grpc_jwt_verifier;
+
+typedef struct {
+ /* The email domain is the part after the @ sign. */
+ const char *email_domain;
+
+ /* The key url prefix will be used to get the public key from the issuer:
+ https://<key_url_prefix>/<issuer_email>
+ Therefore the key_url_prefix must NOT contain https://. */
+ const char *key_url_prefix;
+} grpc_jwt_verifier_email_domain_key_url_mapping;
+
+/* Globals to control the verifier. Not thread-safe. */
+extern gpr_timespec grpc_jwt_verifier_clock_skew;
+extern gpr_timespec grpc_jwt_verifier_max_delay;
+
+/* The verifier can be created with some custom mappings to help with key
+ discovery in the case where the issuer is an email address.
+   mappings can be NULL, in which case num_mappings MUST be 0.
+   A verifier object has one built-in mapping (unless overridden):
+   GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN ->
+   GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX. */
+grpc_jwt_verifier *grpc_jwt_verifier_create(
+ const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
+ size_t num_mappings);
+
+/* The verifier must not be destroyed if there are still outstanding
+   callbacks. */
+void grpc_jwt_verifier_destroy(grpc_jwt_verifier *verifier);
+
+/* User-provided callback that will be called when the verification of the JWT
+   is done (possibly in another thread).
+   It is the responsibility of the callback implementation to call
+   grpc_jwt_claims_destroy on the claims. */
+typedef void (*grpc_jwt_verification_done_cb)(void *user_data,
+ grpc_jwt_verifier_status status,
+ grpc_jwt_claims *claims);
+
+/* Verifies the JWT for the given expected audience. */
+void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
+ grpc_jwt_verifier *verifier,
+ grpc_pollset *pollset, const char *jwt,
+ const char *audience,
+ grpc_jwt_verification_done_cb cb,
+ void *user_data);
+
+/* --- TESTING ONLY exposed functions. --- */
+
+grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer);
+grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
+ const char *audience);
+
+#endif /* GRPC_CORE_SECURITY_JWT_VERIFIER_H */
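A hedged usage sketch of this API, not part of the commit: it assumes the caller already owns a grpc_exec_ctx and a grpc_pollset from the surrounding iomgr plumbing, and the audience string is made up.

#include <grpc/support/log.h>

#include "src/core/security/jwt_verifier.h"

static void on_jwt_verified(void *user_data, grpc_jwt_verifier_status status,
                            grpc_jwt_claims *claims) {
  if (status != GRPC_JWT_VERIFIER_OK) {
    gpr_log(GPR_ERROR, "JWT rejected: %s",
            grpc_jwt_verifier_status_to_string(status));
    return;
  }
  /* Per the contract above, the callback now owns the claims. */
  gpr_log(GPR_INFO, "JWT accepted, issuer=%s", grpc_jwt_claims_issuer(claims));
  grpc_jwt_claims_destroy(claims);
}

static void verify_one_jwt(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           const char *jwt) {
  grpc_jwt_verifier *v = grpc_jwt_verifier_create(NULL /* extra mappings */, 0);
  grpc_jwt_verifier_verify(exec_ctx, v, pollset, jwt,
                           "https://my-service.example.com" /* audience */,
                           on_jwt_verified, NULL);
  /* grpc_jwt_verifier_destroy(v) must be deferred until on_jwt_verified has
     run; it is intentionally not called here. */
}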
diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c
index 73496d1153..d11c43be20 100644
--- a/src/core/security/secure_endpoint.c
+++ b/src/core/security/secure_endpoint.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,15 +49,15 @@ typedef struct {
struct tsi_frame_protector *protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
- grpc_endpoint_read_cb read_cb;
- void *read_user_data;
- grpc_endpoint_write_cb write_cb;
- void *write_user_data;
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+ grpc_closure on_read;
+ gpr_slice_buffer *read_buffer;
+ gpr_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
gpr_slice_buffer leftover_bytes;
/* buffers for read and write */
gpr_slice read_staging_buffer;
- gpr_slice_buffer input_buffer;
gpr_slice write_staging_buffer;
gpr_slice_buffer output_buffer;
@@ -67,65 +67,98 @@ typedef struct {
int grpc_trace_secure_endpoint = 0;
-static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
-
-static void destroy(secure_endpoint *secure_ep) {
+static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint *ep = secure_ep;
- grpc_endpoint_destroy(ep->wrapped_ep);
+ grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
gpr_slice_buffer_destroy(&ep->leftover_bytes);
gpr_slice_unref(ep->read_staging_buffer);
- gpr_slice_buffer_destroy(&ep->input_buffer);
gpr_slice_unref(ep->write_staging_buffer);
gpr_slice_buffer_destroy(&ep->output_buffer);
+ gpr_slice_buffer_destroy(&ep->source_buffer);
gpr_mu_destroy(&ep->protector_mu);
gpr_free(ep);
}
-static void secure_endpoint_unref(secure_endpoint *ep) {
+/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/
+#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG
+#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
+ secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
+#define SECURE_ENDPOINT_REF(ep, reason) \
+ secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
+static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
+                                  secure_endpoint *ep, const char *reason,
+                                  const char *file, int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d",
+ ep, reason, ep->ref.count, ep->ref.count - 1);
+ if (gpr_unref(&ep->ref)) {
+ destroy(exec_ctx, ep);
+ }
+}
+
+static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
+ const char *file, int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d",
+ ep, reason, ep->ref.count, ep->ref.count + 1);
+ gpr_ref(&ep->ref);
+}
+#else
+#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
+ secure_endpoint_unref((exec_ctx), (ep))
+#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep))
+static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
+ secure_endpoint *ep) {
if (gpr_unref(&ep->ref)) {
- destroy(ep);
+ destroy(exec_ctx, ep);
}
}
-static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
- gpr_uint8 **end) {
- gpr_slice_buffer_add(&ep->input_buffer, ep->read_staging_buffer);
+static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
+#endif
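The #ifdef above follows the debug-refcount idiom used throughout this tree; a generic sketch of the same shape for a hypothetical counted_object type (only the ref side is shown, the unref side mirrors it with gpr_unref and a destroy call):

#include <grpc/support/log.h>
#include <grpc/support/sync.h>

typedef struct { gpr_refcount refs; } counted_object; /* hypothetical */

/*#define COUNTED_OBJECT_REFCOUNT_DEBUG*/
#ifdef COUNTED_OBJECT_REFCOUNT_DEBUG
#define COUNTED_OBJECT_REF(o, reason) \
  counted_object_ref((o), (reason), __FILE__, __LINE__)
static void counted_object_ref(counted_object *o, const char *reason,
                               const char *file, int line) {
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "OBJ ref %p : %s", o, reason);
  gpr_ref(&o->refs);
}
#else
#define COUNTED_OBJECT_REF(o, reason) counted_object_ref((o))
static void counted_object_ref(counted_object *o) { gpr_ref(&o->refs); }
#endif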
+
+static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
+ uint8_t **end) {
+ gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
*cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
*end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
}
-static void call_read_cb(secure_endpoint *ep, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status error) {
+static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
+ bool success) {
if (grpc_trace_secure_endpoint) {
size_t i;
- for (i = 0; i < nslices; i++) {
- char *data =
- gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
- GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
+ for (i = 0; i < ep->read_buffer->count; i++) {
+ char *data = gpr_dump_slice(ep->read_buffer->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
gpr_free(data);
}
}
- ep->read_cb(ep->read_user_data, slices, nslices, error);
- secure_endpoint_unref(ep);
+ ep->read_buffer = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, ep->read_cb, success, NULL);
+ SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
-static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status error) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
unsigned i;
- gpr_uint8 keep_looping = 0;
- size_t input_buffer_count = 0;
+ uint8_t keep_looping = 0;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)user_data;
- gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
- gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
+ uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
+ uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
+
+ if (!success) {
+ gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ call_read_cb(exec_ctx, ep, 0);
+ return;
+ }
/* TODO(yangg) check error, maybe bail out early */
- for (i = 0; i < nslices; i++) {
- gpr_slice encrypted = slices[i];
- gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
+ for (i = 0; i < ep->source_buffer.count; i++) {
+ gpr_slice encrypted = ep->source_buffer.slices[i];
+ uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
size_t message_size = GPR_SLICE_LENGTH(encrypted);
while (message_size > 0 || keep_looping) {
@@ -163,7 +196,7 @@ static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
gpr_slice_buffer_add(
- &ep->input_buffer,
+ ep->read_buffer,
gpr_slice_split_head(
&ep->read_staging_buffer,
(size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
@@ -171,81 +204,66 @@ static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
/* TODO(yangg) experiment with moving this block after read_cb to see if it
helps latency */
- for (i = 0; i < nslices; i++) {
- gpr_slice_unref(slices[i]);
- }
+ gpr_slice_buffer_reset_and_unref(&ep->source_buffer);
if (result != TSI_OK) {
- gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
- call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
+ gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ call_read_cb(exec_ctx, ep, 0);
return;
}
- /* The upper level will unref the slices. */
- input_buffer_count = ep->input_buffer.count;
- ep->input_buffer.count = 0;
- call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
+
+ call_read_cb(exec_ctx, ep, 1);
}
-static void endpoint_notify_on_read(grpc_endpoint *secure_ep,
- grpc_endpoint_read_cb cb, void *user_data) {
+static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
ep->read_cb = cb;
- ep->read_user_data = user_data;
-
- secure_endpoint_ref(ep);
+ ep->read_buffer = slices;
+ gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ SECURE_ENDPOINT_REF(ep, "read");
if (ep->leftover_bytes.count) {
- size_t leftover_nslices = ep->leftover_bytes.count;
- ep->leftover_bytes.count = 0;
- on_read(ep, ep->leftover_bytes.slices, leftover_nslices,
- GRPC_ENDPOINT_CB_OK);
+ gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
+ GPR_ASSERT(ep->leftover_bytes.count == 0);
+ on_read(exec_ctx, ep, 1);
return;
}
- grpc_endpoint_notify_on_read(ep->wrapped_ep, on_read, ep);
+ grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
+ &ep->on_read);
}
-static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
- gpr_uint8 **end) {
+static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur,
+ uint8_t **end) {
gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
*cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
*end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
}
-static void on_write(void *data, grpc_endpoint_cb_status error) {
- secure_endpoint *ep = data;
- ep->write_cb(ep->write_user_data, error);
- secure_endpoint_unref(ep);
-}
-
-static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
- gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_write_cb cb,
- void *user_data) {
+static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
unsigned i;
- size_t output_buffer_count = 0;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
- gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
- grpc_endpoint_write_status status;
- GPR_ASSERT(ep->output_buffer.count == 0);
+ uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
+ uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
+
+ gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
if (grpc_trace_secure_endpoint) {
- for (i = 0; i < nslices; i++) {
+ for (i = 0; i < slices->count; i++) {
char *data =
- gpr_hexdump((char *)GPR_SLICE_START_PTR(slices[i]),
- GPR_SLICE_LENGTH(slices[i]), GPR_HEXDUMP_PLAINTEXT);
+ gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
gpr_free(data);
}
}
- for (i = 0; i < nslices; i++) {
- gpr_slice plain = slices[i];
- gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
+ for (i = 0; i < slices->count; i++) {
+ gpr_slice plain = slices->slices[i];
+ uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
size_t message_size = GPR_SLICE_LENGTH(plain);
while (message_size > 0) {
size_t protected_buffer_size_to_send = (size_t)(end - cur);
@@ -294,50 +312,51 @@ static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
}
}
- for (i = 0; i < nslices; i++) {
- gpr_slice_unref(slices[i]);
- }
-
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
- return GRPC_ENDPOINT_WRITE_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
+ return;
}
- /* clear output_buffer and let the lower level handle its slices. */
- output_buffer_count = ep->output_buffer.count;
- ep->output_buffer.count = 0;
- ep->write_cb = cb;
- ep->write_user_data = user_data;
- /* Need to keep the endpoint alive across a transport */
- secure_endpoint_ref(ep);
- status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
- output_buffer_count, on_write, ep);
- if (status != GRPC_ENDPOINT_WRITE_PENDING) {
- secure_endpoint_unref(ep);
- }
- return status;
+ grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
}
-static void endpoint_shutdown(grpc_endpoint *secure_ep) {
+static void endpoint_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- grpc_endpoint_shutdown(ep->wrapped_ep);
+ grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep);
}
-static void endpoint_unref(grpc_endpoint *secure_ep) {
+static void endpoint_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- secure_endpoint_unref(ep);
+ SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy");
}
-static void endpoint_add_to_pollset(grpc_endpoint *secure_ep,
+static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep,
grpc_pollset *pollset) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
+ grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset);
+}
+
+static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep,
+ grpc_pollset_set *pollset_set) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
+}
+
+static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_peer(ep->wrapped_ep);
}
static const grpc_endpoint_vtable vtable = {
- endpoint_notify_on_read, endpoint_write, endpoint_add_to_pollset,
- endpoint_shutdown, endpoint_unref};
+ endpoint_read, endpoint_write, endpoint_add_to_pollset,
+ endpoint_add_to_pollset_set, endpoint_shutdown, endpoint_destroy,
+ endpoint_get_peer};
grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *transport,
@@ -354,8 +373,10 @@ grpc_endpoint *grpc_secure_endpoint_create(
}
ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
- gpr_slice_buffer_init(&ep->input_buffer);
gpr_slice_buffer_init(&ep->output_buffer);
+ gpr_slice_buffer_init(&ep->source_buffer);
+ ep->read_buffer = NULL;
+ grpc_closure_init(&ep->on_read, on_read, ep);
gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1);
return &ep->base;
diff --git a/src/core/security/secure_endpoint.h b/src/core/security/secure_endpoint.h
index 93c29b5111..5176ef2059 100644
--- a/src/core/security/secure_endpoint.h
+++ b/src/core/security/secure_endpoint.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_SECURE_ENDPOINT_H
-#define GRPC_INTERNAL_CORE_SECURITY_SECURE_ENDPOINT_H
+#ifndef GRPC_CORE_SECURITY_SECURE_ENDPOINT_H
+#define GRPC_CORE_SECURITY_SECURE_ENDPOINT_H
#include "src/core/iomgr/endpoint.h"
#include <grpc/support/slice.h>
@@ -46,4 +46,4 @@ grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *to_wrap,
gpr_slice *leftover_slices, size_t leftover_nslices);
-#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURE_ENDPOINT_H */
+#endif /* GRPC_CORE_SECURITY_SECURE_ENDPOINT_H */
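A short sketch of how this constructor is typically fed; everything other than grpc_secure_endpoint_create itself is assumed to come from the surrounding handshake code. Once the TSI handshake yields a frame protector and possibly some leftover bytes read past the end of the handshake, the raw endpoint is wrapped and the wrapper is used in its place.

#include <grpc/support/slice_buffer.h>

#include "src/core/security/secure_endpoint.h"

static grpc_endpoint *wrap_after_handshake(
    struct tsi_frame_protector *protector, grpc_endpoint *raw_endpoint,
    gpr_slice_buffer *leftovers) {
  /* The wrapper takes over raw_endpoint; the leftover slices will satisfy the
     first unprotect pass without another read from the wire. */
  return grpc_secure_endpoint_create(protector, raw_endpoint,
                                     leftovers->slices, leftovers->count);
}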
diff --git a/src/core/security/secure_transport_setup.c b/src/core/security/secure_transport_setup.c
deleted file mode 100644
index 1b39ab141e..0000000000
--- a/src/core/security/secure_transport_setup.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/security/secure_transport_setup.h"
-
-#include <string.h>
-
-#include "src/core/security/secure_endpoint.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
-
-#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
-
-typedef struct {
- grpc_security_connector *connector;
- tsi_handshaker *handshaker;
- unsigned char *handshake_buffer;
- size_t handshake_buffer_size;
- grpc_endpoint *endpoint;
- gpr_slice_buffer left_overs;
- grpc_secure_transport_setup_done_cb cb;
- void *user_data;
-} grpc_secure_transport_setup;
-
-static void on_handshake_data_received_from_peer(void *setup, gpr_slice *slices,
- size_t nslices,
- grpc_endpoint_cb_status error);
-
-static void on_handshake_data_sent_to_peer(void *setup,
- grpc_endpoint_cb_status error);
-
-static void secure_transport_setup_done(grpc_secure_transport_setup *s,
- int is_success) {
- if (is_success) {
- s->cb(s->user_data, GRPC_SECURITY_OK, s->endpoint);
- } else {
- if (s->endpoint != NULL) {
- grpc_endpoint_shutdown(s->endpoint);
- grpc_endpoint_destroy(s->endpoint);
- }
- s->cb(s->user_data, GRPC_SECURITY_ERROR, NULL);
- }
- if (s->handshaker != NULL) tsi_handshaker_destroy(s->handshaker);
- if (s->handshake_buffer != NULL) gpr_free(s->handshake_buffer);
- gpr_slice_buffer_destroy(&s->left_overs);
- grpc_security_connector_unref(s->connector);
- gpr_free(s);
-}
-
-static void on_peer_checked(void *user_data, grpc_security_status status) {
- grpc_secure_transport_setup *s = user_data;
- tsi_frame_protector *protector;
- tsi_result result;
- if (status != GRPC_SECURITY_OK) {
- gpr_log(GPR_ERROR, "Error checking peer.");
- secure_transport_setup_done(s, 0);
- return;
- }
- result =
- tsi_handshaker_create_frame_protector(s->handshaker, NULL, &protector);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Frame protector creation failed with error %s.",
- tsi_result_to_string(result));
- secure_transport_setup_done(s, 0);
- return;
- }
- s->endpoint = grpc_secure_endpoint_create(
- protector, s->endpoint, s->left_overs.slices, s->left_overs.count);
- secure_transport_setup_done(s, 1);
- return;
-}
-
-static void check_peer(grpc_secure_transport_setup *s) {
- grpc_security_status peer_status;
- tsi_peer peer;
- tsi_result result = tsi_handshaker_extract_peer(s->handshaker, &peer);
-
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Peer extraction failed with error %s",
- tsi_result_to_string(result));
- secure_transport_setup_done(s, 0);
- return;
- }
- peer_status = grpc_security_connector_check_peer(s->connector, peer,
- on_peer_checked, s);
- if (peer_status == GRPC_SECURITY_ERROR) {
- gpr_log(GPR_ERROR, "Peer check failed.");
- secure_transport_setup_done(s, 0);
- return;
- } else if (peer_status == GRPC_SECURITY_OK) {
- on_peer_checked(s, peer_status);
- }
-}
-
-static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) {
- size_t offset = 0;
- tsi_result result = TSI_OK;
- gpr_slice to_send;
- grpc_endpoint_write_status write_status;
-
- do {
- size_t to_send_size = s->handshake_buffer_size - offset;
- result = tsi_handshaker_get_bytes_to_send_to_peer(
- s->handshaker, s->handshake_buffer + offset, &to_send_size);
- offset += to_send_size;
- if (result == TSI_INCOMPLETE_DATA) {
- s->handshake_buffer_size *= 2;
- s->handshake_buffer =
- gpr_realloc(s->handshake_buffer, s->handshake_buffer_size);
- }
- } while (result == TSI_INCOMPLETE_DATA);
-
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Handshake failed with error %s",
- tsi_result_to_string(result));
- secure_transport_setup_done(s, 0);
- return;
- }
-
- to_send =
- gpr_slice_from_copied_buffer((const char *)s->handshake_buffer, offset);
- /* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- write_status = grpc_endpoint_write(s->endpoint, &to_send, 1,
- on_handshake_data_sent_to_peer, s);
- if (write_status == GRPC_ENDPOINT_WRITE_ERROR) {
- gpr_log(GPR_ERROR, "Could not send handshake data to peer.");
- secure_transport_setup_done(s, 0);
- } else if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
- on_handshake_data_sent_to_peer(s, GRPC_ENDPOINT_CB_OK);
- }
-}
-
-static void cleanup_slices(gpr_slice *slices, size_t num_slices) {
- size_t i;
- for (i = 0; i < num_slices; i++) {
- gpr_slice_unref(slices[i]);
- }
-}
-
-static void on_handshake_data_received_from_peer(
- void *setup, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status error) {
- grpc_secure_transport_setup *s = setup;
- size_t consumed_slice_size = 0;
- tsi_result result = TSI_OK;
- size_t i;
- size_t num_left_overs;
- int has_left_overs_in_current_slice = 0;
-
- if (error != GRPC_ENDPOINT_CB_OK) {
- gpr_log(GPR_ERROR, "Read failed.");
- cleanup_slices(slices, nslices);
- secure_transport_setup_done(s, 0);
- return;
- }
-
- for (i = 0; i < nslices; i++) {
- consumed_slice_size = GPR_SLICE_LENGTH(slices[i]);
- result = tsi_handshaker_process_bytes_from_peer(
- s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size);
- if (!tsi_handshaker_is_in_progress(s->handshaker)) break;
- }
-
- if (tsi_handshaker_is_in_progress(s->handshaker)) {
- /* We may need more data. */
- if (result == TSI_INCOMPLETE_DATA) {
- /* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- grpc_endpoint_notify_on_read(s->endpoint,
- on_handshake_data_received_from_peer, setup);
- cleanup_slices(slices, nslices);
- return;
- } else {
- send_handshake_bytes_to_peer(s);
- cleanup_slices(slices, nslices);
- return;
- }
- }
-
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Handshake failed with error %s",
- tsi_result_to_string(result));
- cleanup_slices(slices, nslices);
- secure_transport_setup_done(s, 0);
- return;
- }
-
- /* Handshake is done and successful this point. */
- has_left_overs_in_current_slice =
- (consumed_slice_size < GPR_SLICE_LENGTH(slices[i]));
- num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1;
- if (num_left_overs == 0) {
- cleanup_slices(slices, nslices);
- check_peer(s);
- return;
- }
- cleanup_slices(slices, nslices - num_left_overs);
-
- /* Put the leftovers in our buffer (ownership transfered). */
- if (has_left_overs_in_current_slice) {
- gpr_slice_buffer_add(&s->left_overs,
- gpr_slice_split_tail(&slices[i], consumed_slice_size));
- gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */
- }
- gpr_slice_buffer_addn(&s->left_overs, &slices[i + 1],
- num_left_overs - (size_t)has_left_overs_in_current_slice);
- check_peer(s);
-}
-
-/* If setup is NULL, the setup is done. */
-static void on_handshake_data_sent_to_peer(void *setup,
- grpc_endpoint_cb_status error) {
- grpc_secure_transport_setup *s = setup;
-
- /* Make sure that write is OK. */
- if (error != GRPC_ENDPOINT_CB_OK) {
- gpr_log(GPR_ERROR, "Write failed with error %d.", error);
- if (setup != NULL) secure_transport_setup_done(s, 0);
- return;
- }
-
- /* We may be done. */
- if (tsi_handshaker_is_in_progress(s->handshaker)) {
- /* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- grpc_endpoint_notify_on_read(s->endpoint,
- on_handshake_data_received_from_peer, setup);
- } else {
- check_peer(s);
- }
-}
-
-void grpc_setup_secure_transport(grpc_security_connector *connector,
- grpc_endpoint *nonsecure_endpoint,
- grpc_secure_transport_setup_done_cb cb,
- void *user_data) {
- grpc_security_status result = GRPC_SECURITY_OK;
- grpc_secure_transport_setup *s =
- gpr_malloc(sizeof(grpc_secure_transport_setup));
- memset(s, 0, sizeof(grpc_secure_transport_setup));
- result = grpc_security_connector_create_handshaker(connector, &s->handshaker);
- if (result != GRPC_SECURITY_OK) {
- secure_transport_setup_done(s, 0);
- return;
- }
- s->connector = grpc_security_connector_ref(connector);
- s->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- s->handshake_buffer = gpr_malloc(s->handshake_buffer_size);
- s->endpoint = nonsecure_endpoint;
- s->user_data = user_data;
- s->cb = cb;
- gpr_slice_buffer_init(&s->left_overs);
- send_handshake_bytes_to_peer(s);
-}
diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c
index 34cb0395a2..fbec263eed 100644
--- a/src/core/security/security_connector.c
+++ b/src/core/security/security_connector.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,21 +33,23 @@
#include "src/core/security/security_connector.h"
+#include <stdbool.h>
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
+
#include "src/core/security/credentials.h"
+#include "src/core/security/handshake.h"
#include "src/core/security/secure_endpoint.h"
#include "src/core/security/security_context.h"
#include "src/core/support/env.h"
-#include "src/core/support/file.h"
+#include "src/core/support/load_file.h"
#include "src/core/support/string.h"
#include "src/core/transport/chttp2/alpn.h"
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
-#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
-#include <grpc/support/string_util.h>
#include "src/core/tsi/fake_transport_security.h"
#include "src/core/tsi/ssl_transport_security.h"
@@ -60,6 +62,14 @@ static const char *installed_roots_path =
INSTALL_PREFIX "/share/grpc/roots.pem";
#endif
+/* -- Overridden default roots. -- */
+
+static grpc_ssl_roots_override_callback ssl_roots_override_cb = NULL;
+
+void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb) {
+ ssl_roots_override_cb = cb;
+}
+
/* -- Cipher suites. -- */
/* Defines the cipher suites that we accept by default. All these cipher suites
@@ -84,12 +94,12 @@ static const char *ssl_cipher_suites(void) {
/* -- Common methods. -- */
/* Returns the first property with that name. */
-const tsi_peer_property *tsi_peer_get_property_by_name(
- const tsi_peer *peer, const char *name) {
+const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
+ const char *name) {
size_t i;
if (peer == NULL) return NULL;
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property* property = &peer->properties[i];
+ const tsi_peer_property *property = &peer->properties[i];
if (name == NULL && property->name == NULL) {
return property;
}
@@ -101,55 +111,117 @@ const tsi_peer_property *tsi_peer_get_property_by_name(
return NULL;
}
-grpc_security_status grpc_security_connector_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
- if (sc == NULL || handshaker == NULL) return GRPC_SECURITY_ERROR;
- return sc->vtable->create_handshaker(sc, handshaker);
+void grpc_server_security_connector_shutdown(
+ grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector) {
+ grpc_security_connector_handshake_list *tmp;
+ gpr_mu_lock(&connector->mu);
+ while (connector->handshaking_handshakes) {
+ tmp = connector->handshaking_handshakes;
+ grpc_security_handshake_shutdown(
+ exec_ctx, connector->handshaking_handshakes->handshake);
+ connector->handshaking_handshakes = tmp->next;
+ gpr_free(tmp);
+ }
+ gpr_mu_unlock(&connector->mu);
}
-grpc_security_status grpc_security_connector_check_peer(
- grpc_security_connector *sc, tsi_peer peer, grpc_security_check_cb cb,
+void grpc_channel_security_connector_do_handshake(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint, grpc_security_handshake_done_cb cb,
void *user_data) {
- if (sc == NULL) {
- tsi_peer_destruct(&peer);
- return GRPC_SECURITY_ERROR;
+ if (sc == NULL || nonsecure_endpoint == NULL) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
+ } else {
+ sc->do_handshake(exec_ctx, sc, nonsecure_endpoint, cb, user_data);
}
- return sc->vtable->check_peer(sc, peer, cb, user_data);
}
-grpc_security_status grpc_channel_security_connector_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
- if (sc == NULL || sc->check_call_host == NULL) return GRPC_SECURITY_ERROR;
- return sc->check_call_host(sc, host, cb, user_data);
+void grpc_server_security_connector_do_handshake(
+ grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
+ grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb, void *user_data) {
+ if (sc == NULL || nonsecure_endpoint == NULL) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
+ } else {
+ sc->do_handshake(exec_ctx, sc, acceptor, nonsecure_endpoint, cb, user_data);
+ }
}
-void grpc_security_connector_unref(grpc_security_connector *sc) {
- if (sc == NULL) return;
- if (gpr_unref(&sc->refcount)) sc->vtable->destroy(sc);
+void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
+ tsi_peer peer,
+ grpc_security_peer_check_cb cb,
+ void *user_data) {
+ if (sc == NULL) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
+ tsi_peer_destruct(&peer);
+ } else {
+ sc->vtable->check_peer(exec_ctx, sc, peer, cb, user_data);
+ }
+}
+
+void grpc_channel_security_connector_check_call_host(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_auth_context *auth_context,
+ grpc_security_call_host_check_cb cb, void *user_data) {
+ if (sc == NULL || sc->check_call_host == NULL) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR);
+ } else {
+ sc->check_call_host(exec_ctx, sc, host, auth_context, cb, user_data);
+ }
}
+#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+grpc_security_connector *grpc_security_connector_ref(
+ grpc_security_connector *sc, const char *file, int line,
+ const char *reason) {
+ if (sc == NULL) return NULL;
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p ref %d -> %d %s", sc,
+ (int)sc->refcount.count, (int)sc->refcount.count + 1, reason);
+#else
grpc_security_connector *grpc_security_connector_ref(
grpc_security_connector *sc) {
if (sc == NULL) return NULL;
+#endif
gpr_ref(&sc->refcount);
return sc;
}
+#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+void grpc_security_connector_unref(grpc_security_connector *sc,
+ const char *file, int line,
+ const char *reason) {
+ if (sc == NULL) return;
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p unref %d -> %d %s", sc,
+ (int)sc->refcount.count, (int)sc->refcount.count - 1, reason);
+#else
+void grpc_security_connector_unref(grpc_security_connector *sc) {
+ if (sc == NULL) return;
+#endif
+ if (gpr_unref(&sc->refcount)) sc->vtable->destroy(sc);
+}
+
static void connector_pointer_arg_destroy(void *p) {
- grpc_security_connector_unref(p);
+ GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg");
}
static void *connector_pointer_arg_copy(void *p) {
- return grpc_security_connector_ref(p);
+ return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
}
+static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+static const grpc_arg_pointer_vtable connector_pointer_vtable = {
+ connector_pointer_arg_copy, connector_pointer_arg_destroy,
+ connector_pointer_cmp};
+
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) {
grpc_arg result;
result.type = GRPC_ARG_POINTER;
result.key = GRPC_SECURITY_CONNECTOR_ARG;
- result.value.pointer.destroy = connector_pointer_arg_destroy;
- result.value.pointer.copy = connector_pointer_arg_copy;
+ result.value.pointer.vtable = &connector_pointer_vtable;
result.value.pointer.p = sc;
return result;
}
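The vtable-based pointer arg used here replaces the old per-arg copy/destroy function pointers; a minimal sketch of the same pattern for a hypothetical ref-counted my_object type (the type, its ref/unref helpers, and the arg key are assumptions, not part of this change):

#include <grpc/grpc.h>
#include <grpc/support/useful.h>

typedef struct my_object my_object;       /* hypothetical ref-counted type */
my_object *my_object_ref(my_object *obj); /* assumed helpers */
void my_object_unref(my_object *obj);

static void *my_object_arg_copy(void *p) { return my_object_ref(p); }
static void my_object_arg_destroy(void *p) { my_object_unref(p); }
static int my_object_arg_cmp(void *a, void *b) { return GPR_ICMP(a, b); }

static const grpc_arg_pointer_vtable my_object_arg_vtable = {
    my_object_arg_copy, my_object_arg_destroy, my_object_arg_cmp};

static grpc_arg my_object_to_arg(my_object *obj) {
  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = "grpc.internal.my_object"; /* hypothetical channel arg key */
  arg.value.pointer.p = obj;
  arg.value.pointer.vtable = &my_object_arg_vtable;
  return arg;
}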
@@ -176,53 +248,26 @@ grpc_security_connector *grpc_find_security_connector_in_args(
return NULL;
}
-static int check_request_metadata_creds(grpc_credentials *creds) {
- if (creds != NULL && !grpc_credentials_has_request_metadata(creds)) {
- gpr_log(GPR_ERROR,
- "Incompatible credentials for channel security connector: needs to "
- "set request metadata.");
- return 0;
- }
- return 1;
-}
-
/* -- Fake implementation. -- */
-typedef struct {
- grpc_channel_security_connector base;
- int call_host_check_is_async;
-} grpc_fake_channel_security_connector;
-
static void fake_channel_destroy(grpc_security_connector *sc) {
grpc_channel_security_connector *c = (grpc_channel_security_connector *)sc;
- grpc_credentials_unref(c->request_metadata_creds);
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
+ grpc_call_credentials_unref(c->request_metadata_creds);
gpr_free(sc);
}
static void fake_server_destroy(grpc_security_connector *sc) {
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
+ grpc_server_security_connector *c = (grpc_server_security_connector *)sc;
+ gpr_mu_destroy(&c->mu);
gpr_free(sc);
}
-static grpc_security_status fake_channel_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
- *handshaker = tsi_create_fake_handshaker(1);
- return GRPC_SECURITY_OK;
-}
-
-static grpc_security_status fake_server_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
- *handshaker = tsi_create_fake_handshaker(0);
- return GRPC_SECURITY_OK;
-}
-
-static grpc_security_status fake_check_peer(grpc_security_connector *sc,
- tsi_peer peer,
- grpc_security_check_cb cb,
- void *user_data) {
+static void fake_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc, tsi_peer peer,
+ grpc_security_peer_check_cb cb, void *user_data) {
const char *prop_name;
grpc_security_status status = GRPC_SECURITY_OK;
+ grpc_auth_context *auth_context = NULL;
if (peer.property_count != 1) {
gpr_log(GPR_ERROR, "Fake peers should only have 1 property.");
status = GRPC_SECURITY_ERROR;
@@ -242,59 +287,74 @@ static grpc_security_status fake_check_peer(grpc_security_connector *sc,
status = GRPC_SECURITY_ERROR;
goto end;
}
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
- sc->auth_context = grpc_auth_context_create(NULL, 1);
- sc->auth_context->properties[0] = grpc_auth_property_init_from_cstring(
- GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
+ auth_context = grpc_auth_context_create(NULL);
+ grpc_auth_context_add_cstring_property(
+ auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end:
+ cb(exec_ctx, user_data, status, auth_context);
+ grpc_auth_context_unref(auth_context);
tsi_peer_destruct(&peer);
- return status;
}
-static grpc_security_status fake_channel_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
- grpc_fake_channel_security_connector *c =
- (grpc_fake_channel_security_connector *)sc;
- if (c->call_host_check_is_async) {
- cb(user_data, GRPC_SECURITY_OK);
- return GRPC_SECURITY_PENDING;
- } else {
- return GRPC_SECURITY_OK;
- }
+static void fake_channel_check_call_host(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ const char *host,
+ grpc_auth_context *auth_context,
+ grpc_security_call_host_check_cb cb,
+ void *user_data) {
+ cb(exec_ctx, user_data, GRPC_SECURITY_OK);
+}
+
+static void fake_channel_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
+ grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(1), &sc->base,
+ true, nonsecure_endpoint, cb, user_data);
+}
+
+static void fake_server_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_tcp_server_acceptor *acceptor,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
+ grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(0), &sc->base,
+ false, nonsecure_endpoint, cb, user_data);
}
static grpc_security_connector_vtable fake_channel_vtable = {
- fake_channel_destroy, fake_channel_create_handshaker, fake_check_peer};
+ fake_channel_destroy, fake_check_peer};
-static grpc_security_connector_vtable fake_server_vtable = {
- fake_server_destroy, fake_server_create_handshaker, fake_check_peer};
+static grpc_security_connector_vtable fake_server_vtable = {fake_server_destroy,
+ fake_check_peer};
grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
- grpc_credentials *request_metadata_creds, int call_host_check_is_async) {
- grpc_fake_channel_security_connector *c =
- gpr_malloc(sizeof(grpc_fake_channel_security_connector));
- memset(c, 0, sizeof(grpc_fake_channel_security_connector));
- gpr_ref_init(&c->base.base.refcount, 1);
- c->base.base.is_client_side = 1;
- c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
- c->base.base.vtable = &fake_channel_vtable;
- GPR_ASSERT(check_request_metadata_creds(request_metadata_creds));
- c->base.request_metadata_creds = grpc_credentials_ref(request_metadata_creds);
- c->base.check_call_host = fake_channel_check_call_host;
- c->call_host_check_is_async = call_host_check_is_async;
- return &c->base;
-}
-
-grpc_security_connector *grpc_fake_server_security_connector_create(void) {
- grpc_security_connector *c = gpr_malloc(sizeof(grpc_security_connector));
- memset(c, 0, sizeof(grpc_security_connector));
- gpr_ref_init(&c->refcount, 1);
- c->is_client_side = 0;
- c->vtable = &fake_server_vtable;
- c->url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
+ grpc_call_credentials *request_metadata_creds) {
+ grpc_channel_security_connector *c = gpr_malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ gpr_ref_init(&c->base.refcount, 1);
+ c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
+ c->base.vtable = &fake_channel_vtable;
+ c->request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds);
+ c->check_call_host = fake_channel_check_call_host;
+ c->do_handshake = fake_channel_do_handshake;
+ return c;
+}
+
+grpc_server_security_connector *grpc_fake_server_security_connector_create(
+ void) {
+ grpc_server_security_connector *c =
+ gpr_malloc(sizeof(grpc_server_security_connector));
+ memset(c, 0, sizeof(*c));
+ gpr_ref_init(&c->base.refcount, 1);
+ c->base.vtable = &fake_server_vtable;
+ c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
+ c->do_handshake = fake_server_do_handshake;
+ gpr_mu_init(&c->mu);
return c;
}
@@ -305,40 +365,38 @@ typedef struct {
tsi_ssl_handshaker_factory *handshaker_factory;
char *target_name;
char *overridden_target_name;
- tsi_peer peer;
} grpc_ssl_channel_security_connector;
typedef struct {
- grpc_security_connector base;
+ grpc_server_security_connector base;
tsi_ssl_handshaker_factory *handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_security_connector *sc) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
- grpc_credentials_unref(c->base.request_metadata_creds);
+ grpc_call_credentials_unref(c->base.request_metadata_creds);
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
- tsi_peer_destruct(&c->peer);
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
gpr_free(sc);
}
static void ssl_server_destroy(grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
+
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
+ gpr_mu_destroy(&c->base.mu);
gpr_free(sc);
}
static grpc_security_status ssl_create_handshaker(
- tsi_ssl_handshaker_factory *handshaker_factory, int is_client,
+ tsi_ssl_handshaker_factory *handshaker_factory, bool is_client,
const char *peer_name, tsi_handshaker **handshaker) {
tsi_result result = TSI_OK;
if (handshaker_factory == NULL) return GRPC_SECURITY_ERROR;
@@ -352,22 +410,44 @@ static grpc_security_status ssl_create_handshaker(
return GRPC_SECURITY_OK;
}
-static grpc_security_status ssl_channel_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
+static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
- return ssl_create_handshaker(c->handshaker_factory, 1,
- c->overridden_target_name != NULL
- ? c->overridden_target_name
- : c->target_name,
- handshaker);
+ tsi_handshaker *handshaker;
+ grpc_security_status status = ssl_create_handshaker(
+ c->handshaker_factory, true,
+ c->overridden_target_name != NULL ? c->overridden_target_name
+ : c->target_name,
+ &handshaker);
+ if (status != GRPC_SECURITY_OK) {
+ cb(exec_ctx, user_data, status, NULL, NULL);
+ } else {
+ grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
+ nonsecure_endpoint, cb, user_data);
+ }
}
-static grpc_security_status ssl_server_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker) {
+static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_tcp_server_acceptor *acceptor,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- return ssl_create_handshaker(c->handshaker_factory, 0, NULL, handshaker);
+ tsi_handshaker *handshaker;
+ grpc_security_status status =
+ ssl_create_handshaker(c->handshaker_factory, false, NULL, &handshaker);
+ if (status != GRPC_SECURITY_OK) {
+ cb(exec_ctx, user_data, status, NULL, NULL);
+ } else {
+ grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, false,
+ nonsecure_endpoint, cb, user_data);
+ }
}
static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
@@ -389,37 +469,45 @@ static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
size_t i;
grpc_auth_context *ctx = NULL;
+ const char *peer_identity_property_name = NULL;
/* The caller has checked the certificate type property. */
GPR_ASSERT(peer->property_count >= 1);
- ctx = grpc_auth_context_create(NULL, peer->property_count);
- ctx->properties[0] = grpc_auth_property_init_from_cstring(
- GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
+ ctx = grpc_auth_context_create(NULL);
+ grpc_auth_context_add_cstring_property(
+ ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_SSL_TRANSPORT_SECURITY_TYPE);
- ctx->property_count = 1;
for (i = 0; i < peer->property_count; i++) {
const tsi_peer_property *prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
/* If there is no subject alt name, have the CN as the identity. */
- if (ctx->peer_identity_property_name == NULL) {
- ctx->peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME;
+ if (peer_identity_property_name == NULL) {
+ peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME;
}
- ctx->properties[ctx->property_count++] = grpc_auth_property_init(
- GRPC_X509_CN_PROPERTY_NAME, prop->value.data, prop->value.length);
+ grpc_auth_context_add_property(ctx, GRPC_X509_CN_PROPERTY_NAME,
+ prop->value.data, prop->value.length);
} else if (strcmp(prop->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
- ctx->peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME;
- ctx->properties[ctx->property_count++] = grpc_auth_property_init(
- GRPC_X509_SAN_PROPERTY_NAME, prop->value.data, prop->value.length);
+ peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME;
+ grpc_auth_context_add_property(ctx, GRPC_X509_SAN_PROPERTY_NAME,
+ prop->value.data, prop->value.length);
+ } else if (strcmp(prop->name, TSI_X509_PEM_CERT_PROPERTY) == 0) {
+ grpc_auth_context_add_property(ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME,
+ prop->value.data, prop->value.length);
}
}
+ if (peer_identity_property_name != NULL) {
+ GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name(
+ ctx, peer_identity_property_name) == 1);
+ }
return ctx;
}
static grpc_security_status ssl_check_peer(grpc_security_connector *sc,
const char *peer_name,
- const tsi_peer *peer) {
+ const tsi_peer *peer,
+ grpc_auth_context **auth_context) {
/* Check the ALPN. */
const tsi_peer_property *p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
@@ -437,79 +525,147 @@ static grpc_security_status ssl_check_peer(grpc_security_connector *sc,
gpr_log(GPR_ERROR, "Peer name %s is not in peer certificate", peer_name);
return GRPC_SECURITY_ERROR;
}
- if (sc->auth_context != NULL) {
- GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
- }
- sc->auth_context = tsi_ssl_peer_to_auth_context(peer);
+ *auth_context = tsi_ssl_peer_to_auth_context(peer);
return GRPC_SECURITY_OK;
}
-static grpc_security_status ssl_channel_check_peer(grpc_security_connector *sc,
- tsi_peer peer,
- grpc_security_check_cb cb,
- void *user_data) {
+static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc, tsi_peer peer,
+ grpc_security_peer_check_cb cb,
+ void *user_data) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_security_status status;
- tsi_peer_destruct(&c->peer);
- c->peer = peer;
+ grpc_auth_context *auth_context = NULL;
status = ssl_check_peer(sc, c->overridden_target_name != NULL
? c->overridden_target_name
: c->target_name,
- &peer);
- return status;
+ &peer, &auth_context);
+ cb(exec_ctx, user_data, status, auth_context);
+ grpc_auth_context_unref(auth_context);
+ tsi_peer_destruct(&peer);
}
-static grpc_security_status ssl_server_check_peer(grpc_security_connector *sc,
- tsi_peer peer,
- grpc_security_check_cb cb,
- void *user_data) {
- grpc_security_status status = ssl_check_peer(sc, NULL, &peer);
+static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc, tsi_peer peer,
+ grpc_security_peer_check_cb cb,
+ void *user_data) {
+ grpc_auth_context *auth_context = NULL;
+ grpc_security_status status = ssl_check_peer(sc, NULL, &peer, &auth_context);
tsi_peer_destruct(&peer);
- return status;
+ cb(exec_ctx, user_data, status, auth_context);
+ grpc_auth_context_unref(auth_context);
+}
+
+static void add_shallow_auth_property_to_peer(tsi_peer *peer,
+ const grpc_auth_property *prop,
+ const char *tsi_prop_name) {
+ tsi_peer_property *tsi_prop = &peer->properties[peer->property_count++];
+ tsi_prop->name = (char *)tsi_prop_name;
+ tsi_prop->value.data = prop->value;
+ tsi_prop->value.length = prop->value_length;
}
-static grpc_security_status ssl_channel_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
+tsi_peer tsi_shallow_peer_from_ssl_auth_context(
+ const grpc_auth_context *auth_context) {
+ size_t max_num_props = 0;
+ grpc_auth_property_iterator it;
+ const grpc_auth_property *prop;
+ tsi_peer peer;
+ memset(&peer, 0, sizeof(peer));
+
+ it = grpc_auth_context_property_iterator(auth_context);
+ while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++;
+
+ if (max_num_props > 0) {
+ peer.properties = gpr_malloc(max_num_props * sizeof(tsi_peer_property));
+ it = grpc_auth_context_property_iterator(auth_context);
+ while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
+ if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) {
+ add_shallow_auth_property_to_peer(
+ &peer, prop, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY);
+ } else if (strcmp(prop->name, GRPC_X509_CN_PROPERTY_NAME) == 0) {
+ add_shallow_auth_property_to_peer(
+ &peer, prop, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY);
+ } else if (strcmp(prop->name, GRPC_X509_PEM_CERT_PROPERTY_NAME) == 0) {
+ add_shallow_auth_property_to_peer(&peer, prop,
+ TSI_X509_PEM_CERT_PROPERTY);
+ }
+ }
+ }
+ return peer;
+}
+
+void tsi_shallow_peer_destruct(tsi_peer *peer) {
+ if (peer->properties != NULL) gpr_free(peer->properties);
+}
+
+static void ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ const char *host,
+ grpc_auth_context *auth_context,
+ grpc_security_call_host_check_cb cb,
+ void *user_data) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
-
- if (ssl_host_matches_name(&c->peer, host)) return GRPC_SECURITY_OK;
+ grpc_security_status status = GRPC_SECURITY_ERROR;
+ tsi_peer peer = tsi_shallow_peer_from_ssl_auth_context(auth_context);
+ if (ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
/* If the target name was overridden, then the original target_name was
'checked' transitively during the previous peer check at the end of the
handshake. */
if (c->overridden_target_name != NULL && strcmp(host, c->target_name) == 0) {
- return GRPC_SECURITY_OK;
- } else {
- return GRPC_SECURITY_ERROR;
+ status = GRPC_SECURITY_OK;
}
+ cb(exec_ctx, user_data, status);
+ tsi_shallow_peer_destruct(&peer);
}
static grpc_security_connector_vtable ssl_channel_vtable = {
- ssl_channel_destroy, ssl_channel_create_handshaker, ssl_channel_check_peer};
+ ssl_channel_destroy, ssl_channel_check_peer};
static grpc_security_connector_vtable ssl_server_vtable = {
- ssl_server_destroy, ssl_server_create_handshaker, ssl_server_check_peer};
+ ssl_server_destroy, ssl_server_check_peer};
-static gpr_slice default_pem_root_certs;
+static gpr_slice compute_default_pem_root_certs_once(void) {
+ gpr_slice result = gpr_empty_slice();
-static void init_default_pem_root_certs(void) {
/* First try to load the roots from the environment. */
char *default_root_certs_path =
gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR);
- if (default_root_certs_path == NULL) {
- default_pem_root_certs = gpr_empty_slice();
- } else {
- default_pem_root_certs = gpr_load_file(default_root_certs_path, 0, NULL);
+ if (default_root_certs_path != NULL) {
+ result = gpr_load_file(default_root_certs_path, 0, NULL);
gpr_free(default_root_certs_path);
}
+ /* Try overridden roots if needed. */
+ grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL;
+ if (GPR_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) {
+ char *pem_root_certs = NULL;
+ ovrd_res = ssl_roots_override_cb(&pem_root_certs);
+ if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) {
+ GPR_ASSERT(pem_root_certs != NULL);
+ result = gpr_slice_new(pem_root_certs, strlen(pem_root_certs), gpr_free);
+ }
+ }
+
/* Fall back to installed certs if needed. */
- if (GPR_SLICE_IS_EMPTY(default_pem_root_certs)) {
- default_pem_root_certs = gpr_load_file(installed_roots_path, 0, NULL);
+ if (GPR_SLICE_IS_EMPTY(result) &&
+ ovrd_res != GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY) {
+ result = gpr_load_file(installed_roots_path, 0, NULL);
}
+ return result;
+}
+
+static gpr_slice default_pem_root_certs;
+
+static void init_default_pem_root_certs(void) {
+ default_pem_root_certs = compute_default_pem_root_certs_once();
+}
+
+gpr_slice grpc_get_default_ssl_roots_for_testing(void) {
+ return compute_default_pem_root_certs_once();
}
size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs) {
@@ -522,9 +678,9 @@ size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs) {
}
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_credentials *request_metadata_creds, const grpc_ssl_config *config,
- const char *target_name, const char *overridden_target_name,
- grpc_channel_security_connector **sc) {
+ grpc_call_credentials *request_metadata_creds,
+ const grpc_ssl_config *config, const char *target_name,
+ const char *overridden_target_name, grpc_channel_security_connector **sc) {
size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
const unsigned char **alpn_protocol_strings =
gpr_malloc(sizeof(const char *) * num_alpn_protocols);
@@ -548,8 +704,15 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name.");
goto error;
}
- if (!check_request_metadata_creds(request_metadata_creds)) {
- goto error;
+ if (config->pem_root_certs == NULL) {
+ pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
+ if (pem_root_certs == NULL || pem_root_certs_size == 0) {
+ gpr_log(GPR_ERROR, "Could not get default pem root certs.");
+ goto error;
+ }
+ } else {
+ pem_root_certs = config->pem_root_certs;
+ pem_root_certs_size = config->pem_root_certs_size;
}
c = gpr_malloc(sizeof(grpc_ssl_channel_security_connector));
@@ -557,30 +720,22 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.vtable = &ssl_channel_vtable;
- c->base.base.is_client_side = 1;
c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
- c->base.request_metadata_creds = grpc_credentials_ref(request_metadata_creds);
+ c->base.request_metadata_creds =
+ grpc_call_credentials_ref(request_metadata_creds);
c->base.check_call_host = ssl_channel_check_call_host;
+ c->base.do_handshake = ssl_channel_do_handshake;
gpr_split_host_port(target_name, &c->target_name, &port);
gpr_free(port);
if (overridden_target_name != NULL) {
c->overridden_target_name = gpr_strdup(overridden_target_name);
}
- if (config->pem_root_certs == NULL) {
- pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
- if (pem_root_certs == NULL || pem_root_certs_size == 0) {
- gpr_log(GPR_ERROR, "Could not get default pem root certs.");
- goto error;
- }
- } else {
- pem_root_certs = config->pem_root_certs;
- pem_root_certs_size = config->pem_root_certs_size;
- }
result = tsi_create_ssl_client_handshaker_factory(
config->pem_private_key, config->pem_private_key_size,
config->pem_cert_chain, config->pem_cert_chain_size, pem_root_certs,
pem_root_certs_size, ssl_cipher_suites(), alpn_protocol_strings,
- alpn_protocol_string_lengths, (uint16_t)num_alpn_protocols, &c->handshaker_factory);
+ alpn_protocol_string_lengths, (uint16_t)num_alpn_protocols,
+ &c->handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@@ -589,18 +744,18 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
goto error;
}
*sc = &c->base;
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_OK;
error:
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_ERROR;
}
grpc_security_status grpc_ssl_server_security_connector_create(
- const grpc_ssl_server_config *config, grpc_security_connector **sc) {
+ const grpc_ssl_server_config *config, grpc_server_security_connector **sc) {
size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
const unsigned char **alpn_protocol_strings =
gpr_malloc(sizeof(const char *) * num_alpn_protocols);
@@ -624,31 +779,34 @@ grpc_security_status grpc_ssl_server_security_connector_create(
c = gpr_malloc(sizeof(grpc_ssl_server_security_connector));
memset(c, 0, sizeof(grpc_ssl_server_security_connector));
- gpr_ref_init(&c->base.refcount, 1);
- c->base.url_scheme = GRPC_SSL_URL_SCHEME;
- c->base.vtable = &ssl_server_vtable;
+ gpr_ref_init(&c->base.base.refcount, 1);
+ c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
+ c->base.base.vtable = &ssl_server_vtable;
result = tsi_create_ssl_server_handshaker_factory(
(const unsigned char **)config->pem_private_keys,
config->pem_private_keys_sizes,
(const unsigned char **)config->pem_cert_chains,
config->pem_cert_chains_sizes, config->num_key_cert_pairs,
- config->pem_root_certs, config->pem_root_certs_size, ssl_cipher_suites(),
- alpn_protocol_strings, alpn_protocol_string_lengths, (uint16_t)num_alpn_protocols,
+ config->pem_root_certs, config->pem_root_certs_size,
+ config->force_client_auth, ssl_cipher_suites(), alpn_protocol_strings,
+ alpn_protocol_string_lengths, (uint16_t)num_alpn_protocols,
&c->handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
- ssl_server_destroy(&c->base);
+ ssl_server_destroy(&c->base.base);
*sc = NULL;
goto error;
}
+ gpr_mu_init(&c->base.mu);
+ c->base.do_handshake = ssl_server_do_handshake;
*sc = &c->base;
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_OK;
error:
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_ERROR;
}
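
[Editor's aside] The hunk above reworks how the default PEM roots are located: the connector first consults the GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable, then an application-supplied override callback, and only then the installed roots (a GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY result suppresses that last fallback). Below is a minimal sketch of such an override callback; the registration entry point is assumed to be grpc_set_ssl_roots_override_callback() from <grpc/grpc_security.h>, and the PEM bundle is a placeholder, not real data.

#include <grpc/grpc_security.h>
#include <grpc/support/string_util.h>

/* Placeholder bundle; a real application would embed or load actual roots. */
static const char embedded_roots_pem[] =
    "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";

/* Consulted by compute_default_pem_root_certs_once() when neither the config
   nor the environment variable yields roots. The returned buffer must be
   heap-allocated: the core wraps it with gpr_slice_new(..., gpr_free).
   Returning GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY would also disable the
   installed-roots fallback. */
static grpc_ssl_roots_override_result embedded_roots_override(
    char **pem_root_certs) {
  *pem_root_certs = gpr_strdup(embedded_roots_pem);
  return *pem_root_certs != NULL ? GRPC_SSL_ROOTS_OVERRIDE_OK
                                 : GRPC_SSL_ROOTS_OVERRIDE_FAIL;
}

/* Registration, typically done once before grpc_init():
   grpc_set_ssl_roots_override_callback(embedded_roots_override); */
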
diff --git a/src/core/security/security_connector.h b/src/core/security/security_connector.h
index ee3057b43b..6f915ebb9d 100644
--- a/src/core/security/security_connector.h
+++ b/src/core/security/security_connector.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,20 +31,17 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H
-#define GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H
+#ifndef GRPC_CORE_SECURITY_SECURITY_CONNECTOR_H
+#define GRPC_CORE_SECURITY_SECURITY_CONNECTOR_H
#include <grpc/grpc_security.h>
#include "src/core/iomgr/endpoint.h"
+#include "src/core/iomgr/tcp_server.h"
#include "src/core/tsi/transport_security_interface.h"
/* --- status enum. --- */
-typedef enum {
- GRPC_SECURITY_OK = 0,
- GRPC_SECURITY_PENDING,
- GRPC_SECURITY_ERROR
-} grpc_security_status;
+typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status;
/* --- URL schemes. --- */
@@ -60,47 +57,61 @@ typedef struct grpc_security_connector grpc_security_connector;
#define GRPC_SECURITY_CONNECTOR_ARG "grpc.security_connector"
-typedef void (*grpc_security_check_cb)(void *user_data,
- grpc_security_status status);
+typedef void (*grpc_security_peer_check_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
+ grpc_security_status status,
+ grpc_auth_context *auth_context);
+
+/* Ownership of the secure_endpoint is transferred. */
+typedef void (*grpc_security_handshake_done_cb)(
+ grpc_exec_ctx *exec_ctx, void *user_data, grpc_security_status status,
+ grpc_endpoint *secure_endpoint, grpc_auth_context *auth_context);
typedef struct {
void (*destroy)(grpc_security_connector *sc);
- grpc_security_status (*create_handshaker)(grpc_security_connector *sc,
- tsi_handshaker **handshaker);
- grpc_security_status (*check_peer)(grpc_security_connector *sc, tsi_peer peer,
- grpc_security_check_cb cb,
- void *user_data);
+ void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc,
+ tsi_peer peer, grpc_security_peer_check_cb cb,
+ void *user_data);
} grpc_security_connector_vtable;
+typedef struct grpc_security_connector_handshake_list {
+ void *handshake;
+ struct grpc_security_connector_handshake_list *next;
+} grpc_security_connector_handshake_list;
+
struct grpc_security_connector {
const grpc_security_connector_vtable *vtable;
gpr_refcount refcount;
- int is_client_side;
const char *url_scheme;
- grpc_auth_context *auth_context; /* Populated after the peer is checked. */
};
-/* Increments the refcount. */
+/* Refcounting. */
+#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#define GRPC_SECURITY_CONNECTOR_REF(p, r) \
+ grpc_security_connector_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) \
+ grpc_security_connector_unref((p), __FILE__, __LINE__, (r))
grpc_security_connector *grpc_security_connector_ref(
- grpc_security_connector *sc);
-
-/* Decrements the refcount and destroys the object if it reaches 0. */
-void grpc_security_connector_unref(grpc_security_connector *sc);
-
-/* Handshake creation. */
-grpc_security_status grpc_security_connector_create_handshaker(
- grpc_security_connector *sc, tsi_handshaker **handshaker);
-
-/* Check the peer.
- Implementations can choose to check the peer either synchronously or
- asynchronously. In the first case, a successful call will return
- GRPC_SECURITY_OK. In the asynchronous case, the call will return
- GRPC_SECURITY_PENDING unless an error is detected early on.
- Ownership of the peer is transfered.
-*/
-grpc_security_status grpc_security_connector_check_peer(
- grpc_security_connector *sc, tsi_peer peer, grpc_security_check_cb cb,
- void *user_data);
+ grpc_security_connector *policy, const char *file, int line,
+ const char *reason);
+void grpc_security_connector_unref(grpc_security_connector *policy,
+ const char *file, int line,
+ const char *reason);
+#else
+#define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p))
+#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) grpc_security_connector_unref((p))
+grpc_security_connector *grpc_security_connector_ref(
+ grpc_security_connector *policy);
+void grpc_security_connector_unref(grpc_security_connector *policy);
+#endif
+
+/* Check the peer. Callee takes ownership of the peer object.
+ The callback will include the resulting auth_context. */
+void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
+ tsi_peer peer,
+ grpc_security_peer_check_cb cb,
+ void *user_data);
/* Util to encapsulate the connector in a channel arg. */
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);
@@ -119,34 +130,73 @@ grpc_security_connector *grpc_find_security_connector_in_args(
typedef struct grpc_channel_security_connector grpc_channel_security_connector;
+typedef void (*grpc_security_call_host_check_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
+ grpc_security_status status);
+
struct grpc_channel_security_connector {
- grpc_security_connector base; /* requires is_client_side to be non 0. */
- grpc_credentials *request_metadata_creds;
- grpc_security_status (*check_call_host)(grpc_channel_security_connector *sc,
- const char *host,
- grpc_security_check_cb cb,
- void *user_data);
+ grpc_security_connector base;
+ grpc_call_credentials *request_metadata_creds;
+ void (*check_call_host)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc, const char *host,
+ grpc_auth_context *auth_context,
+ grpc_security_call_host_check_cb cb, void *user_data);
+ void (*do_handshake)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb, void *user_data);
};
-/* Checks that the host that will be set for a call is acceptable.
- Implementations can choose do the check either synchronously or
- asynchronously. In the first case, a successful call will return
- GRPC_SECURITY_OK. In the asynchronous case, the call will return
- GRPC_SECURITY_PENDING unless an error is detected early on. */
-grpc_security_status grpc_channel_security_connector_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data);
+/* Checks that the host that will be set for a call is acceptable. */
+void grpc_channel_security_connector_check_call_host(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_auth_context *auth_context,
+ grpc_security_call_host_check_cb cb, void *user_data);
+
+/* Handshake. */
+void grpc_channel_security_connector_do_handshake(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
+ grpc_endpoint *nonsecure_endpoint, grpc_security_handshake_done_cb cb,
+ void *user_data);
+
+/* --- server_security_connector object. ---
+
+ A server security connector object represents a way to configure the
+ underlying transport security mechanism on the server side. */
+
+typedef struct grpc_server_security_connector grpc_server_security_connector;
+
+struct grpc_server_security_connector {
+ grpc_security_connector base;
+ gpr_mu mu;
+ grpc_security_connector_handshake_list *handshaking_handshakes;
+ const grpc_channel_args *channel_args;
+ void (*do_handshake)(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_tcp_server_acceptor *acceptor,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb, void *user_data);
+};
+
+void grpc_server_security_connector_do_handshake(
+ grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
+ grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb, void *user_data);
+
+void grpc_server_security_connector_shutdown(
+ grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector);
/* --- Creation security connectors. --- */
/* For TESTING ONLY!
Creates a fake connector that emulates real channel security. */
grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
- grpc_credentials *request_metadata_creds, int call_host_check_is_async);
+ grpc_call_credentials *request_metadata_creds);
/* For TESTING ONLY!
Creates a fake connector that emulates real server security. */
-grpc_security_connector *grpc_fake_server_security_connector_create(void);
+grpc_server_security_connector *grpc_fake_server_security_connector_create(
+ void);
/* Config for ssl clients. */
typedef struct {
@@ -172,13 +222,16 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_credentials *request_metadata_creds,
+ grpc_call_credentials *request_metadata_creds,
const grpc_ssl_config *config, const char *target_name,
const char *overridden_target_name, grpc_channel_security_connector **sc);
/* Gets the default ssl roots. */
size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs);
+/* Exposed for TESTING ONLY! */
+gpr_slice grpc_get_default_ssl_roots_for_testing(void);
+
/* Config for ssl servers. */
typedef struct {
unsigned char **pem_private_keys;
@@ -188,6 +241,7 @@ typedef struct {
size_t num_key_cert_pairs;
unsigned char *pem_root_certs;
size_t pem_root_certs_size;
+ int force_client_auth;
} grpc_ssl_server_config;
/* Creates an SSL server_security_connector.
@@ -197,13 +251,16 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_server_security_connector_create(
- const grpc_ssl_server_config *config, grpc_security_connector **sc);
+ const grpc_ssl_server_config *config, grpc_server_security_connector **sc);
/* Util. */
-const tsi_peer_property *tsi_peer_get_property_by_name(
- const tsi_peer *peer, const char *name);
+const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
+ const char *name);
/* Exposed for testing only. */
grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer);
+tsi_peer tsi_shallow_peer_from_ssl_auth_context(
+ const grpc_auth_context *auth_context);
+void tsi_shallow_peer_destruct(tsi_peer *peer);
-#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H */
+#endif /* GRPC_CORE_SECURITY_SECURITY_CONNECTOR_H */
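
[Editor's aside] This header drops the synchronous GRPC_SECURITY_PENDING model: check_peer, check_call_host and do_handshake are now callback-based and carry a grpc_exec_ctx, with the resulting auth context delivered to the caller's callback. A minimal caller-side sketch follows, using only declarations from this diff plus the security_context.h macros; the peer_check_state helper is invented for illustration. The connector drops its own reference to auth_context after the callback returns (see ssl_channel_check_peer above), so the callback must take its own ref.

#include "src/core/security/security_connector.h"
#include "src/core/security/security_context.h"

typedef struct {
  grpc_auth_context *auth_context; /* ref owned by this struct once set */
  grpc_security_status status;
  int done;
} peer_check_state; /* illustrative helper, not part of the tree */

static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *user_data,
                            grpc_security_status status,
                            grpc_auth_context *auth_context) {
  peer_check_state *state = user_data;
  state->status = status;
  state->done = 1;
  if (status == GRPC_SECURITY_OK && auth_context != NULL) {
    /* Keep our own reference; the connector unrefs after this returns. */
    state->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "peer_check");
  }
}

/* Usage (ownership of `peer` transfers to the connector):
   grpc_security_connector_check_peer(exec_ctx, sc, peer, on_peer_checked,
                                      &state); */
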
diff --git a/src/core/security/security_context.c b/src/core/security/security_context.c
index 4d56549f9b..a71b3bc915 100644
--- a/src/core/security/security_context.c
+++ b/src/core/security/security_context.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,7 @@
#include <string.h>
#include "src/core/security/security_context.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/support/string.h"
@@ -45,36 +46,44 @@
/* --- grpc_call --- */
grpc_call_error grpc_call_set_credentials(grpc_call *call,
- grpc_credentials *creds) {
+ grpc_call_credentials *creds) {
grpc_client_security_context *ctx = NULL;
+ GRPC_API_TRACE("grpc_call_set_credentials(call=%p, creds=%p)", 2,
+ (call, creds));
if (!grpc_call_is_client(call)) {
gpr_log(GPR_ERROR, "Method is client-side only.");
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
- if (creds != NULL && !grpc_credentials_has_request_metadata_only(creds)) {
- gpr_log(GPR_ERROR, "Incompatible credentials to set on a call.");
- return GRPC_CALL_ERROR;
- }
ctx = (grpc_client_security_context *)grpc_call_context_get(
call, GRPC_CONTEXT_SECURITY);
if (ctx == NULL) {
ctx = grpc_client_security_context_create();
- ctx->creds = grpc_credentials_ref(creds);
+ ctx->creds = grpc_call_credentials_ref(creds);
grpc_call_context_set(call, GRPC_CONTEXT_SECURITY, ctx,
grpc_client_security_context_destroy);
} else {
- grpc_credentials_unref(ctx->creds);
- ctx->creds = grpc_credentials_ref(creds);
+ grpc_call_credentials_unref(ctx->creds);
+ ctx->creds = grpc_call_credentials_ref(creds);
}
return GRPC_CALL_OK;
}
-const grpc_auth_context *grpc_call_auth_context(grpc_call *call) {
+grpc_auth_context *grpc_call_auth_context(grpc_call *call) {
void *sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY);
+ GRPC_API_TRACE("grpc_call_auth_context(call=%p)", 1, (call));
if (sec_ctx == NULL) return NULL;
return grpc_call_is_client(call)
- ? ((grpc_client_security_context *)sec_ctx)->auth_context
- : ((grpc_server_security_context *)sec_ctx)->auth_context;
+ ? GRPC_AUTH_CONTEXT_REF(
+ ((grpc_client_security_context *)sec_ctx)->auth_context,
+ "grpc_call_auth_context client")
+ : GRPC_AUTH_CONTEXT_REF(
+ ((grpc_server_security_context *)sec_ctx)->auth_context,
+ "grpc_call_auth_context server");
+}
+
+void grpc_auth_context_release(grpc_auth_context *context) {
+ GRPC_API_TRACE("grpc_auth_context_release(context=%p)", 1, (context));
+ GRPC_AUTH_CONTEXT_UNREF(context, "grpc_auth_context_unref");
}
/* --- grpc_client_security_context --- */
@@ -88,7 +97,7 @@ grpc_client_security_context *grpc_client_security_context_create(void) {
void grpc_client_security_context_destroy(void *ctx) {
grpc_client_security_context *c = (grpc_client_security_context *)ctx;
- grpc_credentials_unref(c->creds);
+ grpc_call_credentials_unref(c->creds);
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
gpr_free(ctx);
}
@@ -112,15 +121,15 @@ void grpc_server_security_context_destroy(void *ctx) {
static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL};
-grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained,
- size_t property_count) {
+grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
grpc_auth_context *ctx = gpr_malloc(sizeof(grpc_auth_context));
memset(ctx, 0, sizeof(grpc_auth_context));
- ctx->properties = gpr_malloc(property_count * sizeof(grpc_auth_property));
- memset(ctx->properties, 0, property_count * sizeof(grpc_auth_property));
- ctx->property_count = property_count;
gpr_ref_init(&ctx->refcount, 1);
- if (chained != NULL) ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained");
+ if (chained != NULL) {
+ ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained");
+ ctx->peer_identity_property_name =
+ ctx->chained->peer_identity_property_name;
+ }
return ctx;
}
@@ -154,11 +163,11 @@ void grpc_auth_context_unref(grpc_auth_context *ctx) {
if (gpr_unref(&ctx->refcount)) {
size_t i;
GRPC_AUTH_CONTEXT_UNREF(ctx->chained, "chained");
- if (ctx->properties != NULL) {
- for (i = 0; i < ctx->property_count; i++) {
- grpc_auth_property_reset(&ctx->properties[i]);
+ if (ctx->properties.array != NULL) {
+ for (i = 0; i < ctx->properties.count; i++) {
+ grpc_auth_property_reset(&ctx->properties.array[i]);
}
- gpr_free(ctx->properties);
+ gpr_free(ctx->properties.array);
}
gpr_free(ctx);
}
@@ -166,17 +175,37 @@ void grpc_auth_context_unref(grpc_auth_context *ctx) {
const char *grpc_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx) {
+ GRPC_API_TRACE("grpc_auth_context_peer_identity_property_name(ctx=%p)", 1,
+ (ctx));
return ctx->peer_identity_property_name;
}
-int grpc_auth_context_peer_is_authenticated(
- const grpc_auth_context *ctx) {
+int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
+ const char *name) {
+ grpc_auth_property_iterator it =
+ grpc_auth_context_find_properties_by_name(ctx, name);
+ const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it);
+ GRPC_API_TRACE(
+ "grpc_auth_context_set_peer_identity_property_name(ctx=%p, name=%s)", 2,
+ (ctx, name));
+ if (prop == NULL) {
+ gpr_log(GPR_ERROR, "Property name %s not found in auth context.",
+ name != NULL ? name : "NULL");
+ return 0;
+ }
+ ctx->peer_identity_property_name = prop->name;
+ return 1;
+}
+
+int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx) {
+ GRPC_API_TRACE("grpc_auth_context_peer_is_authenticated(ctx=%p)", 1, (ctx));
return ctx->peer_identity_property_name == NULL ? 0 : 1;
}
grpc_auth_property_iterator grpc_auth_context_property_iterator(
const grpc_auth_context *ctx) {
grpc_auth_property_iterator it = empty_iterator;
+ GRPC_API_TRACE("grpc_auth_context_property_iterator(ctx=%p)", 1, (ctx));
if (ctx == NULL) return it;
it.ctx = ctx;
return it;
@@ -184,17 +213,18 @@ grpc_auth_property_iterator grpc_auth_context_property_iterator(
const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator *it) {
+ GRPC_API_TRACE("grpc_auth_property_iterator_next(it=%p)", 1, (it));
if (it == NULL || it->ctx == NULL) return NULL;
- while (it->index == it->ctx->property_count) {
+ while (it->index == it->ctx->properties.count) {
if (it->ctx->chained == NULL) return NULL;
it->ctx = it->ctx->chained;
it->index = 0;
}
if (it->name == NULL) {
- return &it->ctx->properties[it->index++];
+ return &it->ctx->properties.array[it->index++];
} else {
- while (it->index < it->ctx->property_count) {
- const grpc_auth_property *prop = &it->ctx->properties[it->index++];
+ while (it->index < it->ctx->properties.count) {
+ const grpc_auth_property *prop = &it->ctx->properties.array[it->index++];
GPR_ASSERT(prop->name != NULL);
if (strcmp(it->name, prop->name) == 0) {
return prop;
@@ -208,6 +238,8 @@ const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
const grpc_auth_context *ctx, const char *name) {
grpc_auth_property_iterator it = empty_iterator;
+ GRPC_API_TRACE("grpc_auth_context_find_properties_by_name(ctx=%p, name=%s)",
+ 2, (ctx, name));
if (ctx == NULL || name == NULL) return empty_iterator;
it.ctx = ctx;
it.name = name;
@@ -216,29 +248,51 @@ grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
grpc_auth_property_iterator grpc_auth_context_peer_identity(
const grpc_auth_context *ctx) {
+ GRPC_API_TRACE("grpc_auth_context_peer_identity(ctx=%p)", 1, (ctx));
if (ctx == NULL) return empty_iterator;
return grpc_auth_context_find_properties_by_name(
ctx, ctx->peer_identity_property_name);
}
-grpc_auth_property grpc_auth_property_init_from_cstring(const char *name,
- const char *value) {
- grpc_auth_property prop;
- prop.name = gpr_strdup(name);
- prop.value = gpr_strdup(value);
- prop.value_length = strlen(value);
- return prop;
+static void ensure_auth_context_capacity(grpc_auth_context *ctx) {
+ if (ctx->properties.count == ctx->properties.capacity) {
+ ctx->properties.capacity =
+ GPR_MAX(ctx->properties.capacity + 8, ctx->properties.capacity * 2);
+ ctx->properties.array =
+ gpr_realloc(ctx->properties.array,
+ ctx->properties.capacity * sizeof(grpc_auth_property));
+ }
+}
+
+void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
+ const char *value, size_t value_length) {
+ grpc_auth_property *prop;
+ GRPC_API_TRACE(
+ "grpc_auth_context_add_property(ctx=%p, name=%s, value=%*.*s, "
+ "value_length=%lu)",
+ 6, (ctx, name, (int)value_length, (int)value_length, value,
+ (unsigned long)value_length));
+ ensure_auth_context_capacity(ctx);
+ prop = &ctx->properties.array[ctx->properties.count++];
+ prop->name = gpr_strdup(name);
+ prop->value = gpr_malloc(value_length + 1);
+ memcpy(prop->value, value, value_length);
+ prop->value[value_length] = '\0';
+ prop->value_length = value_length;
}
-grpc_auth_property grpc_auth_property_init(const char *name, const char *value,
- size_t value_length) {
- grpc_auth_property prop;
- prop.name = gpr_strdup(name);
- prop.value = gpr_malloc(value_length + 1);
- memcpy(prop.value, value, value_length);
- prop.value[value_length] = '\0';
- prop.value_length = value_length;
- return prop;
+void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
+ const char *name,
+ const char *value) {
+ grpc_auth_property *prop;
+ GRPC_API_TRACE(
+ "grpc_auth_context_add_cstring_property(ctx=%p, name=%s, value=%s)", 3,
+ (ctx, name, value));
+ ensure_auth_context_capacity(ctx);
+ prop = &ctx->properties.array[ctx->properties.count++];
+ prop->name = gpr_strdup(name);
+ prop->value = gpr_strdup(value);
+ prop->value_length = strlen(value);
}
void grpc_auth_property_reset(grpc_auth_property *property) {
@@ -247,3 +301,47 @@ void grpc_auth_property_reset(grpc_auth_property *property) {
memset(property, 0, sizeof(grpc_auth_property));
}
+static void auth_context_pointer_arg_destroy(void *p) {
+ GRPC_AUTH_CONTEXT_UNREF(p, "auth_context_pointer_arg");
+}
+
+static void *auth_context_pointer_arg_copy(void *p) {
+ return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg");
+}
+
+static int auth_context_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
+ auth_context_pointer_arg_copy, auth_context_pointer_arg_destroy,
+ auth_context_pointer_cmp};
+
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
+ grpc_arg arg;
+ memset(&arg, 0, sizeof(grpc_arg));
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_AUTH_CONTEXT_ARG;
+ arg.value.pointer.p = p;
+ arg.value.pointer.vtable = &auth_context_pointer_vtable;
+ return arg;
+}
+
+grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) {
+ if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL;
+ if (arg->type != GRPC_ARG_POINTER) {
+ gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
+ GRPC_AUTH_CONTEXT_ARG);
+ return NULL;
+ }
+ return arg->value.pointer.p;
+}
+
+grpc_auth_context *grpc_find_auth_context_in_args(
+ const grpc_channel_args *args) {
+ size_t i;
+ if (args == NULL) return NULL;
+ for (i = 0; i < args->num_args; i++) {
+ grpc_auth_context *p = grpc_auth_context_from_arg(&args->args[i]);
+ if (p != NULL) return p;
+ }
+ return NULL;
+}
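
[Editor's aside] With property storage moved into the growable grpc_auth_property_array, contexts are now built incrementally via grpc_auth_context_add_property / grpc_auth_context_add_cstring_property, and the peer identity is selected afterwards with grpc_auth_context_set_peer_identity_property_name. A small sketch, using only functions and property-name constants that appear in this diff; the property values are made up.

#include <grpc/grpc_security.h>
#include <grpc/support/log.h>
#include "src/core/security/security_context.h"

static void auth_context_example(void) {
  grpc_auth_context *ctx = grpc_auth_context_create(NULL /* not chained */);
  grpc_auth_context_add_cstring_property(ctx, GRPC_X509_CN_PROPERTY_NAME,
                                         "example.test");
  grpc_auth_context_add_cstring_property(ctx, GRPC_X509_SAN_PROPERTY_NAME,
                                         "*.example.test");
  /* Returns 0 if no property with that name has been added yet. */
  GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name(
                 ctx, GRPC_X509_SAN_PROPERTY_NAME) == 1);

  {
    /* Walk the identity properties; values added above are NUL-terminated. */
    grpc_auth_property_iterator it = grpc_auth_context_peer_identity(ctx);
    const grpc_auth_property *prop;
    while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
      gpr_log(GPR_INFO, "peer identity: %s", prop->value);
    }
  }
  GRPC_AUTH_CONTEXT_UNREF(ctx, "auth_context_example");
}
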
diff --git a/src/core/security/security_context.h b/src/core/security/security_context.h
index 20c4390898..61601f538b 100644
--- a/src/core/security/security_context.h
+++ b/src/core/security/security_context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,10 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H
-#define GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H
+#ifndef GRPC_CORE_SECURITY_SECURITY_CONTEXT_H
+#define GRPC_CORE_SECURITY_SECURITY_CONTEXT_H
+#include "src/core/iomgr/pollset.h"
#include "src/core/security/credentials.h"
/* --- grpc_auth_context ---
@@ -42,17 +43,22 @@
/* Property names are always NULL terminated. */
+typedef struct {
+ grpc_auth_property *array;
+ size_t count;
+ size_t capacity;
+} grpc_auth_property_array;
+
struct grpc_auth_context {
struct grpc_auth_context *chained;
- grpc_auth_property *properties;
- size_t property_count;
+ grpc_auth_property_array properties;
gpr_refcount refcount;
const char *peer_identity_property_name;
+ grpc_pollset *pollset;
};
-/* Constructor. */
-grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained,
- size_t property_count);
+/* Creation. */
+grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained);
/* Refcounting. */
#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
@@ -72,12 +78,6 @@ grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *policy);
void grpc_auth_context_unref(grpc_auth_context *policy);
#endif
-grpc_auth_property grpc_auth_property_init_from_cstring(const char *name,
- const char *value);
-
-grpc_auth_property grpc_auth_property_init(const char *name, const char *value,
- size_t value_length);
-
void grpc_auth_property_reset(grpc_auth_property *property);
/* --- grpc_client_security_context ---
@@ -85,7 +85,7 @@ void grpc_auth_property_reset(grpc_auth_property *property);
Internal client-side security context. */
typedef struct {
- grpc_credentials *creds;
+ grpc_call_credentials *creds;
grpc_auth_context *auth_context;
} grpc_client_security_context;
@@ -103,5 +103,12 @@ typedef struct {
grpc_server_security_context *grpc_server_security_context_create(void);
void grpc_server_security_context_destroy(void *ctx);
-#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */
+/* --- Channel args for auth context --- */
+#define GRPC_AUTH_CONTEXT_ARG "grpc.auth_context"
+
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context *c);
+grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg);
+grpc_auth_context *grpc_find_auth_context_in_args(
+ const grpc_channel_args *args);
+#endif /* GRPC_CORE_SECURITY_SECURITY_CONTEXT_H */
diff --git a/src/core/security/server_auth_filter.c b/src/core/security/server_auth_filter.c
index b8639287a5..3d8e5e8d35 100644
--- a/src/core/security/server_auth_filter.c
+++ b/src/core/security/server_auth_filter.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,101 +31,234 @@
*
*/
+#include <string.h>
+
#include "src/core/security/auth_filters.h"
-#include "src/core/security/security_connector.h"
+#include "src/core/security/credentials.h"
#include "src/core/security/security_context.h"
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
typedef struct call_data {
- int unused; /* C89 requires at least one struct element */
+ grpc_metadata_batch *recv_initial_metadata;
+ /* Closure to call when finished with the auth_on_recv hook. */
+ grpc_closure *on_done_recv;
+ /* Receive closures are chained: we inject this closure as the on_done_recv
+ up-call on transport_op, and remember to call our on_done_recv member after
+ handling it. */
+ grpc_closure auth_on_recv;
+ grpc_transport_stream_op transport_op;
+ grpc_metadata_array md;
+ const grpc_metadata *consumed_md;
+ size_t num_consumed_md;
+ grpc_auth_context *auth_context;
} call_data;
typedef struct channel_data {
- grpc_security_connector *security_connector;
+ grpc_auth_context *auth_context;
+ grpc_server_credentials *creds;
} channel_data;
+static grpc_metadata_array metadata_batch_to_md_array(
+ const grpc_metadata_batch *batch) {
+ grpc_linked_mdelem *l;
+ grpc_metadata_array result;
+ grpc_metadata_array_init(&result);
+ for (l = batch->list.head; l != NULL; l = l->next) {
+ grpc_metadata *usr_md = NULL;
+ grpc_mdelem *md = l->md;
+ grpc_mdstr *key = md->key;
+ grpc_mdstr *value = md->value;
+ if (result.count == result.capacity) {
+ result.capacity = GPR_MAX(result.capacity + 8, result.capacity * 2);
+ result.metadata =
+ gpr_realloc(result.metadata, result.capacity * sizeof(grpc_metadata));
+ }
+ usr_md = &result.metadata[result.count++];
+ usr_md->key = grpc_mdstr_as_c_string(key);
+ usr_md->value = grpc_mdstr_as_c_string(value);
+ usr_md->value_length = GPR_SLICE_LENGTH(value->slice);
+ }
+ return result;
+}
+
+static grpc_mdelem *remove_consumed_md(void *user_data, grpc_mdelem *md) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ size_t i;
+ for (i = 0; i < calld->num_consumed_md; i++) {
+ const grpc_metadata *consumed_md = &calld->consumed_md[i];
+ /* Maybe we could do a pointer comparison but we do not have any guarantee
+ that the metadata processor used the same pointers for consumed_md in the
+ callback. */
+ if (GPR_SLICE_LENGTH(md->key->slice) != strlen(consumed_md->key) ||
+ GPR_SLICE_LENGTH(md->value->slice) != consumed_md->value_length) {
+ continue;
+ }
+ if (memcmp(GPR_SLICE_START_PTR(md->key->slice), consumed_md->key,
+ GPR_SLICE_LENGTH(md->key->slice)) == 0 &&
+ memcmp(GPR_SLICE_START_PTR(md->value->slice), consumed_md->value,
+ GPR_SLICE_LENGTH(md->value->slice)) == 0) {
+ return NULL; /* Delete. */
+ }
+ }
+ return md;
+}
+
+/* called from application code */
+static void on_md_processing_done(
+ void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+ const grpc_metadata *response_md, size_t num_response_md,
+ grpc_status_code status, const char *error_details) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ /* TODO(jboeuf): Implement support for response_md. */
+ if (response_md != NULL && num_response_md > 0) {
+ gpr_log(GPR_INFO,
+ "response_md in auth metadata processing not supported for now. "
+ "Ignoring...");
+ }
+
+ if (status == GRPC_STATUS_OK) {
+ calld->consumed_md = consumed_md;
+ calld->num_consumed_md = num_consumed_md;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
+ elem);
+ grpc_metadata_array_destroy(&calld->md);
+ calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 1);
+ } else {
+ gpr_slice message;
+ grpc_transport_stream_op close_op;
+ memset(&close_op, 0, sizeof(close_op));
+ grpc_metadata_array_destroy(&calld->md);
+ error_details = error_details != NULL
+ ? error_details
+ : "Authentication metadata processing failed.";
+ message = gpr_slice_from_copied_string(error_details);
+ calld->transport_op.send_initial_metadata = NULL;
+ if (calld->transport_op.send_message != NULL) {
+ grpc_byte_stream_destroy(&exec_ctx, calld->transport_op.send_message);
+ calld->transport_op.send_message = NULL;
+ }
+ calld->transport_op.send_trailing_metadata = NULL;
+ grpc_transport_stream_op_add_close(&close_op, status, &message);
+ grpc_call_next_op(&exec_ctx, elem, &close_op);
+ calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 0);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
+ bool success) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ if (success) {
+ if (chand->creds->processor.process != NULL) {
+ calld->md = metadata_batch_to_md_array(calld->recv_initial_metadata);
+ chand->creds->processor.process(
+ chand->creds->processor.state, calld->auth_context,
+ calld->md.metadata, calld->md.count, on_md_processing_done, elem);
+ return;
+ }
+ }
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
+}
+
+static void set_recv_ops_md_callbacks(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+
+ if (op->recv_initial_metadata != NULL) {
+ /* substitute our callback for the higher callback */
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->auth_on_recv;
+ calld->transport_op = *op;
+ }
+}
+
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void auth_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
- /* TODO(jboeuf): Get the metadata and get a new context from it. */
-
- /* pass control down the stack */
- grpc_call_next_op(elem, op);
-}
-
-/* Called on special channel events, such as disconnection or new incoming
- calls on the server */
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- grpc_channel_next_op(elem, op);
+static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ set_recv_ops_md_callbacks(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_server_security_context *server_ctx = NULL;
/* initialize members */
- calld->unused = 0;
-
- GPR_ASSERT(initial_op && initial_op->context != NULL &&
- initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);
+ memset(calld, 0, sizeof(*calld));
+ grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
- /* Create a security context for the call and reference the auth context from
- the channel. */
- if (initial_op->context[GRPC_CONTEXT_SECURITY].value != NULL) {
- initial_op->context[GRPC_CONTEXT_SECURITY].destroy(
- initial_op->context[GRPC_CONTEXT_SECURITY].value);
+ if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
+ args->context[GRPC_CONTEXT_SECURITY].destroy(
+ args->context[GRPC_CONTEXT_SECURITY].value);
}
+
server_ctx = grpc_server_security_context_create();
- server_ctx->auth_context = GRPC_AUTH_CONTEXT_REF(
- chand->security_connector->auth_context, "server_security_context");
- initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
- initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
+ server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
+ calld->auth_context = server_ctx->auth_context;
+
+ args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
+ args->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_server_security_context_destroy;
}
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {}
+
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
-}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ grpc_auth_context *auth_context =
+ grpc_find_auth_context_in_args(args->channel_args);
+ grpc_server_credentials *creds =
+ grpc_find_server_credentials_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
- GPR_ASSERT(sc != NULL);
+ GPR_ASSERT(!args->is_last);
+ GPR_ASSERT(auth_context != NULL);
+ GPR_ASSERT(creds != NULL);
/* initialize members */
- GPR_ASSERT(!sc->is_client_side);
- chand->security_connector = grpc_security_connector_ref(sc);
+ chand->auth_context =
+ GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
+ chand->creds = grpc_server_credentials_ref(creds);
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
- grpc_security_connector_unref(chand->security_connector);
+ GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
+ grpc_server_credentials_unref(chand->creds);
}
const grpc_channel_filter grpc_server_auth_filter = {
- auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "server-auth"};
+ auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
+ "server-auth"};
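
[Editor's aside] The rewritten filter deserializes the received initial metadata, hands it to the per-credentials processor, then either strips whatever the processor reports as consumed (remove_consumed_md) or closes the call with the returned status. Below is a sketch of the application-side half, assuming the public attach point grpc_server_credentials_set_auth_metadata_processor() from <grpc/grpc_security.h>; the header name x-api-key and the property api_key_presence are invented for illustration, and response_md is currently ignored by the filter (see on_md_processing_done above).

#include <string.h>
#include <grpc/grpc_security.h>

static void check_api_key(void *state, grpc_auth_context *ctx,
                          const grpc_metadata *md, size_t num_md,
                          grpc_process_auth_metadata_done_cb cb,
                          void *user_data) {
  size_t i;
  for (i = 0; i < num_md; i++) {
    if (strcmp(md[i].key, "x-api-key") == 0) {
      /* Accept: annotate the auth context and consume the header so that
         remove_consumed_md() filters it out of recv_initial_metadata. */
      grpc_auth_context_add_cstring_property(ctx, "api_key_presence", "true");
      cb(user_data, &md[i], 1, NULL /* response_md */, 0, GRPC_STATUS_OK,
         NULL);
      return;
    }
  }
  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED,
     "Missing x-api-key metadata.");
}

/* Attaching it to server credentials (state/destroy left unused here):
   grpc_auth_metadata_processor processor;
   memset(&processor, 0, sizeof(processor));
   processor.process = check_api_key;
   grpc_server_credentials_set_auth_metadata_processor(creds, processor); */
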
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 53afa1caad..009ec95682 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,8 @@
#include "src/core/security/auth_filters.h"
#include "src/core/security/credentials.h"
#include "src/core/security/security_connector.h"
-#include "src/core/security/secure_transport_setup.h"
+#include "src/core/security/security_context.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
@@ -54,10 +55,13 @@
typedef struct grpc_server_secure_state {
grpc_server *server;
grpc_tcp_server *tcp;
- grpc_security_connector *sc;
+ grpc_server_security_connector *sc;
+ grpc_server_credentials *creds;
int is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
+ grpc_closure destroy_closure;
+ grpc_closure *destroy_callback;
} grpc_server_secure_state;
static void state_ref(grpc_server_secure_state *state) {
@@ -70,80 +74,93 @@ static void state_unref(grpc_server_secure_state *state) {
gpr_mu_lock(&state->mu);
gpr_mu_unlock(&state->mu);
/* clean up */
- grpc_security_connector_unref(state->sc);
+ GRPC_SECURITY_CONNECTOR_UNREF(&state->sc->base, "server");
+ grpc_server_credentials_unref(state->creds);
gpr_free(state);
}
}
-static grpc_transport_setup_result setup_transport(void *statep,
- grpc_transport *transport,
- grpc_mdctx *mdctx) {
- static grpc_channel_filter const *extra_filters[] = {
- &grpc_server_auth_filter, &grpc_http_server_filter};
+static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_transport *transport,
+ grpc_auth_context *auth_context) {
grpc_server_secure_state *state = statep;
- grpc_transport_setup_result result;
- grpc_arg connector_arg = grpc_security_connector_to_arg(state->sc);
- grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
- grpc_server_get_channel_args(state->server), &connector_arg);
- result = grpc_server_setup_transport(state->server, transport, extra_filters,
- GPR_ARRAY_SIZE(extra_filters), mdctx,
- args_copy);
+ grpc_channel_args *args_copy;
+ grpc_arg args_to_add[2];
+ args_to_add[0] = grpc_server_credentials_to_arg(state->creds);
+ args_to_add[1] = grpc_auth_context_to_arg(auth_context);
+ args_copy = grpc_channel_args_copy_and_add(
+ grpc_server_get_channel_args(state->server), args_to_add,
+ GPR_ARRAY_SIZE(args_to_add));
+ grpc_server_setup_transport(exec_ctx, state->server, transport, args_copy);
grpc_channel_args_destroy(args_copy);
- return result;
}
-static void on_secure_transport_setup_done(void *statep,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint) {
+static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_security_status status,
+ grpc_endpoint *secure_endpoint,
+ grpc_auth_context *auth_context) {
grpc_server_secure_state *state = statep;
+ grpc_transport *transport;
if (status == GRPC_SECURITY_OK) {
- gpr_mu_lock(&state->mu);
- if (!state->is_shutdown) {
- grpc_create_chttp2_transport(
- setup_transport, state, grpc_server_get_channel_args(state->server),
- secure_endpoint, NULL, 0, grpc_mdctx_create(), 0);
- } else {
- /* We need to consume this here, because the server may already have gone
- * away. */
- grpc_endpoint_destroy(secure_endpoint);
+ if (secure_endpoint) {
+ gpr_mu_lock(&state->mu);
+ if (!state->is_shutdown) {
+ transport = grpc_create_chttp2_transport(
+ exec_ctx, grpc_server_get_channel_args(state->server),
+ secure_endpoint, 0);
+ setup_transport(exec_ctx, state, transport, auth_context);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
+ } else {
+ /* We need to consume this here, because the server may already have
+ * gone away. */
+ grpc_endpoint_destroy(exec_ctx, secure_endpoint);
+ }
+ gpr_mu_unlock(&state->mu);
}
- gpr_mu_unlock(&state->mu);
} else {
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
state_unref(state);
}
-static void on_accept(void *statep, grpc_endpoint *tcp) {
+static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
+ grpc_tcp_server_acceptor *acceptor) {
grpc_server_secure_state *state = statep;
state_ref(state);
- grpc_setup_secure_transport(state->sc, tcp, on_secure_transport_setup_done,
- state);
+ grpc_server_security_connector_do_handshake(
+ exec_ctx, state->sc, acceptor, tcp, on_secure_handshake_done, state);
}
/* Server callback: start listening on our ports */
-static void start(grpc_server *server, void *statep, grpc_pollset **pollsets,
- size_t pollset_count) {
+static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
+ grpc_pollset **pollsets, size_t pollset_count) {
grpc_server_secure_state *state = statep;
- grpc_tcp_server_start(state->tcp, pollsets, pollset_count, on_accept, state);
+ grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
+ on_accept, state);
}
-static void destroy_done(void *statep) {
+static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, bool success) {
grpc_server_secure_state *state = statep;
- grpc_server_listener_destroy_done(state->server);
+ if (state->destroy_callback != NULL) {
+ state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
+ success);
+ }
+ grpc_server_security_connector_shutdown(exec_ctx, state->sc);
state_unref(state);
}
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
-static void destroy(grpc_server *server, void *statep) {
+static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
+ grpc_closure *callback) {
grpc_server_secure_state *state = statep;
grpc_tcp_server *tcp;
gpr_mu_lock(&state->mu);
state->is_shutdown = 1;
+ state->destroy_callback = callback;
tcp = state->tcp;
gpr_mu_unlock(&state->mu);
- grpc_tcp_server_destroy(tcp, destroy_done, state);
+ grpc_tcp_server_unref(exec_ctx, tcp);
}
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
@@ -156,7 +173,13 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
int port_num = -1;
int port_temp;
grpc_security_status status = GRPC_SECURITY_ERROR;
- grpc_security_connector *sc = NULL;
+ grpc_server_security_connector *sc = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE(
+ "grpc_server_add_secure_http2_port("
+ "server=%p, addr=%s, creds=%p)",
+ 3, (server, addr, creds));
/* create security context */
if (creds == NULL) goto error;
@@ -167,23 +190,34 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
creds->type);
goto error;
}
+ sc->channel_args = grpc_server_get_channel_args(server);
/* resolve address */
resolved = grpc_blocking_resolve_address(addr, "https");
if (!resolved) {
goto error;
}
-
- tcp = grpc_tcp_server_create();
+ state = gpr_malloc(sizeof(*state));
+ memset(state, 0, sizeof(*state));
+ grpc_closure_init(&state->destroy_closure, destroy_done, state);
+ tcp = grpc_tcp_server_create(&state->destroy_closure);
if (!tcp) {
goto error;
}
+ state->server = server;
+ state->tcp = tcp;
+ state->sc = sc;
+ state->creds = grpc_server_credentials_ref(creds);
+ state->is_shutdown = 0;
+ gpr_mu_init(&state->mu);
+ gpr_ref_init(&state->refcount, 1);
+
for (i = 0; i < resolved->naddrs; i++) {
port_temp = grpc_tcp_server_add_port(
tcp, (struct sockaddr *)&resolved->addrs[i].addr,
resolved->addrs[i].len);
- if (port_temp >= 0) {
+ if (port_temp > 0) {
if (port_num == -1) {
port_num = port_temp;
} else {
@@ -204,32 +238,27 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
}
grpc_resolved_addresses_destroy(resolved);
- state = gpr_malloc(sizeof(*state));
- state->server = server;
- state->tcp = tcp;
- state->sc = sc;
- state->is_shutdown = 0;
- gpr_mu_init(&state->mu);
- gpr_ref_init(&state->refcount, 1);
-
/* Register with the server only upon success */
- grpc_server_add_listener(server, state, start, destroy);
+ grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
+ grpc_exec_ctx_finish(&exec_ctx);
return port_num;
/* Error path: cleanup and return */
error:
- if (sc) {
- grpc_security_connector_unref(sc);
- }
if (resolved) {
grpc_resolved_addresses_destroy(resolved);
}
if (tcp) {
- grpc_tcp_server_destroy(tcp, NULL, NULL);
- }
- if (state) {
- gpr_free(state);
+ grpc_tcp_server_unref(&exec_ctx, tcp);
+ } else {
+ if (sc) {
+ GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
+ }
+ if (state) {
+ gpr_free(state);
+ }
}
+ grpc_exec_ctx_finish(&exec_ctx);
return 0;
}
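
[Editor's aside] grpc_server_add_secure_http2_port() now drives the handshake through the server security connector and returns 0 on failure (note the stricter port_temp > 0 check above). A minimal caller-side sketch, assuming `creds` is a grpc_server_credentials* built elsewhere (for SSL, grpc_ssl_server_config gains a force_client_auth flag in this change) and using the public C surface of this era; the address and the bare completion queue handling are illustrative only.

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/log.h>

static void start_secure_server(grpc_server_credentials *creds) {
  grpc_server *server = grpc_server_create(NULL /* channel args */, NULL);
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  int port;
  grpc_server_register_completion_queue(server, cq, NULL);

  port = grpc_server_add_secure_http2_port(server, "0.0.0.0:50051", creds);
  if (port == 0) {
    gpr_log(GPR_ERROR, "Failed to bind secure port.");
  } else {
    gpr_log(GPR_INFO, "Listening securely on port %d.", port);
    grpc_server_start(server);
  }
  /* The server keeps its own reference to the credentials (see state->creds
     above), so the caller can release its ref. */
  grpc_server_credentials_release(creds);
}
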
diff --git a/src/core/statistics/census_init.c b/src/core/statistics/census_init.c
index e6306f5e6f..b6a962f228 100644
--- a/src/core/statistics/census_init.c
+++ b/src/core/statistics/census_init.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/src/core/statistics/census_interface.h b/src/core/statistics/census_interface.h
index eb4349c311..ce8ff92cd4 100644
--- a/src/core/statistics/census_interface.h
+++ b/src/core/statistics/census_interface.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_CENSUS_INTERFACE_H
-#define GRPC_INTERNAL_CORE_STATISTICS_CENSUS_INTERFACE_H
+#ifndef GRPC_CORE_STATISTICS_CENSUS_INTERFACE_H
+#define GRPC_CORE_STATISTICS_CENSUS_INTERFACE_H
#include <grpc/support/port_platform.h>
@@ -42,8 +42,8 @@
/* Structure of a census op id. Define as structure because 64bit integer is not
available on every platform for C89. */
typedef struct census_op_id {
- gpr_uint32 upper;
- gpr_uint32 lower;
+ uint32_t upper;
+ uint32_t lower;
} census_op_id;
typedef struct census_rpc_stats census_rpc_stats;
@@ -61,11 +61,11 @@ void census_shutdown(void);
TODO(hongyu): Figure out valid characters set for service name and command
name and document requirements here.*/
-int census_add_method_tag(census_op_id op_id, const char* method_name);
+int census_add_method_tag(census_op_id op_id, const char *method_name);
/* Annotates tracing information to a specific op_id.
Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */
-void census_tracing_print(census_op_id op_id, const char* annotation);
+void census_tracing_print(census_op_id op_id, const char *annotation);
/* Starts tracing for an RPC. Returns a locally unique census_op_id */
census_op_id census_tracing_start_op(void);
@@ -73,4 +73,4 @@ census_op_id census_tracing_start_op(void);
/* Ends tracing. Calling this function will invalidate the input op_id. */
void census_tracing_end_op(census_op_id op_id);
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_CENSUS_INTERFACE_H */
+#endif /* GRPC_CORE_STATISTICS_CENSUS_INTERFACE_H */
diff --git a/src/core/statistics/census_log.c b/src/core/statistics/census_log.c
index ec56ce38df..257ba586a3 100644
--- a/src/core/statistics/census_log.c
+++ b/src/core/statistics/census_log.c
@@ -102,21 +102,21 @@
/* End of platform specific code */
typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct* next;
- struct census_log_block_list_struct* prev;
- struct census_log_block* block;
+ struct census_log_block_list_struct *next;
+ struct census_log_block_list_struct *prev;
+ struct census_log_block *block;
} cl_block_list_struct;
typedef struct census_log_block {
/* Pointer to underlying buffer */
- char* buffer;
+ char *buffer;
gpr_atm writer_lock;
gpr_atm reader_lock;
/* Keeps completely written bytes. Declared atomic because accessed
simultaneously by reader and writer. */
gpr_atm bytes_committed;
/* Bytes already read */
- gpr_int32 bytes_read;
+ int32_t bytes_read;
/* Links for list */
cl_block_list_struct link;
/* We want this structure to be cacheline aligned. We assume the following
@@ -124,7 +124,7 @@ typedef struct census_log_block {
type 32b size 64b size
char* 4 8
3x gpr_atm 12 24
- gpr_int32 4 8 (assumes padding)
+ int32_t 4 8 (assumes padding)
cl_block_list_struct 12 24
TOTAL 32 64
@@ -147,7 +147,7 @@ typedef struct census_log_block {
/* A list of cl_blocks, doubly-linked through cl_block::link. */
typedef struct census_log_block_list {
- gpr_int32 count; /* Number of items in list. */
+ int32_t count; /* Number of items in list. */
cl_block_list_struct ht; /* head/tail of linked list. */
} cl_block_list;
@@ -175,21 +175,21 @@ struct census_log {
/* Number of cores (aka hardware-contexts) */
unsigned num_cores;
/* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
- gpr_int32 num_blocks;
- cl_block* blocks; /* Block metadata. */
- cl_core_local_block* core_local_blocks; /* Keeps core to block mappings. */
+ int32_t num_blocks;
+ cl_block *blocks; /* Block metadata. */
+ cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
gpr_mu lock;
int initialized; /* has log been initialized? */
/* Keeps the state of the reader iterator. A value of 0 indicates that
iterator has reached the end. census_log_init_reader() resets the
value to num_core to restart iteration. */
- gpr_uint32 read_iterator_state;
+ uint32_t read_iterator_state;
/* Points to the block being read. If non-NULL, the block is locked for
reading (block_being_read_->reader_lock is held). */
- cl_block* block_being_read;
+ cl_block *block_being_read;
/* A non-zero value indicates that log is full. */
gpr_atm is_full;
- char* buffer;
+ char *buffer;
cl_block_list free_block_list;
cl_block_list dirty_block_list;
gpr_atm out_of_space_count;
@@ -201,44 +201,44 @@ static struct census_log g_log;
/* Functions that operate on an atomic memory location used as a lock */
/* Returns non-zero if lock is acquired */
-static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
+static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
+static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
/* Functions that operate on cl_core_local_block's */
-static void cl_core_local_block_set_block(cl_core_local_block* clb,
- cl_block* block) {
+static void cl_core_local_block_set_block(cl_core_local_block *clb,
+ cl_block *block) {
gpr_atm_rel_store(&clb->block, (gpr_atm)block);
}
-static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
- return (cl_block*)gpr_atm_acq_load(&clb->block);
+static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
+ return (cl_block *)gpr_atm_acq_load(&clb->block);
}
/* Functions that operate on cl_block_list_struct's */
-static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
- cl_block* block) {
+static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
+ cl_block *block) {
bls->next = bls->prev = bls;
bls->block = block;
}
/* Functions that operate on cl_block_list's */
-static void cl_block_list_initialize(cl_block_list* list) {
+static void cl_block_list_initialize(cl_block_list *list) {
list->count = 0;
cl_block_list_struct_initialize(&list->ht, NULL);
}
/* Returns head of *this, or NULL if empty. */
-static cl_block* cl_block_list_head(cl_block_list* list) {
+static cl_block *cl_block_list_head(cl_block_list *list) {
return list->ht.next->block;
}
/* Insert element *e after *pos. */
-static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
- cl_block_list_struct* e) {
+static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
+ cl_block_list_struct *e) {
list->count++;
e->next = pos->next;
e->prev = pos;
@@ -247,17 +247,17 @@ static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
}
/* Insert block at the head of the list */
-static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
+static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, &list->ht, &block->link);
}
/* Insert block at the tail of the list */
-static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
+static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, list->ht.prev, &block->link);
}
/* Removes block *b. Requires *b be in the list. */
-static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
+static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
list->count--;
b->link.next->prev = b->link.prev;
b->link.prev->next = b->link.next;
@@ -265,7 +265,7 @@ static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
/* Functions that operate on cl_block's */
-static void cl_block_initialize(cl_block* block, char* buffer) {
+static void cl_block_initialize(cl_block *block, char *buffer) {
block->buffer = buffer;
gpr_atm_rel_store(&block->writer_lock, 0);
gpr_atm_rel_store(&block->reader_lock, 0);
@@ -275,12 +275,12 @@ static void cl_block_initialize(cl_block* block, char* buffer) {
}
/* Guards against exposing partially written buffer to the reader. */
-static void cl_block_set_bytes_committed(cl_block* block,
- gpr_int32 bytes_committed) {
+static void cl_block_set_bytes_committed(cl_block *block,
+ int32_t bytes_committed) {
gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
}
-static gpr_int32 cl_block_get_bytes_committed(cl_block* block) {
+static int32_t cl_block_get_bytes_committed(cl_block *block) {
return gpr_atm_acq_load(&block->bytes_committed);
}
@@ -291,7 +291,7 @@ static gpr_int32 cl_block_get_bytes_committed(cl_block* block) {
On success, clears the block state and returns with writer_lock_ and
reader_lock_ held. These locks are released by a subsequent
cl_block_access_enable() call. */
-static int cl_block_try_disable_access(cl_block* block, int discard_data) {
+static int cl_block_try_disable_access(cl_block *block, int discard_data) {
if (!cl_try_lock(&block->writer_lock)) {
return 0;
}
@@ -310,14 +310,14 @@ static int cl_block_try_disable_access(cl_block* block, int discard_data) {
return 1;
}
-static void cl_block_enable_access(cl_block* block) {
+static void cl_block_enable_access(cl_block *block) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
}
/* Returns with writer_lock held. */
-static void* cl_block_start_write(cl_block* block, size_t size) {
- gpr_int32 bytes_committed;
+static void *cl_block_start_write(cl_block *block, size_t size) {
+ int32_t bytes_committed;
if (!cl_try_lock(&block->writer_lock)) {
return NULL;
}
@@ -332,7 +332,7 @@ static void* cl_block_start_write(cl_block* block, size_t size) {
/* Releases writer_lock and increments committed bytes by 'bytes_written'.
'bytes_written' must be <= 'size' specified in the corresponding
StartWrite() call. This function is thread-safe. */
-static void cl_block_end_write(cl_block* block, size_t bytes_written) {
+static void cl_block_end_write(cl_block *block, size_t bytes_written) {
cl_block_set_bytes_committed(
block, cl_block_get_bytes_committed(block) + bytes_written);
cl_unlock(&block->writer_lock);
@@ -343,8 +343,8 @@ static void cl_block_end_write(cl_block* block, size_t bytes_written) {
released by a subsequent cl_block_end_read() call. Returns NULL if:
- read in progress
- no data available */
-static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
- void* record;
+static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
+ void *record;
if (!cl_try_lock(&block->reader_lock)) {
return NULL;
}
@@ -360,7 +360,7 @@ static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
return record;
}
-static void cl_block_end_read(cl_block* block) {
+static void cl_block_end_read(cl_block *block) {
cl_unlock(&block->reader_lock);
}
@@ -368,8 +368,8 @@ static void cl_block_end_read(cl_block* block) {
/* Allocates a new free block (or recycles an available dirty block if log is
configured to discard old records). Returns NULL if out-of-space. */
-static cl_block* cl_allocate_block(void) {
- cl_block* block = cl_block_list_head(&g_log.free_block_list);
+static cl_block *cl_allocate_block(void) {
+ cl_block *block = cl_block_list_head(&g_log.free_block_list);
if (block != NULL) {
cl_block_list_remove(&g_log.free_block_list, block);
return block;
@@ -395,11 +395,10 @@ static cl_block* cl_allocate_block(void) {
- allocated a new block OR
- 'core_id' => 'old_block' mapping changed (another thread allocated a
block before lock was acquired). */
-static int cl_allocate_core_local_block(gpr_int32 core_id,
- cl_block* old_block) {
+static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
/* Now that we have the lock, check if core-local mapping has changed. */
- cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
- cl_block* block = cl_core_local_block_get_block(core_local_block);
+ cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
+ cl_block *block = cl_core_local_block_get_block(core_local_block);
if ((block != NULL) && (block != old_block)) {
return 1;
}
@@ -417,16 +416,16 @@ static int cl_allocate_core_local_block(gpr_int32 core_id,
return 1;
}
-static cl_block* cl_get_block(void* record) {
- gpr_uintptr p = (gpr_uintptr)((char*)record - g_log.buffer);
- gpr_uintptr index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
+static cl_block *cl_get_block(void *record) {
+ uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
+ uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
return &g_log.blocks[index];
}
/* Gets the next block to read and tries to free 'prev' block (if not NULL).
Returns NULL if reached the end. */
-static cl_block* cl_next_block_to_read(cl_block* prev) {
- cl_block* block = NULL;
+static cl_block *cl_next_block_to_read(cl_block *prev) {
+ cl_block *block = NULL;
if (g_log.read_iterator_state == g_log.num_cores) {
/* We are traversing dirty list; find the next dirty block. */
if (prev != NULL) {
@@ -460,7 +459,7 @@ static cl_block* cl_next_block_to_read(cl_block* prev) {
/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
- gpr_int32 ix;
+ int32_t ix;
/* Check cacheline alignment. */
GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
@@ -474,11 +473,11 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
g_log.read_iterator_state = 0;
g_log.block_being_read = NULL;
gpr_atm_rel_store(&g_log.is_full, 0);
- g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
+ g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.core_local_blocks, 0,
g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block*)gpr_malloc_aligned(
+ g_log.blocks = (cl_block *)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
@@ -486,7 +485,7 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
for (ix = 0; ix < g_log.num_blocks; ++ix) {
- cl_block* block = g_log.blocks + ix;
+ cl_block *block = g_log.blocks + ix;
cl_block_initialize(block,
g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
cl_block_try_disable_access(block, 1 /* discard data */);
@@ -508,19 +507,19 @@ void census_log_shutdown(void) {
g_log.initialized = 0;
}
-void* census_log_start_write(size_t size) {
+void *census_log_start_write(size_t size) {
/* Used to bound number of times block allocation is attempted. */
- gpr_int32 attempts_remaining = g_log.num_blocks;
+ int32_t attempts_remaining = g_log.num_blocks;
/* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
- gpr_int32 core_id = gpr_cpu_current_cpu();
+ int32_t core_id = gpr_cpu_current_cpu();
GPR_ASSERT(g_log.initialized);
if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
return NULL;
}
do {
int allocated;
- void* record = NULL;
- cl_block* block =
+ void *record = NULL;
+ cl_block *block =
cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
if (block && (record = cl_block_start_write(block, size))) {
return record;
@@ -546,7 +545,7 @@ void* census_log_start_write(size_t size) {
return NULL;
}
-void census_log_end_write(void* record, size_t bytes_written) {
+void census_log_end_write(void *record, size_t bytes_written) {
GPR_ASSERT(g_log.initialized);
cl_block_end_write(cl_get_block(record), bytes_written);
}
@@ -563,7 +562,7 @@ void census_log_init_reader(void) {
gpr_mu_unlock(&g_log.lock);
}
-const void* census_log_read_next(size_t* bytes_available) {
+const void *census_log_read_next(size_t *bytes_available) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
if (g_log.block_being_read != NULL) {
@@ -572,7 +571,7 @@ const void* census_log_read_next(size_t* bytes_available) {
do {
g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
if (g_log.block_being_read != NULL) {
- void* record =
+ void *record =
cl_block_start_read(g_log.block_being_read, bytes_available);
if (record != NULL) {
gpr_mu_unlock(&g_log.lock);
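
The writer_lock/reader_lock fields in this file are plain try-locks built from acquire/release atomics: a writer that fails to take a block's lock does not spin, it moves on to another block. A standalone sketch of the same try-lock shape, written with C11 <stdatomic.h> instead of the gpr_atm wrappers, so this is an illustration rather than the gRPC primitive:

#include <stdatomic.h>
#include <stdbool.h>

typedef atomic_int try_lock; /* 0 = unlocked, 1 = locked */

/* Returns true if the lock was acquired; never blocks. */
static bool tl_try_lock(try_lock *l) {
  int expected = 0;
  return atomic_compare_exchange_strong_explicit(
      l, &expected, 1, memory_order_acquire, memory_order_relaxed);
}

static void tl_unlock(try_lock *l) {
  atomic_store_explicit(l, 0, memory_order_release);
}

census_log_start_write() relies on exactly this non-blocking behaviour: when the core-local block is contended it allocates or steals a different block instead of waiting.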
diff --git a/src/core/statistics/census_log.h b/src/core/statistics/census_log.h
index 06869b7a33..e7ce0d4433 100644
--- a/src/core/statistics/census_log.h
+++ b/src/core/statistics/census_log.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_CENSUS_LOG_H
-#define GRPC_INTERNAL_CORE_STATISTICS_CENSUS_LOG_H
+#ifndef GRPC_CORE_STATISTICS_CENSUS_LOG_H
+#define GRPC_CORE_STATISTICS_CENSUS_LOG_H
#include <stddef.h>
@@ -62,9 +62,9 @@ void census_log_shutdown(void);
- log is configured to keep old records OR
- all blocks are pinned by incomplete records.
*/
-void* census_log_start_write(size_t size);
+void *census_log_start_write(size_t size);
-void census_log_end_write(void* record, size_t bytes_written);
+void census_log_end_write(void *record, size_t bytes_written);
/* census_log_read_next() iterates over blocks with data and for each block
returns a pointer to the first unread byte. The number of bytes that can be
@@ -75,7 +75,7 @@ void census_log_end_write(void* record, size_t bytes_written);
current iteration.
*/
void census_log_init_reader(void);
-const void* census_log_read_next(size_t* bytes_available);
+const void *census_log_read_next(size_t *bytes_available);
/* Returns estimated remaining space across all blocks, in bytes. If log is
configured to discard old records, returns total log space. Otherwise,
@@ -88,4 +88,4 @@ size_t census_log_remaining_space(void);
out-of-space. */
int census_log_out_of_space_count(void);
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_CENSUS_LOG_H */
+#endif /* GRPC_CORE_STATISTICS_CENSUS_LOG_H */
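
Taken together, the declarations above form a small write/commit/read API. A usage sketch, assuming the header path shown in this diff and that the log is initialized once per process:

#include <stdio.h>
#include <string.h>

#include "src/core/statistics/census_log.h"

static void log_roundtrip(void) {
  const char msg[] = "hello census";
  void *slot;
  const void *rec;
  size_t available;

  census_log_initialize(1 /* MB */, 1 /* discard old records */);

  /* Writer: reserve space, fill it, then commit what was actually written. */
  slot = census_log_start_write(sizeof(msg));
  if (slot != NULL) {
    memcpy(slot, msg, sizeof(msg));
    census_log_end_write(slot, sizeof(msg));
  }

  /* Reader: walk the blocks that contain committed data. */
  census_log_init_reader();
  while ((rec = census_log_read_next(&available)) != NULL) {
    printf("read a block with %lu committed bytes\n", (unsigned long)available);
  }

  census_log_shutdown();
}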
diff --git a/src/core/statistics/census_rpc_stats.c b/src/core/statistics/census_rpc_stats.c
index 0491c91947..524a60793a 100644
--- a/src/core/statistics/census_rpc_stats.c
+++ b/src/core/statistics/census_rpc_stats.c
@@ -56,8 +56,8 @@ typedef census_per_method_rpc_stats per_method_stats;
static gpr_once g_stats_store_mu_init = GPR_ONCE_INIT;
/* Guards two stats stores. */
static gpr_mu g_mu;
-static census_ht* g_client_stats_store = NULL;
-static census_ht* g_server_stats_store = NULL;
+static census_ht *g_client_stats_store = NULL;
+static census_ht *g_server_stats_store = NULL;
static void init_mutex(void) { gpr_mu_init(&g_mu); }
@@ -65,23 +65,23 @@ static void init_mutex_once(void) {
gpr_once_init(&g_stats_store_mu_init, init_mutex);
}
-static int cmp_str_keys(const void* k1, const void* k2) {
- return strcmp((const char*)k1, (const char*)k2);
+static int cmp_str_keys(const void *k1, const void *k2) {
+ return strcmp((const char *)k1, (const char *)k2);
}
/* TODO(hongyu): replace it with cityhash64 */
-static gpr_uint64 simple_hash(const void* k) {
+static uint64_t simple_hash(const void *k) {
size_t len = strlen(k);
- gpr_uint64 higher = gpr_murmur_hash3((const char*)k, len / 2, 0);
+ uint64_t higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
return higher << 32 |
- gpr_murmur_hash3((const char*)k + len / 2, len - len / 2, 0);
+ gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
}
-static void delete_stats(void* stats) {
- census_window_stats_destroy((struct census_window_stats*)stats);
+static void delete_stats(void *stats) {
+ census_window_stats_destroy((struct census_window_stats *)stats);
}
-static void delete_key(void* key) { gpr_free(key); }
+static void delete_key(void *key) { gpr_free(key); }
static const census_ht_option ht_opt = {
CENSUS_HT_POINTER /* key type */, 1999 /* n_of_buckets */,
@@ -89,13 +89,13 @@ static const census_ht_option ht_opt = {
delete_stats /* data deleter */, delete_key /* key deleter */
};
-static void init_rpc_stats(void* stats) {
+static void init_rpc_stats(void *stats) {
memset(stats, 0, sizeof(census_rpc_stats));
}
-static void stat_add_proportion(double p, void* base, const void* addme) {
- census_rpc_stats* b = (census_rpc_stats*)base;
- census_rpc_stats* a = (census_rpc_stats*)addme;
+static void stat_add_proportion(double p, void *base, const void *addme) {
+ census_rpc_stats *b = (census_rpc_stats *)base;
+ census_rpc_stats *a = (census_rpc_stats *)addme;
b->cnt += p * a->cnt;
b->rpc_error_cnt += p * a->rpc_error_cnt;
b->app_error_cnt += p * a->app_error_cnt;
@@ -106,7 +106,7 @@ static void stat_add_proportion(double p, void* base, const void* addme) {
b->wire_response_bytes += p * a->wire_response_bytes;
}
-static void stat_add(void* base, const void* addme) {
+static void stat_add(void *base, const void *addme) {
stat_add_proportion(1.0, base, addme);
}
@@ -116,18 +116,18 @@ static gpr_timespec min_hour_total_intervals[3] = {
static const census_window_stats_stat_info window_stats_settings = {
sizeof(census_rpc_stats), init_rpc_stats, stat_add, stat_add_proportion};
-census_rpc_stats* census_rpc_stats_create_empty(void) {
- census_rpc_stats* ret =
- (census_rpc_stats*)gpr_malloc(sizeof(census_rpc_stats));
+census_rpc_stats *census_rpc_stats_create_empty(void) {
+ census_rpc_stats *ret =
+ (census_rpc_stats *)gpr_malloc(sizeof(census_rpc_stats));
memset(ret, 0, sizeof(census_rpc_stats));
return ret;
}
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data) {
+void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data) {
int i = 0;
for (i = 0; i < data->num_entries; i++) {
if (data->stats[i].method != NULL) {
- gpr_free((void*)data->stats[i].method);
+ gpr_free((void *)data->stats[i].method);
}
}
if (data->stats != NULL) {
@@ -137,27 +137,27 @@ void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data) {
data->stats = NULL;
}
-static void record_stats(census_ht* store, census_op_id op_id,
- const census_rpc_stats* stats) {
+static void record_stats(census_ht *store, census_op_id op_id,
+ const census_rpc_stats *stats) {
gpr_mu_lock(&g_mu);
if (store != NULL) {
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
census_internal_lock_trace_store();
trace = census_get_trace_obj_locked(op_id);
if (trace != NULL) {
- const char* method_name = census_get_trace_method_name(trace);
- struct census_window_stats* window_stats = NULL;
+ const char *method_name = census_get_trace_method_name(trace);
+ struct census_window_stats *window_stats = NULL;
census_ht_key key;
- key.ptr = (void*)method_name;
+ key.ptr = (void *)method_name;
window_stats = census_ht_find(store, key);
census_internal_unlock_trace_store();
if (window_stats == NULL) {
window_stats = census_window_stats_create(3, min_hour_total_intervals,
30, &window_stats_settings);
key.ptr = gpr_strdup(key.ptr);
- census_ht_insert(store, key, (void*)window_stats);
+ census_ht_insert(store, key, (void *)window_stats);
}
- census_window_stats_add(window_stats, gpr_now(), stats);
+ census_window_stats_add(window_stats, gpr_now(GPR_CLOCK_REALTIME), stats);
} else {
census_internal_unlock_trace_store();
}
@@ -166,17 +166,17 @@ static void record_stats(census_ht* store, census_op_id op_id,
}
void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats* stats) {
+ const census_rpc_stats *stats) {
record_stats(g_client_stats_store, op_id, stats);
}
void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats* stats) {
+ const census_rpc_stats *stats) {
record_stats(g_server_stats_store, op_id, stats);
}
/* Get stats from input stats store */
-static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
+static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
GPR_ASSERT(data != NULL);
if (data->num_entries != 0) {
census_aggregated_rpc_stats_set_empty(data);
@@ -185,24 +185,25 @@ static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
if (store != NULL) {
size_t n;
unsigned i, j;
- gpr_timespec now = gpr_now();
- census_ht_kv* kv = census_ht_get_all_elements(store, &n);
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+ census_ht_kv *kv = census_ht_get_all_elements(store, &n);
if (kv != NULL) {
data->num_entries = n;
- data->stats = (per_method_stats*)gpr_malloc(sizeof(per_method_stats) * n);
+ data->stats =
+ (per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
for (i = 0; i < n; i++) {
census_window_stats_sums sums[NUM_INTERVALS];
for (j = 0; j < NUM_INTERVALS; j++) {
- sums[j].statistic = (void*)census_rpc_stats_create_empty();
+ sums[j].statistic = (void *)census_rpc_stats_create_empty();
}
data->stats[i].method = gpr_strdup(kv[i].k.ptr);
census_window_stats_get_sums(kv[i].v, now, sums);
data->stats[i].minute_stats =
- *(census_rpc_stats*)sums[MINUTE_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
data->stats[i].hour_stats =
- *(census_rpc_stats*)sums[HOUR_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
data->stats[i].total_stats =
- *(census_rpc_stats*)sums[TOTAL_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
for (j = 0; j < NUM_INTERVALS; j++) {
gpr_free(sums[j].statistic);
}
@@ -213,11 +214,11 @@ static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
gpr_mu_unlock(&g_mu);
}
-void census_get_client_stats(census_aggregated_rpc_stats* data) {
+void census_get_client_stats(census_aggregated_rpc_stats *data) {
get_stats(g_client_stats_store, data);
}
-void census_get_server_stats(census_aggregated_rpc_stats* data) {
+void census_get_server_stats(census_aggregated_rpc_stats *data) {
get_stats(g_server_stats_store, data);
}
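
simple_hash() above builds a 64-bit table key by hashing each half of the string to 32 bits and packing the two results. The same shape in standalone form, with a 32-bit FNV-1a standing in for gpr_murmur_hash3 (illustrative only; the real store keeps murmur until the cityhash64 TODO lands):

#include <stdint.h>
#include <string.h>

/* 32-bit FNV-1a, a stand-in for gpr_murmur_hash3 in this sketch. */
static uint32_t fnv1a_32(const char *data, size_t len) {
  uint32_t h = 2166136261u;
  size_t i;
  for (i = 0; i < len; i++) {
    h ^= (unsigned char)data[i];
    h *= 16777619u;
  }
  return h;
}

/* Hash each half of the key separately and pack both 32-bit results. */
static uint64_t simple_hash_sketch(const char *k) {
  size_t len = strlen(k);
  uint64_t higher = fnv1a_32(k, len / 2);
  return higher << 32 | fnv1a_32(k + len / 2, len - len / 2);
}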
diff --git a/src/core/statistics/census_rpc_stats.h b/src/core/statistics/census_rpc_stats.h
index 9336dce1f8..4cf17d2e52 100644
--- a/src/core/statistics/census_rpc_stats.h
+++ b/src/core/statistics/census_rpc_stats.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_CENSUS_RPC_STATS_H
-#define GRPC_INTERNAL_CORE_STATISTICS_CENSUS_RPC_STATS_H
+#ifndef GRPC_CORE_STATISTICS_CENSUS_RPC_STATS_H
+#define GRPC_CORE_STATISTICS_CENSUS_RPC_STATS_H
#include "src/core/statistics/census_interface.h"
#include <grpc/support/port_platform.h>
@@ -42,9 +42,9 @@ extern "C" {
#endif
struct census_rpc_stats {
- gpr_uint64 cnt;
- gpr_uint64 rpc_error_cnt;
- gpr_uint64 app_error_cnt;
+ uint64_t cnt;
+ uint64_t rpc_error_cnt;
+ uint64_t app_error_cnt;
double elapsed_time_ms;
double api_request_bytes;
double wire_request_bytes;
@@ -53,10 +53,10 @@ struct census_rpc_stats {
};
/* Creates an empty rpc stats object on heap. */
-census_rpc_stats* census_rpc_stats_create_empty(void);
+census_rpc_stats *census_rpc_stats_create_empty(void);
typedef struct census_per_method_rpc_stats {
- const char* method;
+ const char *method;
census_rpc_stats minute_stats; /* cumulative stats in the past minute */
census_rpc_stats hour_stats; /* cumulative stats in the past hour */
census_rpc_stats total_stats; /* cumulative stats from last gc */
@@ -64,19 +64,19 @@ typedef struct census_per_method_rpc_stats {
typedef struct census_aggregated_rpc_stats {
int num_entries;
- census_per_method_rpc_stats* stats;
+ census_per_method_rpc_stats *stats;
} census_aggregated_rpc_stats;
/* Initializes an aggregated rpc stats object to an empty state. */
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data);
+void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data);
/* Records client side stats of a rpc. */
void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats* stats);
+ const census_rpc_stats *stats);
/* Records server side stats of a rpc. */
void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats* stats);
+ const census_rpc_stats *stats);
/* The following two functions are intended for inprocess query of
per-service per-method stats from grpc implementations. */
@@ -84,12 +84,12 @@ void census_record_rpc_server_stats(census_op_id op_id,
/* Populates *data_map with server side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
-void census_get_server_stats(census_aggregated_rpc_stats* data_map);
+void census_get_server_stats(census_aggregated_rpc_stats *data_map);
/* Populates *data_map with client side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
-void census_get_client_stats(census_aggregated_rpc_stats* data_map);
+void census_get_client_stats(census_aggregated_rpc_stats *data_map);
void census_stats_store_init(void);
void census_stats_store_shutdown(void);
@@ -98,4 +98,4 @@ void census_stats_store_shutdown(void);
}
#endif
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_CENSUS_RPC_STATS_H */
+#endif /* GRPC_CORE_STATISTICS_CENSUS_RPC_STATS_H */
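
The stats header above pairs with the tracing API: an op id is created by tracing, tagged with a method name, and only then do recorded stats land in a per-method window. A sketch of that flow; the method string is a placeholder, and census_tracing_init()/census_stats_store_init() are assumed to be the matching bring-up calls declared in these headers:

#include <stdio.h>

#include <grpc/support/alloc.h>

#include "src/core/statistics/census_rpc_stats.h"
#include "src/core/statistics/census_tracing.h"

static void record_and_dump(void) {
  census_op_id id;
  census_rpc_stats *stats;
  census_aggregated_rpc_stats agg = {0, NULL};
  int i;

  census_tracing_init();
  census_stats_store_init();

  id = census_tracing_start_op();
  census_add_method_tag(id, "/pkg.Service/Method");

  stats = census_rpc_stats_create_empty();
  stats->cnt = 1;
  stats->elapsed_time_ms = 12.5;
  census_record_rpc_client_stats(id, stats);
  gpr_free(stats);

  census_get_client_stats(&agg);
  for (i = 0; i < agg.num_entries; i++) {
    printf("%s: %f ms total\n", agg.stats[i].method,
           agg.stats[i].total_stats.elapsed_time_ms);
  }
  census_aggregated_rpc_stats_set_empty(&agg);

  census_tracing_end_op(id);
  census_stats_store_shutdown();
  census_tracing_shutdown();
}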
diff --git a/src/core/statistics/census_tracing.c b/src/core/statistics/census_tracing.c
index 05e72b99c0..dc0f8a26f5 100644
--- a/src/core/statistics/census_tracing.c
+++ b/src/core/statistics/census_tracing.c
@@ -44,10 +44,10 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
-void census_trace_obj_destroy(census_trace_obj* obj) {
- census_trace_annotation* p = obj->annotations;
+void census_trace_obj_destroy(census_trace_obj *obj) {
+ census_trace_annotation *p = obj->annotations;
while (p != NULL) {
- census_trace_annotation* next = p->next;
+ census_trace_annotation *next = p->next;
gpr_free(p);
p = next;
}
@@ -55,27 +55,27 @@ void census_trace_obj_destroy(census_trace_obj* obj) {
gpr_free(obj);
}
-static void delete_trace_obj(void* obj) {
- census_trace_obj_destroy((census_trace_obj*)obj);
+static void delete_trace_obj(void *obj) {
+ census_trace_obj_destroy((census_trace_obj *)obj);
}
static const census_ht_option ht_opt = {
- CENSUS_HT_UINT64 /* key type*/, 571 /* n_of_buckets */, NULL /* hash */,
+ CENSUS_HT_UINT64 /* key type */, 571 /* n_of_buckets */, NULL /* hash */,
NULL /* compare_keys */, delete_trace_obj /* delete data */,
NULL /* delete key */
};
static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
static gpr_mu g_mu; /* Guards following two static variables. */
-static census_ht* g_trace_store = NULL;
-static gpr_uint64 g_id = 0;
+static census_ht *g_trace_store = NULL;
+static uint64_t g_id = 0;
-static census_ht_key op_id_as_key(census_op_id* id) {
- return *(census_ht_key*)id;
+static census_ht_key op_id_as_key(census_op_id *id) {
+ return *(census_ht_key *)id;
}
-static gpr_uint64 op_id_2_uint64(census_op_id* id) {
- gpr_uint64 ret;
+static uint64_t op_id_2_uint64(census_op_id *id) {
+ uint64_t ret;
memcpy(&ret, id, sizeof(census_op_id));
return ret;
}
@@ -89,22 +89,22 @@ static void init_mutex_once(void) {
census_op_id census_tracing_start_op(void) {
gpr_mu_lock(&g_mu);
{
- census_trace_obj* ret = gpr_malloc(sizeof(census_trace_obj));
+ census_trace_obj *ret = gpr_malloc(sizeof(census_trace_obj));
memset(ret, 0, sizeof(census_trace_obj));
g_id++;
memcpy(&ret->id, &g_id, sizeof(census_op_id));
ret->rpc_stats.cnt = 1;
- ret->ts = gpr_now();
- census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void*)ret);
+ ret->ts = gpr_now(GPR_CLOCK_REALTIME);
+ census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void *)ret);
gpr_log(GPR_DEBUG, "Start tracing for id %lu", g_id);
gpr_mu_unlock(&g_mu);
return ret->id;
}
}
-int census_add_method_tag(census_op_id op_id, const char* method) {
+int census_add_method_tag(census_op_id op_id, const char *method) {
int ret = 0;
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace == NULL) {
@@ -116,16 +116,16 @@ int census_add_method_tag(census_op_id op_id, const char* method) {
return ret;
}
-void census_tracing_print(census_op_id op_id, const char* anno_txt) {
- census_trace_obj* trace = NULL;
+void census_tracing_print(census_op_id op_id, const char *anno_txt) {
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
- census_trace_annotation* anno = gpr_malloc(sizeof(census_trace_annotation));
- anno->ts = gpr_now();
+ census_trace_annotation *anno = gpr_malloc(sizeof(census_trace_annotation));
+ anno->ts = gpr_now(GPR_CLOCK_REALTIME);
{
- char* d = anno->txt;
- const char* s = anno_txt;
+ char *d = anno->txt;
+ const char *s = anno_txt;
int n = 0;
for (; n < CENSUS_MAX_ANNOTATION_LENGTH && *s != '\0'; ++n) {
*d++ = *s++;
@@ -139,12 +139,12 @@ void census_tracing_print(census_op_id op_id, const char* anno_txt) {
}
void census_tracing_end_op(census_op_id op_id) {
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
- trace->rpc_stats.elapsed_time_ms =
- gpr_timespec_to_micros(gpr_time_sub(gpr_now(), trace->ts));
+ trace->rpc_stats.elapsed_time_ms = gpr_timespec_to_micros(
+ gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), trace->ts));
gpr_log(GPR_DEBUG, "End tracing for id %lu, method %s, latency %f us",
op_id_2_uint64(&op_id), trace->method,
trace->rpc_stats.elapsed_time_ms);
@@ -180,20 +180,21 @@ void census_internal_lock_trace_store(void) { gpr_mu_lock(&g_mu); }
void census_internal_unlock_trace_store(void) { gpr_mu_unlock(&g_mu); }
-census_trace_obj* census_get_trace_obj_locked(census_op_id op_id) {
+census_trace_obj *census_get_trace_obj_locked(census_op_id op_id) {
if (g_trace_store == NULL) {
gpr_log(GPR_ERROR, "Census trace store is not initialized.");
return NULL;
}
- return (census_trace_obj*)census_ht_find(g_trace_store, op_id_as_key(&op_id));
+ return (census_trace_obj *)census_ht_find(g_trace_store,
+ op_id_as_key(&op_id));
}
-const char* census_get_trace_method_name(const census_trace_obj* trace) {
+const char *census_get_trace_method_name(const census_trace_obj *trace) {
return trace->method;
}
-static census_trace_annotation* dup_annotation_chain(
- census_trace_annotation* from) {
+static census_trace_annotation *dup_annotation_chain(
+ census_trace_annotation *from) {
census_trace_annotation *ret = NULL;
census_trace_annotation **to = &ret;
for (; from != NULL; from = from->next) {
@@ -204,8 +205,8 @@ static census_trace_annotation* dup_annotation_chain(
return ret;
}
-static census_trace_obj* trace_obj_dup(census_trace_obj* from) {
- census_trace_obj* to = NULL;
+static census_trace_obj *trace_obj_dup(census_trace_obj *from) {
+ census_trace_obj *to = NULL;
GPR_ASSERT(from != NULL);
to = gpr_malloc(sizeof(census_trace_obj));
to->id = from->id;
@@ -216,18 +217,18 @@ static census_trace_obj* trace_obj_dup(census_trace_obj* from) {
return to;
}
-census_trace_obj** census_get_active_ops(int* num_active_ops) {
- census_trace_obj** ret = NULL;
+census_trace_obj **census_get_active_ops(int *num_active_ops) {
+ census_trace_obj **ret = NULL;
gpr_mu_lock(&g_mu);
if (g_trace_store != NULL) {
size_t n = 0;
- census_ht_kv* all_kvs = census_ht_get_all_elements(g_trace_store, &n);
+ census_ht_kv *all_kvs = census_ht_get_all_elements(g_trace_store, &n);
*num_active_ops = (int)n;
- if (n != 0 ) {
+ if (n != 0) {
size_t i = 0;
ret = gpr_malloc(sizeof(census_trace_obj *) * n);
for (i = 0; i < n; i++) {
- ret[i] = trace_obj_dup((census_trace_obj*)all_kvs[i].v);
+ ret[i] = trace_obj_dup((census_trace_obj *)all_kvs[i].v);
}
}
gpr_free(all_kvs);
diff --git a/src/core/statistics/census_tracing.h b/src/core/statistics/census_tracing.h
index a4494b510c..b611e95bf4 100644
--- a/src/core/statistics/census_tracing.h
+++ b/src/core/statistics/census_tracing.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_CENSUS_TRACING_H
-#define GRPC_INTERNAL_CORE_STATISTICS_CENSUS_TRACING_H
+#ifndef GRPC_CORE_STATISTICS_CENSUS_TRACING_H
+#define GRPC_CORE_STATISTICS_CENSUS_TRACING_H
#include <grpc/support/time.h>
#include "src/core/statistics/census_rpc_stats.h"
@@ -50,19 +50,19 @@ extern "C" {
typedef struct census_trace_annotation {
gpr_timespec ts; /* timestamp of the annotation */
char txt[CENSUS_MAX_ANNOTATION_LENGTH + 1]; /* actual txt annotation */
- struct census_trace_annotation* next;
+ struct census_trace_annotation *next;
} census_trace_annotation;
typedef struct census_trace_obj {
census_op_id id;
gpr_timespec ts;
census_rpc_stats rpc_stats;
- char* method;
- census_trace_annotation* annotations;
+ char *method;
+ census_trace_annotation *annotations;
} census_trace_obj;
/* Deletes trace object. */
-void census_trace_obj_destroy(census_trace_obj* obj);
+void census_trace_obj_destroy(census_trace_obj *obj);
/* Initializes trace store. This function is thread safe. */
void census_tracing_init(void);
@@ -73,7 +73,7 @@ void census_tracing_shutdown(void);
/* Gets trace obj corresponding to the input op_id. Returns NULL if trace store
is not initialized or trace obj is not found. Requires trace store being
locked before calling this function. */
-census_trace_obj* census_get_trace_obj_locked(census_op_id op_id);
+census_trace_obj *census_get_trace_obj_locked(census_op_id op_id);
/* The following two functions acquire and release the trace store global lock.
They are for census internal use only. */
@@ -81,16 +81,16 @@ void census_internal_lock_trace_store(void);
void census_internal_unlock_trace_store(void);
/* Gets method name associated with the input trace object. */
-const char* census_get_trace_method_name(const census_trace_obj* trace);
+const char *census_get_trace_method_name(const census_trace_obj *trace);
/* Returns an array of pointers to trace objects of currently active operations
and fills in number of active operations. Returns NULL if there are no active
operations.
Caller owns the returned objects. */
-census_trace_obj** census_get_active_ops(int* num_active_ops);
+census_trace_obj **census_get_active_ops(int *num_active_ops);
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_CENSUS_TRACING_H */
+#endif /* GRPC_CORE_STATISTICS_CENSUS_TRACING_H */
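
census_get_active_ops() hands back copies that the caller must destroy; the sketch below exercises that along with the annotation call. The method name and annotation text are placeholders:

#include <grpc/support/alloc.h>

#include "src/core/statistics/census_tracing.h"

static void trace_one_op(void) {
  census_op_id id;
  census_trace_obj **active;
  int n = 0;
  int i;

  census_tracing_init();

  id = census_tracing_start_op();
  census_add_method_tag(id, "/pkg.Service/Method");
  census_tracing_print(id, "sent request header");

  /* Snapshot of everything currently in flight; the copies are ours. */
  active = census_get_active_ops(&n);
  if (active != NULL) {
    for (i = 0; i < n; i++) {
      census_trace_obj_destroy(active[i]);
    }
    gpr_free(active);
  }

  census_tracing_end_op(id);
  census_tracing_shutdown();
}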
diff --git a/src/core/statistics/hash_table.c b/src/core/statistics/hash_table.c
index 56bdcc2fff..0cadcd4740 100644
--- a/src/core/statistics/hash_table.c
+++ b/src/core/statistics/hash_table.c
@@ -45,45 +45,45 @@
/* A single hash table data entry */
typedef struct ht_entry {
census_ht_key key;
- void* data;
- struct ht_entry* next;
+ void *data;
+ struct ht_entry *next;
} ht_entry;
/* hash table bucket */
typedef struct bucket {
/* NULL if bucket is empty */
- ht_entry* next;
+ ht_entry *next;
/* -1 if all buckets are empty. */
- gpr_int32 prev_non_empty_bucket;
+ int32_t prev_non_empty_bucket;
/* -1 if all buckets are empty. */
- gpr_int32 next_non_empty_bucket;
+ int32_t next_non_empty_bucket;
} bucket;
struct unresizable_hash_table {
/* Number of entries in the table */
size_t size;
/* Number of buckets */
- gpr_uint32 num_buckets;
+ uint32_t num_buckets;
/* Array of buckets initialized at creation time. Memory consumption is
16 bytes per bucket on a 64-bit platform. */
- bucket* buckets;
+ bucket *buckets;
/* Index of the first non-empty bucket. -1 iff size == 0. */
- gpr_int32 first_non_empty_bucket;
+ int32_t first_non_empty_bucket;
/* Index of the last non_empty bucket. -1 iff size == 0. */
- gpr_int32 last_non_empty_bucket;
+ int32_t last_non_empty_bucket;
/* Immutable options of this hash table, initialized at creation time. */
census_ht_option options;
};
typedef struct entry_locator {
- gpr_int32 bucket_idx;
+ int32_t bucket_idx;
int is_first_in_chain;
int found;
- ht_entry* prev_entry;
+ ht_entry *prev_entry;
} entry_locator;
/* Asserts if option is not valid. */
-void check_options(const census_ht_option* option) {
+void check_options(const census_ht_option *option) {
GPR_ASSERT(option != NULL);
GPR_ASSERT(option->num_buckets > 0);
GPR_ASSERT(option->key_type == CENSUS_HT_UINT64 ||
@@ -98,12 +98,12 @@ void check_options(const census_ht_option* option) {
#define REMOVE_NEXT(options, ptr) \
do { \
- ht_entry* tmp = (ptr)->next; \
+ ht_entry *tmp = (ptr)->next; \
(ptr)->next = tmp->next; \
delete_entry(options, tmp); \
} while (0)
-static void delete_entry(const census_ht_option* opt, ht_entry* p) {
+static void delete_entry(const census_ht_option *opt, ht_entry *p) {
if (opt->delete_data != NULL) {
opt->delete_data(p->data);
}
@@ -113,18 +113,18 @@ static void delete_entry(const census_ht_option* opt, ht_entry* p) {
gpr_free(p);
}
-static gpr_uint64 hash(const census_ht_option* opt, census_ht_key key) {
+static uint64_t hash(const census_ht_option *opt, census_ht_key key) {
return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
}
-census_ht* census_ht_create(const census_ht_option* option) {
+census_ht *census_ht_create(const census_ht_option *option) {
int i;
- census_ht* ret = NULL;
+ census_ht *ret = NULL;
check_options(option);
- ret = (census_ht*)gpr_malloc(sizeof(census_ht));
+ ret = (census_ht *)gpr_malloc(sizeof(census_ht));
ret->size = 0;
ret->num_buckets = option->num_buckets;
- ret->buckets = (bucket*)gpr_malloc(sizeof(bucket) * ret->num_buckets);
+ ret->buckets = (bucket *)gpr_malloc(sizeof(bucket) * ret->num_buckets);
ret->options = *option;
/* initialize each bucket */
for (i = 0; i < ret->options.num_buckets; i++) {
@@ -135,11 +135,11 @@ census_ht* census_ht_create(const census_ht_option* option) {
return ret;
}
-static gpr_int32 find_bucket_idx(const census_ht* ht, census_ht_key key) {
+static int32_t find_bucket_idx(const census_ht *ht, census_ht_key key) {
return hash(&ht->options, key) % ht->num_buckets;
}
-static int keys_match(const census_ht_option* opt, const ht_entry* p,
+static int keys_match(const census_ht_option *opt, const ht_entry *p,
const census_ht_key key) {
GPR_ASSERT(opt->key_type == CENSUS_HT_UINT64 ||
opt->key_type == CENSUS_HT_POINTER);
@@ -147,10 +147,10 @@ static int keys_match(const census_ht_option* opt, const ht_entry* p,
return !opt->compare_keys((p->key).ptr, key.ptr);
}
-static entry_locator ht_find(const census_ht* ht, census_ht_key key) {
+static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = {0, 0, 0, NULL};
- gpr_int32 idx = 0;
- ht_entry* ptr = NULL;
+ int32_t idx = 0;
+ ht_entry *ptr = NULL;
GPR_ASSERT(ht != NULL);
idx = find_bucket_idx(ht, key);
ptr = ht->buckets[idx].next;
@@ -178,7 +178,7 @@ static entry_locator ht_find(const census_ht* ht, census_ht_key key) {
return loc;
}
-void* census_ht_find(const census_ht* ht, census_ht_key key) {
+void *census_ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
return NULL;
@@ -187,9 +187,9 @@ void* census_ht_find(const census_ht* ht, census_ht_key key) {
: loc.prev_entry->next->data;
}
-void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
- gpr_int32 idx = find_bucket_idx(ht, key);
- ht_entry* ptr = NULL;
+void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
+ int32_t idx = find_bucket_idx(ht, key);
+ ht_entry *ptr = NULL;
entry_locator loc = ht_find(ht, key);
if (loc.found) {
/* Replace old value with new value. */
@@ -215,7 +215,7 @@ void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
ht->buckets[idx].next_non_empty_bucket = -1;
ht->last_non_empty_bucket = idx;
}
- ptr = (ht_entry*)gpr_malloc(sizeof(ht_entry));
+ ptr = (ht_entry *)gpr_malloc(sizeof(ht_entry));
ptr->key = key;
ptr->data = data;
ptr->next = ht->buckets[idx].next;
@@ -223,7 +223,7 @@ void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
ht->size++;
}
-void census_ht_erase(census_ht* ht, census_ht_key key) {
+void census_ht_erase(census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
/* noop if not found */
@@ -231,7 +231,7 @@ void census_ht_erase(census_ht* ht, census_ht_key key) {
}
ht->size--;
if (loc.is_first_in_chain) {
- bucket* b = &ht->buckets[loc.bucket_idx];
+ bucket *b = &ht->buckets[loc.bucket_idx];
GPR_ASSERT(b->next != NULL);
/* The only entry in the bucket */
if (b->next->next == NULL) {
@@ -256,20 +256,20 @@ void census_ht_erase(census_ht* ht, census_ht_key key) {
}
/* Returns NULL if input table is empty. */
-census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num) {
- census_ht_kv* ret = NULL;
+census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
+ census_ht_kv *ret = NULL;
int i = 0;
- gpr_int32 idx = -1;
+ int32_t idx = -1;
GPR_ASSERT(ht != NULL && num != NULL);
*num = ht->size;
if (*num == 0) {
return NULL;
}
- ret = (census_ht_kv*)gpr_malloc(sizeof(census_ht_kv) * ht->size);
+ ret = (census_ht_kv *)gpr_malloc(sizeof(census_ht_kv) * ht->size);
idx = ht->first_non_empty_bucket;
while (idx >= 0) {
- ht_entry* ptr = ht->buckets[idx].next;
+ ht_entry *ptr = ht->buckets[idx].next;
for (; ptr != NULL; ptr = ptr->next) {
ret[i].k = ptr->key;
ret[i].v = ptr->data;
@@ -280,8 +280,8 @@ census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num) {
return ret;
}
-static void ht_delete_entry_chain(const census_ht_option* options,
- ht_entry* first) {
+static void ht_delete_entry_chain(const census_ht_option *options,
+ ht_entry *first) {
if (first == NULL) {
return;
}
@@ -291,7 +291,7 @@ static void ht_delete_entry_chain(const census_ht_option* options,
delete_entry(options, first);
}
-void census_ht_destroy(census_ht* ht) {
+void census_ht_destroy(census_ht *ht) {
unsigned i;
for (i = 0; i < ht->num_buckets; ++i) {
ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
@@ -300,4 +300,4 @@ void census_ht_destroy(census_ht* ht) {
gpr_free(ht);
}
-size_t census_ht_get_size(const census_ht* ht) { return ht->size; }
+size_t census_ht_get_size(const census_ht *ht) { return ht->size; }
diff --git a/src/core/statistics/hash_table.h b/src/core/statistics/hash_table.h
index 7bcb4bcd9b..f4bf2ba49a 100644
--- a/src/core/statistics/hash_table.h
+++ b/src/core/statistics/hash_table.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_HASH_TABLE_H
-#define GRPC_INTERNAL_CORE_STATISTICS_HASH_TABLE_H
+#ifndef GRPC_CORE_STATISTICS_HASH_TABLE_H
+#define GRPC_CORE_STATISTICS_HASH_TABLE_H
#include <stddef.h>
@@ -60,8 +60,8 @@ typedef struct unresizable_hash_table census_ht;
/* Currently, the hash_table can take two types of keys. (uint64 for trace
store and const char* for stats store). */
typedef union {
- gpr_uint64 val;
- void* ptr;
+ uint64_t val;
+ void *ptr;
} census_ht_key;
typedef enum census_ht_key_type {
@@ -73,59 +73,59 @@ typedef struct census_ht_option {
/* Type of hash key */
census_ht_key_type key_type;
/* Desired number of buckets, preferably a prime number */
- gpr_int32 num_buckets;
+ int32_t num_buckets;
/* Function to calculate uint64 hash value of the key. Only takes effect if
key_type is POINTER. */
- gpr_uint64 (*hash)(const void*);
+ uint64_t (*hash)(const void *);
/* Function to compare two keys, returns 0 iff equal. Only takes effect if
key_type is POINTER */
- int (*compare_keys)(const void* k1, const void* k2);
+ int (*compare_keys)(const void *k1, const void *k2);
/* Value deleter. NULL if no specialized delete function is needed. */
- void (*delete_data)(void*);
+ void (*delete_data)(void *);
/* Key deleter. NULL if table does not own the key. (e.g. key is part of the
value or key is not owned by the table.) */
- void (*delete_key)(void*);
+ void (*delete_key)(void *);
} census_ht_option;
/* Creates a hashtable with fixed number of buckets according to the settings
specified in 'options' arg. Function pointers "hash" and "compare_keys" must
be provided if key_type is POINTER. Asserts if fail to create. */
-census_ht* census_ht_create(const census_ht_option* options);
+census_ht *census_ht_create(const census_ht_option *options);
/* Deletes hash table instance. Frees all dynamic memory owned by ht.*/
-void census_ht_destroy(census_ht* ht);
+void census_ht_destroy(census_ht *ht);
/* Inserts the input key-val pair into hash_table. If an entry with the same key
exists in the table, the corresponding value will be overwritten by the input
val. */
-void census_ht_insert(census_ht* ht, census_ht_key key, void* val);
+void census_ht_insert(census_ht *ht, census_ht_key key, void *val);
/* Returns pointer to data, returns NULL if not found. */
-void* census_ht_find(const census_ht* ht, census_ht_key key);
+void *census_ht_find(const census_ht *ht, census_ht_key key);
/* Erase hash table entry with input key. Noop if key is not found. */
-void census_ht_erase(census_ht* ht, census_ht_key key);
+void census_ht_erase(census_ht *ht, census_ht_key key);
typedef struct census_ht_kv {
census_ht_key k;
- void* v;
+ void *v;
} census_ht_kv;
/* Returns an array of pointers to all values in the hash table. Order of the
elements can be arbitrary. Sets 'num' to the size of returned array. Caller
owns returned array. */
-census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num);
+census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num);
/* Returns number of elements kept. */
-size_t census_ht_get_size(const census_ht* ht);
+size_t census_ht_get_size(const census_ht *ht);
/* Functor applied on each key-value pair while iterating through entries in the
table. The functor should not mutate data. */
-typedef void (*census_ht_itr_cb)(census_ht_key key, const void* val_ptr,
- void* state);
+typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
+ void *state);
/* Iterates through all key-value pairs in the hash_table. The callback function
should not invalidate data entries. */
-gpr_uint64 census_ht_for_all(const census_ht* ht, census_ht_itr_cb);
+uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_HASH_TABLE_H */
+#endif /* GRPC_CORE_STATISTICS_HASH_TABLE_H */
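
The option struct fixes everything about a table at creation time; with CENSUS_HT_UINT64 keys no hash or comparison callbacks are needed. A small sketch against the API declared above:

#include <stdio.h>

#include "src/core/statistics/hash_table.h"

static void ht_demo(void) {
  static const census_ht_option opt = {
      CENSUS_HT_UINT64 /* key type */, 31 /* n_of_buckets */,
      NULL /* hash */, NULL /* compare_keys */,
      NULL /* delete_data */, NULL /* delete_key */};
  census_ht *ht = census_ht_create(&opt);
  census_ht_key key;
  int value = 42;

  key.val = 7;
  census_ht_insert(ht, key, &value);
  printf("size=%lu found=%d\n", (unsigned long)census_ht_get_size(ht),
         census_ht_find(ht, key) == &value);
  census_ht_erase(ht, key);
  census_ht_destroy(ht);
}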
diff --git a/src/core/statistics/window_stats.c b/src/core/statistics/window_stats.c
index a64e080565..3f2940853a 100644
--- a/src/core/statistics/window_stats.c
+++ b/src/core/statistics/window_stats.c
@@ -47,23 +47,23 @@ typedef struct census_window_stats_sum cws_sum;
/* Each interval is composed of a number of buckets, which hold a count of
entries and a single statistic */
typedef struct census_window_stats_bucket {
- gpr_int64 count;
- void* statistic;
+ int64_t count;
+ void *statistic;
} cws_bucket;
/* Each interval has a set of buckets, and the variables needed to keep
track of their current state */
typedef struct census_window_stats_interval_stats {
/* The buckets. There will be 'granularity' + 1 of these. */
- cws_bucket* buckets;
+ cws_bucket *buckets;
/* Index of the bucket containing the smallest time interval. */
int bottom_bucket;
/* The smallest time storable in the current window. */
- gpr_int64 bottom;
+ int64_t bottom;
/* The largest time storable in the current window + 1ns */
- gpr_int64 top;
+ int64_t top;
/* The width of each bucket in ns. */
- gpr_int64 width;
+ int64_t width;
} cws_interval_stats;
typedef struct census_window_stats {
@@ -74,9 +74,9 @@ typedef struct census_window_stats {
/* Record of stat_info. */
cws_stat_info stat_info;
/* Stats for each interval. */
- cws_interval_stats* interval_stats;
+ cws_interval_stats *interval_stats;
/* The time the newest stat was recorded. */
- gpr_int64 newest_time;
+ int64_t newest_time;
} window_stats;
/* Calculate an actual bucket index from a logical index 'IDX'. Other
@@ -87,18 +87,17 @@ typedef struct census_window_stats {
/* The maximum seconds value we can have in a valid timespec. More than this
will result in overflow in timespec_to_ns(). This works out to ~292 years.
TODO: consider using doubles instead of int64. */
-static gpr_int64 max_seconds =
- (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
+static int64_t max_seconds = (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
-static gpr_int64 timespec_to_ns(const gpr_timespec ts) {
+static int64_t timespec_to_ns(const gpr_timespec ts) {
if (ts.tv_sec > max_seconds) {
return GPR_INT64_MAX - 1;
}
- return (gpr_int64)ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
+ return ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
}
-static void cws_initialize_statistic(void* statistic,
- const cws_stat_info* stat_info) {
+static void cws_initialize_statistic(void *statistic,
+ const cws_stat_info *stat_info) {
if (stat_info->stat_initialize == NULL) {
memset(statistic, 0, stat_info->stat_size);
} else {
@@ -107,39 +106,39 @@ static void cws_initialize_statistic(void* statistic,
}
/* Create and initialize a statistic */
-static void* cws_create_statistic(const cws_stat_info* stat_info) {
- void* stat = gpr_malloc(stat_info->stat_size);
+static void *cws_create_statistic(const cws_stat_info *stat_info) {
+ void *stat = gpr_malloc(stat_info->stat_size);
cws_initialize_statistic(stat, stat_info);
return stat;
}
-window_stats* census_window_stats_create(int nintervals,
+window_stats *census_window_stats_create(int nintervals,
const gpr_timespec intervals[],
int granularity,
- const cws_stat_info* stat_info) {
- window_stats* ret;
+ const cws_stat_info *stat_info) {
+ window_stats *ret;
int i;
/* validate inputs */
GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
stat_info != NULL);
for (i = 0; i < nintervals; i++) {
- gpr_int64 ns = timespec_to_ns(intervals[i]);
+ int64_t ns = timespec_to_ns(intervals[i]);
GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
granularity * 10 <= ns);
}
/* Allocate and initialize relevant data structures */
- ret = (window_stats*)gpr_malloc(sizeof(window_stats));
+ ret = (window_stats *)gpr_malloc(sizeof(window_stats));
ret->nintervals = nintervals;
ret->nbuckets = granularity + 1;
ret->stat_info = *stat_info;
ret->interval_stats =
- (cws_interval_stats*)gpr_malloc(nintervals * sizeof(cws_interval_stats));
+ (cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
for (i = 0; i < nintervals; i++) {
- gpr_int64 size_ns = timespec_to_ns(intervals[i]);
- cws_interval_stats* is = ret->interval_stats + i;
- cws_bucket* buckets = is->buckets =
- (cws_bucket*)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
+ int64_t size_ns = timespec_to_ns(intervals[i]);
+ cws_interval_stats *is = ret->interval_stats + i;
+ cws_bucket *buckets = is->buckets =
+ (cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
int b;
for (b = 0; b < ret->nbuckets; b++) {
buckets[b].statistic = cws_create_statistic(stat_info);
@@ -168,8 +167,8 @@ window_stats* census_window_stats_create(int nintervals,
/* When we try adding a measurement above the current interval range, we
need to "shift" the buckets sufficiently to cover the new range. */
-static void cws_shift_buckets(const window_stats* wstats,
- cws_interval_stats* is, gpr_int64 when_ns) {
+static void cws_shift_buckets(const window_stats *wstats,
+ cws_interval_stats *is, int64_t when_ns) {
int i;
/* number of bucket time widths to "shift" */
int shift;
@@ -191,14 +190,14 @@ static void cws_shift_buckets(const window_stats* wstats,
is->bottom += shift * is->width;
}
-void census_window_stats_add(window_stats* wstats, const gpr_timespec when,
- const void* stat_value) {
+void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
+ const void *stat_value) {
int i;
- gpr_int64 when_ns = timespec_to_ns(when);
+ int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
- cws_interval_stats* is = wstats->interval_stats + i;
- cws_bucket* bucket;
+ cws_interval_stats *is = wstats->interval_stats + i;
+ cws_bucket *bucket;
if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
continue;
}
@@ -218,32 +217,32 @@ void census_window_stats_add(window_stats* wstats, const gpr_timespec when,
}
/* Add a specific bucket contents to an accumulating total. */
-static void cws_add_bucket_to_sum(cws_sum* sum, const cws_bucket* bucket,
- const cws_stat_info* stat_info) {
+static void cws_add_bucket_to_sum(cws_sum *sum, const cws_bucket *bucket,
+ const cws_stat_info *stat_info) {
sum->count += bucket->count;
stat_info->stat_add(sum->statistic, bucket->statistic);
}
/* Add a proportion to an accumulating sum. */
-static void cws_add_proportion_to_sum(double p, cws_sum* sum,
- const cws_bucket* bucket,
- const cws_stat_info* stat_info) {
+static void cws_add_proportion_to_sum(double p, cws_sum *sum,
+ const cws_bucket *bucket,
+ const cws_stat_info *stat_info) {
sum->count += p * bucket->count;
stat_info->stat_add_proportion(p, sum->statistic, bucket->statistic);
}
-void census_window_stats_get_sums(const window_stats* wstats,
+void census_window_stats_get_sums(const window_stats *wstats,
const gpr_timespec when, cws_sum sums[]) {
int i;
- gpr_int64 when_ns = timespec_to_ns(when);
+ int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
int when_bucket;
int new_bucket;
double last_proportion = 1.0;
double bottom_proportion;
- cws_interval_stats* is = wstats->interval_stats + i;
- cws_sum* sum = sums + i;
+ cws_interval_stats *is = wstats->interval_stats + i;
+ cws_sum *sum = sums + i;
sum->count = 0;
cws_initialize_statistic(sum->statistic, &wstats->stat_info);
if (when_ns < is->bottom) {
@@ -255,16 +254,16 @@ void census_window_stats_get_sums(const window_stats* wstats,
/* Calculating the appropriate amount of which buckets to use can get
complicated. Essentially there are two cases:
1) if the "top" bucket (new_bucket, where the newest additions to the
- stats recorded are entered) corresponds to 'when', then we need
- to take a proportion of it - (if when < newest_time) or the full
- thing. We also (possibly) need to take a corresponding
- proportion of the bottom bucket.
+ stats recorded are entered) corresponds to 'when', then we need
+ to take a proportion of it - (if when < newest_time) or the full
+ thing. We also (possibly) need to take a corresponding
+ proportion of the bottom bucket.
2) Other cases, we just take a straight proportion.
- */
+ */
when_bucket = (when_ns - is->bottom) / is->width;
new_bucket = (wstats->newest_time - is->bottom) / is->width;
if (new_bucket == when_bucket) {
- gpr_int64 bottom_bucket_time = is->bottom + when_bucket * is->width;
+ int64_t bottom_bucket_time = is->bottom + when_bucket * is->width;
if (when_ns < wstats->newest_time) {
last_proportion = (double)(when_ns - bottom_bucket_time) /
(double)(wstats->newest_time - bottom_bucket_time);
@@ -300,7 +299,7 @@ void census_window_stats_get_sums(const window_stats* wstats,
}
}
-void census_window_stats_destroy(window_stats* wstats) {
+void census_window_stats_destroy(window_stats *wstats) {
int i;
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
diff --git a/src/core/statistics/window_stats.h b/src/core/statistics/window_stats.h
index d733d8d247..774277180f 100644
--- a/src/core/statistics/window_stats.h
+++ b/src/core/statistics/window_stats.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_STATISTICS_WINDOW_STATS_H
-#define GRPC_INTERNAL_CORE_STATISTICS_WINDOW_STATS_H
+#ifndef GRPC_CORE_STATISTICS_WINDOW_STATS_H
+#define GRPC_CORE_STATISTICS_WINDOW_STATS_H
#include <grpc/support/time.h>
@@ -90,11 +90,11 @@
// Record a new event, taking 15.3ms, transferring 1784 bytes.
stat.latency = 0.153;
stat.bytes = 1784;
- census_window_stats_add(stats, gpr_now(), &stat);
+ census_window_stats_add(stats, gpr_now(GPR_CLOCK_REALTIME), &stat);
// Get sums and print them out
result[kMinInterval].statistic = &sums[kMinInterval];
result[kHourInterval].statistic = &sums[kHourInterval];
- census_window_stats_get_sums(stats, gpr_now(), result);
+ census_window_stats_get_sums(stats, gpr_now(GPR_CLOCK_REALTIME), result);
printf("%d events/min, average time %gs, average bytes %g\n",
result[kMinInterval].count,
(my_stat*)result[kMinInterval].statistic->latency /
@@ -120,13 +120,13 @@ typedef struct census_window_stats_stat_info {
size_t stat_size;
/* Function to initialize a user-defined statistics object. If this is set
* to NULL, then the object will be zero-initialized. */
- void (*stat_initialize)(void* stat);
+ void (*stat_initialize)(void *stat);
/* Function to add one user-defined statistics object ('addme') to 'base' */
- void (*stat_add)(void* base, const void* addme);
+ void (*stat_add)(void *base, const void *addme);
/* As for previous function, but only add a proportion 'p'. This API will
currently only use 'p' values in the range [0,1], but other values are
possible in the future, and should be supported. */
- void (*stat_add_proportion)(double p, void* base, const void* addme);
+ void (*stat_add_proportion)(double p, void *base, const void *addme);
} census_window_stats_stat_info;
/* Create a new window_stats object. 'nintervals' is the number of
@@ -138,29 +138,29 @@ typedef struct census_window_stats_stat_info {
years will be treated as essentially infinite in size. This function will
GPR_ASSERT() if the object cannot be created or any of the parameters have
invalid values. This function is thread-safe. */
-struct census_window_stats* census_window_stats_create(
+struct census_window_stats *census_window_stats_create(
int nintervals, const gpr_timespec intervals[], int granularity,
- const census_window_stats_stat_info* stat_info);
+ const census_window_stats_stat_info *stat_info);
/* Add a new measurement (in 'stat_value'), as of a given time ('when').
This function is thread-compatible. */
-void census_window_stats_add(struct census_window_stats* wstats,
- const gpr_timespec when, const void* stat_value);
+void census_window_stats_add(struct census_window_stats *wstats,
+ const gpr_timespec when, const void *stat_value);
 /* Structure used to record a single interval's sum for a given statistic */
typedef struct census_window_stats_sum {
/* Total count of samples. Note that because some internal interpolation
- is performed, the count of samples returned for each interval may not be an
- integral value. */
+ is performed, the count of samples returned for each interval may not be an
+ integral value. */
double count;
/* Sum for statistic */
- void* statistic;
+ void *statistic;
} census_window_stats_sums;
/* Retrieve a set of all values stored in a window_stats object 'wstats'. The
number of 'sums' MUST be the same as the number 'nintervals' used in
census_window_stats_create(). This function is thread-compatible. */
-void census_window_stats_get_sums(const struct census_window_stats* wstats,
+void census_window_stats_get_sums(const struct census_window_stats *wstats,
const gpr_timespec when,
struct census_window_stats_sum sums[]);
@@ -168,6 +168,6 @@ void census_window_stats_get_sums(const struct census_window_stats* wstats,
object will no longer be usable from any of the above functions (and
calling them will most likely result in a NULL-pointer dereference or
assertion failure). This function is thread-compatible. */
-void census_window_stats_destroy(struct census_window_stats* wstats);
+void census_window_stats_destroy(struct census_window_stats *wstats);
-#endif /* GRPC_INTERNAL_CORE_STATISTICS_WINDOW_STATS_H */
+#endif /* GRPC_CORE_STATISTICS_WINDOW_STATS_H */
diff --git a/src/core/support/alloc.c b/src/core/support/alloc.c
index d2ed82e771..b99584bd20 100644
--- a/src/core/support/alloc.c
+++ b/src/core/support/alloc.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,23 +34,47 @@
#include <grpc/support/alloc.h>
#include <stdlib.h>
+#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
+#include "src/core/profiling/timers.h"
+
+static gpr_allocation_functions g_alloc_functions = {malloc, realloc, free};
+
+gpr_allocation_functions gpr_get_allocation_functions() {
+ return g_alloc_functions;
+}
+
+void gpr_set_allocation_functions(gpr_allocation_functions functions) {
+ GPR_ASSERT(functions.malloc_fn != NULL);
+ GPR_ASSERT(functions.realloc_fn != NULL);
+ GPR_ASSERT(functions.free_fn != NULL);
+ g_alloc_functions = functions;
+}
void *gpr_malloc(size_t size) {
- void *p = malloc(size);
+ void *p;
+ GPR_TIMER_BEGIN("gpr_malloc", 0);
+ p = g_alloc_functions.malloc_fn(size);
if (!p) {
abort();
}
+ GPR_TIMER_END("gpr_malloc", 0);
return p;
}
-void gpr_free(void *p) { free(p); }
+void gpr_free(void *p) {
+ GPR_TIMER_BEGIN("gpr_free", 0);
+ g_alloc_functions.free_fn(p);
+ GPR_TIMER_END("gpr_free", 0);
+}
void *gpr_realloc(void *p, size_t size) {
- p = realloc(p, size);
+ GPR_TIMER_BEGIN("gpr_realloc", 0);
+ p = g_alloc_functions.realloc_fn(p, size);
if (!p) {
abort();
}
+ GPR_TIMER_END("gpr_realloc", 0);
return p;
}
@@ -58,9 +82,9 @@ void *gpr_malloc_aligned(size_t size, size_t alignment_log) {
size_t alignment = ((size_t)1) << alignment_log;
size_t extra = alignment - 1 + sizeof(void *);
void *p = gpr_malloc(size + extra);
- void **ret = (void **)(((gpr_uintptr)p + extra) & ~(alignment - 1));
+ void **ret = (void **)(((uintptr_t)p + extra) & ~(alignment - 1));
ret[-1] = p;
return (void *)ret;
}
-void gpr_free_aligned(void *ptr) { free(((void **)ptr)[-1]); }
+void gpr_free_aligned(void *ptr) { gpr_free(((void **)ptr)[-1]); }
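
Note: the new gpr_get_allocation_functions / gpr_set_allocation_functions hooks let a process install custom allocators before any gRPC allocation happens. A minimal sketch; the counting wrappers and counter below are hypothetical, only the hook API and the malloc_fn/realloc_fn/free_fn fields come from this patch:

#include <stdlib.h>
#include <grpc/support/alloc.h>

/* Hypothetical counter, for illustration only. */
static size_t g_alloc_count;

static void *counting_malloc(size_t size) {
  g_alloc_count++;
  return malloc(size);
}
static void *counting_realloc(void *p, size_t size) { return realloc(p, size); }
static void counting_free(void *p) { free(p); }

static void install_counting_allocator(void) {
  gpr_allocation_functions fns = gpr_get_allocation_functions();
  fns.malloc_fn = counting_malloc;
  fns.realloc_fn = counting_realloc;
  fns.free_fn = counting_free;
  gpr_set_allocation_functions(fns); /* asserts that all three are non-NULL */
}
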
diff --git a/src/core/support/avl.c b/src/core/support/avl.c
new file mode 100644
index 0000000000..f378b3ee17
--- /dev/null
+++ b/src/core/support/avl.c
@@ -0,0 +1,288 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/avl.h>
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable) {
+ gpr_avl out;
+ out.vtable = vtable;
+ out.root = NULL;
+ return out;
+}
+
+static gpr_avl_node *ref_node(gpr_avl_node *node) {
+ if (node) {
+ gpr_ref(&node->refs);
+ }
+ return node;
+}
+
+static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node) {
+ if (node == NULL) {
+ return;
+ }
+ if (gpr_unref(&node->refs)) {
+ vtable->destroy_key(node->key);
+ vtable->destroy_value(node->value);
+ unref_node(vtable, node->left);
+ unref_node(vtable, node->right);
+ gpr_free(node);
+ }
+}
+
+static long node_height(gpr_avl_node *node) {
+ return node == NULL ? 0 : node->height;
+}
+
+#ifndef NDEBUG
+static long calculate_height(gpr_avl_node *node) {
+ return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left),
+ calculate_height(node->right));
+}
+
+static gpr_avl_node *assert_invariants(gpr_avl_node *n) {
+ if (n == NULL) return NULL;
+ assert_invariants(n->left);
+ assert_invariants(n->right);
+ assert(calculate_height(n) == n->height);
+ assert(labs(node_height(n->left) - node_height(n->right)) <= 1);
+ return n;
+}
+#else
+static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; }
+#endif
+
+gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *node = gpr_malloc(sizeof(*node));
+ gpr_ref_init(&node->refs, 1);
+ node->key = key;
+ node->value = value;
+ node->left = assert_invariants(left);
+ node->right = assert_invariants(right);
+ node->height = 1 + GPR_MAX(node_height(left), node_height(right));
+ return node;
+}
+
+static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key) {
+ long cmp;
+
+ if (node == NULL) {
+ return NULL;
+ }
+
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ return node;
+ } else if (cmp > 0) {
+ return get(vtable, node->left, key);
+ } else {
+ return get(vtable, node->right, key);
+ }
+}
+
+void *gpr_avl_get(gpr_avl avl, void *key) {
+ gpr_avl_node *node = get(avl.vtable, avl.root, key);
+ return node ? node->value : NULL;
+}
+
+static gpr_avl_node *rotate_left(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *n =
+ new_node(vtable->copy_key(right->key), vtable->copy_value(right->value),
+ new_node(key, value, left, ref_node(right->left)),
+ ref_node(right->right));
+ unref_node(vtable, right);
+ return n;
+}
+
+static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(left->key), vtable->copy_value(left->value),
+ ref_node(left->left), new_node(key, value, ref_node(left->right), right));
+ unref_node(vtable, left);
+ return n;
+}
+
+static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ /* rotate_right(..., rotate_left(left), right) */
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(left->right->key),
+ vtable->copy_value(left->right->value),
+ new_node(vtable->copy_key(left->key), vtable->copy_value(left->value),
+ ref_node(left->left), ref_node(left->right->left)),
+ new_node(key, value, ref_node(left->right->right), right));
+ unref_node(vtable, left);
+ return n;
+}
+
+static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ /* rotate_left(..., left, rotate_right(right)) */
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(right->left->key),
+ vtable->copy_value(right->left->value),
+ new_node(key, value, left, ref_node(right->left->left)),
+ new_node(vtable->copy_key(right->key), vtable->copy_value(right->value),
+ ref_node(right->left->right), ref_node(right->right)));
+ unref_node(vtable, right);
+ return n;
+}
+
+static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ switch (node_height(left) - node_height(right)) {
+ case 2:
+ if (node_height(left->left) - node_height(left->right) == -1) {
+ return assert_invariants(
+ rotate_left_right(vtable, key, value, left, right));
+ } else {
+ return assert_invariants(rotate_right(vtable, key, value, left, right));
+ }
+ case -2:
+ if (node_height(right->left) - node_height(right->right) == 1) {
+ return assert_invariants(
+ rotate_right_left(vtable, key, value, left, right));
+ } else {
+ return assert_invariants(rotate_left(vtable, key, value, left, right));
+ }
+ default:
+ return assert_invariants(new_node(key, value, left, right));
+ }
+}
+
+static gpr_avl_node *add(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key, void *value) {
+ long cmp;
+ if (node == NULL) {
+ return new_node(key, value, NULL, NULL);
+ }
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ return new_node(key, value, ref_node(node->left), ref_node(node->right));
+ } else if (cmp > 0) {
+ return rebalance(
+ vtable, vtable->copy_key(node->key), vtable->copy_value(node->value),
+ add(vtable, node->left, key, value), ref_node(node->right));
+ } else {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value), ref_node(node->left),
+ add(vtable, node->right, key, value));
+ }
+}
+
+gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value) {
+ gpr_avl_node *old_root = avl.root;
+ avl.root = add(avl.vtable, avl.root, key, value);
+ assert_invariants(avl.root);
+ unref_node(avl.vtable, old_root);
+ return avl;
+}
+
+static gpr_avl_node *in_order_head(gpr_avl_node *node) {
+ while (node->left != NULL) {
+ node = node->left;
+ }
+ return node;
+}
+
+static gpr_avl_node *in_order_tail(gpr_avl_node *node) {
+ while (node->right != NULL) {
+ node = node->right;
+ }
+ return node;
+}
+
+static gpr_avl_node *remove(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key) {
+ long cmp;
+ if (node == NULL) {
+ return NULL;
+ }
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ if (node->left == NULL) {
+ return ref_node(node->right);
+ } else if (node->right == NULL) {
+ return ref_node(node->left);
+ } else if (node->left->height < node->right->height) {
+ gpr_avl_node *h = in_order_head(node->right);
+ return rebalance(vtable, vtable->copy_key(h->key),
+ vtable->copy_value(h->value), ref_node(node->left),
+ remove(vtable, node->right, h->key));
+ } else {
+ gpr_avl_node *h = in_order_tail(node->left);
+ return rebalance(
+ vtable, vtable->copy_key(h->key), vtable->copy_value(h->value),
+ remove(vtable, node->left, h->key), ref_node(node->right));
+ }
+ } else if (cmp > 0) {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value),
+ remove(vtable, node->left, key), ref_node(node->right));
+ } else {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value), ref_node(node->left),
+ remove(vtable, node->right, key));
+ }
+}
+
+gpr_avl gpr_avl_remove(gpr_avl avl, void *key) {
+ gpr_avl_node *old_root = avl.root;
+ avl.root = remove(avl.vtable, avl.root, key);
+ assert_invariants(avl.root);
+ unref_node(avl.vtable, old_root);
+ return avl;
+}
+
+gpr_avl gpr_avl_ref(gpr_avl avl) {
+ ref_node(avl.root);
+ return avl;
+}
+
+void gpr_avl_unref(gpr_avl avl) { unref_node(avl.vtable, avl.root); }
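
Note: gpr_avl is an immutable, reference-counted AVL map, so every add/remove returns a new tree value and the old root is unreffed internally. A minimal usage sketch, assuming the vtable fields are exactly the five accessed in the code above (keys and values here are small integers, so the copy/destroy hooks are no-ops):

#include <stdint.h>

#include <grpc/support/avl.h>

static void noop_destroy(void *p) { (void)p; }
static void *identity_copy(void *p) { return p; }
static long compare_ints(void *a, void *b) {
  return (long)((intptr_t)a - (intptr_t)b);
}

static void avl_example(void) {
  gpr_avl_vtable vt;
  gpr_avl map;
  intptr_t v;
  vt.destroy_key = noop_destroy;
  vt.copy_key = identity_copy;
  vt.compare_keys = compare_ints;
  vt.destroy_value = noop_destroy;
  vt.copy_value = identity_copy;
  map = gpr_avl_create(&vt);
  map = gpr_avl_add(map, (void *)(intptr_t)1, (void *)(intptr_t)100);
  map = gpr_avl_add(map, (void *)(intptr_t)2, (void *)(intptr_t)200);
  v = (intptr_t)gpr_avl_get(map, (void *)(intptr_t)2); /* 200 */
  map = gpr_avl_remove(map, (void *)(intptr_t)1);      /* key 1 dropped */
  gpr_avl_unref(map);                                  /* releases all nodes */
  (void)v;
}
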
diff --git a/src/core/surface/byte_buffer_queue.c b/src/core/support/backoff.c
index 7c31bfe5da..7458219645 100644
--- a/src/core/surface/byte_buffer_queue.c
+++ b/src/core/support/backoff.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,61 +31,41 @@
*
*/
-#include "src/core/surface/byte_buffer_queue.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/useful.h>
-
-static void bba_destroy(grpc_bbq_array *array, size_t start_pos) {
- size_t i;
- for (i = start_pos; i < array->count; i++) {
- grpc_byte_buffer_destroy(array->data[i]);
- }
- gpr_free(array->data);
-}
-
-/* Append an operation to an array, expanding as needed */
-static void bba_push(grpc_bbq_array *a, grpc_byte_buffer *buffer) {
- if (a->count == a->capacity) {
- a->capacity = GPR_MAX(a->capacity * 2, 8);
- a->data = gpr_realloc(a->data, sizeof(grpc_byte_buffer *) * a->capacity);
- }
- a->data[a->count++] = buffer;
-}
+#include "src/core/support/backoff.h"
-void grpc_bbq_destroy(grpc_byte_buffer_queue *q) {
- bba_destroy(&q->filling, 0);
- bba_destroy(&q->draining, q->drain_pos);
-}
+#include <grpc/support/useful.h>
-int grpc_bbq_empty(grpc_byte_buffer_queue *q) {
- return (q->drain_pos == q->draining.count && q->filling.count == 0);
+void gpr_backoff_init(gpr_backoff *backoff, double multiplier, double jitter,
+ int64_t min_timeout_millis, int64_t max_timeout_millis) {
+ backoff->multiplier = multiplier;
+ backoff->jitter = jitter;
+ backoff->min_timeout_millis = min_timeout_millis;
+ backoff->max_timeout_millis = max_timeout_millis;
+ backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
-void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) {
- bba_push(&q->filling, buffer);
+gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) {
+ backoff->current_timeout_millis = backoff->min_timeout_millis;
+ return gpr_time_add(
+ now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
}
-void grpc_bbq_flush(grpc_byte_buffer_queue *q) {
- grpc_byte_buffer *bb;
- while ((bb = grpc_bbq_pop(q))) {
- grpc_byte_buffer_destroy(bb);
- }
+/* Generate a random number between 0 and 1. */
+static double generate_uniform_random_number(uint32_t *rng_state) {
+ *rng_state = (1103515245 * *rng_state + 12345) % ((uint32_t)1 << 31);
+ return *rng_state / (double)((uint32_t)1 << 31);
}
-grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
- grpc_bbq_array temp_array;
-
- if (q->drain_pos == q->draining.count) {
- if (q->filling.count == 0) {
- return NULL;
- }
- q->draining.count = 0;
- q->drain_pos = 0;
- /* swap arrays */
- temp_array = q->filling;
- q->filling = q->draining;
- q->draining = temp_array;
- }
-
- return q->draining.data[q->drain_pos++];
+gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
+ double new_timeout_millis =
+ backoff->multiplier * (double)backoff->current_timeout_millis;
+ double jitter_range = backoff->jitter * new_timeout_millis;
+ double jitter =
+ (2 * generate_uniform_random_number(&backoff->rng_state) - 1) *
+ jitter_range;
+ backoff->current_timeout_millis =
+ GPR_CLAMP((int64_t)(new_timeout_millis + jitter),
+ backoff->min_timeout_millis, backoff->max_timeout_millis);
+ return gpr_time_add(
+ now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
}
diff --git a/src/core/support/backoff.h b/src/core/support/backoff.h
new file mode 100644
index 0000000000..f7730fde2a
--- /dev/null
+++ b/src/core/support/backoff.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SUPPORT_BACKOFF_H
+#define GRPC_CORE_SUPPORT_BACKOFF_H
+
+#include <grpc/support/time.h>
+
+typedef struct {
+ /// const: multiplier between retry attempts
+ double multiplier;
+ /// const: amount to randomize backoffs
+ double jitter;
+ /// const: minimum time between retries in milliseconds
+ int64_t min_timeout_millis;
+ /// const: maximum time between retries in milliseconds
+ int64_t max_timeout_millis;
+
+  /// random number generator state
+ uint32_t rng_state;
+
+ /// current retry timeout in milliseconds
+ int64_t current_timeout_millis;
+} gpr_backoff;
+
+/// Initialize backoff machinery - does not need to be destroyed
+void gpr_backoff_init(gpr_backoff *backoff, double multiplier, double jitter,
+ int64_t min_timeout_millis, int64_t max_timeout_millis);
+
+/// Begin retry loop: returns a timespec for the NEXT retry
+gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now);
+/// Step a retry loop: returns a timespec for the NEXT retry
+gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now);
+
+#endif /* GRPC_CORE_SUPPORT_BACKOFF_H */
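
Note: a sketch of the intended call pattern for gpr_backoff, assuming a reconnect loop with a 1s initial delay, 2x growth, 20% jitter and a 2 minute cap; try_connect() is a hypothetical stand-in for real work:

#include <grpc/support/time.h>
#include "src/core/support/backoff.h"

static void reconnect_with_backoff(int (*try_connect)(void)) {
  gpr_backoff backoff;
  gpr_timespec next;
  gpr_backoff_init(&backoff, 2.0 /* multiplier */, 0.2 /* jitter */,
                   1000 /* min ms */, 120000 /* max ms */);
  if (try_connect()) return;
  /* First failure: begin() resets the timeout to the minimum. */
  next = gpr_backoff_begin(&backoff, gpr_now(GPR_CLOCK_MONOTONIC));
  for (;;) {
    gpr_sleep_until(next);
    if (try_connect()) return;
    /* Each subsequent failure grows the timeout, clamped to the maximum. */
    next = gpr_backoff_step(&backoff, gpr_now(GPR_CLOCK_MONOTONIC));
  }
}
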
diff --git a/src/core/census/context.h b/src/core/support/block_annotate.h
index d43a69f7e5..79a18039f4 100644
--- a/src/core/census/context.h
+++ b/src/core/support/block_annotate.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,18 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H
-#define GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H
+#ifndef GRPC_CORE_SUPPORT_BLOCK_ANNOTATE_H
+#define GRPC_CORE_SUPPORT_BLOCK_ANNOTATE_H
-#include <grpc/census.h>
+/* These annotations identify the beginning and end of regions where
+ the code may block for reasons other than synchronization functions.
+ These include poll, epoll, and getaddrinfo. */
-/* census_context is the in-memory representation of information needed to
- * maintain tracing, RPC statistics and resource usage information. */
-struct census_context {
- gpr_uint64 op_id; /* Operation identifier - unique per-context */
- gpr_uint64 trace_id; /* Globally unique trace identifier */
- /* TODO(aveitch) Add census tags:
- const census_tag_set *tags;
- */
-};
+#define GRPC_SCHEDULING_START_BLOCKING_REGION \
+ do { \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION \
+ do { \
+ } while (0)
-#endif /* GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H */
+#endif /* GRPC_CORE_SUPPORT_BLOCK_ANNOTATE_H */
diff --git a/src/core/support/cancellable.c b/src/core/support/cancellable.c
deleted file mode 100644
index 5a4d488dd3..0000000000
--- a/src/core/support/cancellable.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* Implementation for gpr_cancellable */
-
-#include <grpc/support/atm.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-
-void gpr_cancellable_init(gpr_cancellable *c) {
- gpr_mu_init(&c->mu);
- c->cancelled = 0;
- c->waiters.next = &c->waiters;
- c->waiters.prev = &c->waiters;
- c->waiters.mu = NULL;
- c->waiters.cv = NULL;
-}
-
-void gpr_cancellable_destroy(gpr_cancellable *c) { gpr_mu_destroy(&c->mu); }
-
-int gpr_cancellable_is_cancelled(gpr_cancellable *c) {
- return gpr_atm_acq_load(&c->cancelled) != 0;
-}
-
-/* Threads in gpr_cv_cancellable_wait(cv, mu, ..., c) place themselves on a
- linked list c->waiters of gpr_cancellable_list_ before waiting on their
- condition variables. They check for cancellation while holding *mu. Thus,
- to wake a thread from gpr_cv_cancellable_wait(), it suffices to:
- - set c->cancelled
- - acquire and release *mu
- - gpr_cv_broadcast(cv)
-
- However, gpr_cancellable_cancel() may not use gpr_mu_lock(mu), since the
- caller may already hold *mu---a possible deadlock. (If we knew the caller
- did not hold *mu, care would still be needed, because c->mu follows *mu in
- the locking order, so *mu could not be acquired while holding c->mu---which
- is needed to iterate over c->waiters.)
-
- Therefore, gpr_cancellable_cancel() uses gpr_mu_trylock() rather than
- gpr_mu_lock(), and retries until either gpr_mu_trylock() succeeds or the
- thread leaves gpr_cv_cancellable_wait() for other reasons. In the first
- case, gpr_cancellable_cancel() removes the entry from the waiters list; in
- the second, the waiting thread removes itself from the list.
-
- A one-entry cache of mutexes and condition variables processed is kept to
- avoid doing the same work again and again if many threads are blocked in the
- same place. However, it's important to broadcast on a condition variable if
- the corresponding mutex has been locked successfully, even if the condition
- variable has been signalled before. */
-
-void gpr_cancellable_cancel(gpr_cancellable *c) {
- if (!gpr_cancellable_is_cancelled(c)) {
- int failures;
- int backoff = 1;
- do {
- struct gpr_cancellable_list_ *l;
- struct gpr_cancellable_list_ *nl;
- gpr_mu *omu = 0; /* one-element cache of a processed gpr_mu */
- gpr_cv *ocv = 0; /* one-element cache of a processd gpr_cv */
- gpr_mu_lock(&c->mu);
- gpr_atm_rel_store(&c->cancelled, 1);
- failures = 0;
- for (l = c->waiters.next; l != &c->waiters; l = nl) {
- nl = l->next;
- if (omu != l->mu) {
- omu = l->mu;
- if (gpr_mu_trylock(l->mu)) {
- gpr_mu_unlock(l->mu);
- l->next->prev = l->prev; /* remove *l from list */
- l->prev->next = l->next;
- /* allow unconditional dequeue in gpr_cv_cancellable_wait() */
- l->next = l;
- l->prev = l;
- ocv = 0; /* force broadcast */
- } else {
- failures++;
- }
- }
- if (ocv != l->cv) {
- ocv = l->cv;
- gpr_cv_broadcast(l->cv);
- }
- }
- gpr_mu_unlock(&c->mu);
- if (failures != 0) {
- if (backoff < 10) {
- volatile int i;
- for (i = 0; i != (1 << backoff); i++) {
- }
- backoff++;
- } else {
- gpr_event ev;
- gpr_event_init(&ev);
- gpr_event_wait(&ev,
- gpr_time_add(gpr_now(), gpr_time_from_micros(1000)));
- }
- }
- } while (failures != 0);
- }
-}
-
-int gpr_cv_cancellable_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline,
- gpr_cancellable *c) {
- gpr_int32 timeout;
- gpr_mu_lock(&c->mu);
- timeout = gpr_cancellable_is_cancelled(c);
- if (!timeout) {
- struct gpr_cancellable_list_ le;
- le.mu = mu;
- le.cv = cv;
- le.next = c->waiters.next;
- le.prev = &c->waiters;
- le.next->prev = &le;
- le.prev->next = &le;
- gpr_mu_unlock(&c->mu);
- timeout = gpr_cv_wait(cv, mu, abs_deadline);
- gpr_mu_lock(&c->mu);
- le.next->prev = le.prev;
- le.prev->next = le.next;
- if (!timeout) {
- timeout = gpr_cancellable_is_cancelled(c);
- }
- }
- gpr_mu_unlock(&c->mu);
- return timeout;
-}
diff --git a/src/core/support/cmdline.c b/src/core/support/cmdline.c
index 45a3182f73..b517f30b2d 100644
--- a/src/core/support/cmdline.c
+++ b/src/core/support/cmdline.c
@@ -62,11 +62,13 @@ struct gpr_cmdline {
void (*extra_arg)(void *user_data, const char *arg);
void *extra_arg_user_data;
- void (*state)(gpr_cmdline *cl, char *arg);
+ int (*state)(gpr_cmdline *cl, char *arg);
arg *cur_arg;
+
+ int survive_failure;
};
-static void normal_state(gpr_cmdline *cl, char *arg);
+static int normal_state(gpr_cmdline *cl, char *arg);
gpr_cmdline *gpr_cmdline_create(const char *description) {
gpr_cmdline *cl = gpr_malloc(sizeof(gpr_cmdline));
@@ -78,6 +80,10 @@ gpr_cmdline *gpr_cmdline_create(const char *description) {
return cl;
}
+void gpr_cmdline_set_survive_failure(gpr_cmdline *cl) {
+ cl->survive_failure = 1;
+}
+
void gpr_cmdline_destroy(gpr_cmdline *cl) {
while (cl->args) {
arg *a = cl->args;
@@ -185,16 +191,22 @@ char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
return tmp;
}
-static void print_usage_and_die(gpr_cmdline *cl) {
+static int print_usage_and_die(gpr_cmdline *cl) {
char *usage = gpr_cmdline_usage_string(cl, cl->argv0);
fprintf(stderr, "%s", usage);
gpr_free(usage);
- exit(1);
+ if (!cl->survive_failure) {
+ exit(1);
+ }
+ return 0;
}
-static void extra_state(gpr_cmdline *cl, char *arg) {
- if (!cl->extra_arg) print_usage_and_die(cl);
- cl->extra_arg(cl->extra_arg_user_data, arg);
+static int extra_state(gpr_cmdline *cl, char *str) {
+ if (!cl->extra_arg) {
+ return print_usage_and_die(cl);
+ }
+ cl->extra_arg(cl->extra_arg_user_data, str);
+ return 1;
}
static arg *find_arg(gpr_cmdline *cl, char *name) {
@@ -208,13 +220,13 @@ static arg *find_arg(gpr_cmdline *cl, char *name) {
if (!a) {
fprintf(stderr, "Unknown argument: %s\n", name);
- print_usage_and_die(cl);
+ return NULL;
}
return a;
}
-static void value_state(gpr_cmdline *cl, char *arg) {
+static int value_state(gpr_cmdline *cl, char *str) {
long intval;
char *end;
@@ -222,81 +234,89 @@ static void value_state(gpr_cmdline *cl, char *arg) {
switch (cl->cur_arg->type) {
case ARGTYPE_INT:
- intval = strtol(arg, &end, 0);
+ intval = strtol(str, &end, 0);
if (*end || intval < INT_MIN || intval > INT_MAX) {
- fprintf(stderr, "expected integer, got '%s' for %s\n", arg,
+ fprintf(stderr, "expected integer, got '%s' for %s\n", str,
cl->cur_arg->name);
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = (int)intval;
break;
case ARGTYPE_BOOL:
- if (0 == strcmp(arg, "1") || 0 == strcmp(arg, "true")) {
+ if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) {
*(int *)cl->cur_arg->value = 1;
- } else if (0 == strcmp(arg, "0") || 0 == strcmp(arg, "false")) {
+ } else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) {
*(int *)cl->cur_arg->value = 0;
} else {
- fprintf(stderr, "expected boolean, got '%s' for %s\n", arg,
+ fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
cl->cur_arg->name);
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
break;
case ARGTYPE_STRING:
- *(char **)cl->cur_arg->value = arg;
+ *(char **)cl->cur_arg->value = str;
break;
}
cl->state = normal_state;
+ return 1;
}
-static void normal_state(gpr_cmdline *cl, char *arg) {
+static int normal_state(gpr_cmdline *cl, char *str) {
char *eq = NULL;
char *tmp = NULL;
char *arg_name = NULL;
+ int r = 1;
- if (0 == strcmp(arg, "-help") || 0 == strcmp(arg, "--help") ||
- 0 == strcmp(arg, "-h")) {
- print_usage_and_die(cl);
+ if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
+ 0 == strcmp(str, "-h")) {
+ return print_usage_and_die(cl);
}
cl->cur_arg = NULL;
- if (arg[0] == '-') {
- if (arg[1] == '-') {
- if (arg[2] == 0) {
+ if (str[0] == '-') {
+ if (str[1] == '-') {
+ if (str[2] == 0) {
/* handle '--' to move to just extra args */
cl->state = extra_state;
- return;
+ return 1;
}
- arg += 2;
+ str += 2;
} else {
- arg += 1;
+ str += 1;
}
- /* first byte of arg is now past the leading '-' or '--' */
- if (arg[0] == 'n' && arg[1] == 'o' && arg[2] == '-') {
- /* arg is of the form '--no-foo' - it's a flag disable */
- arg += 3;
- cl->cur_arg = find_arg(cl, arg);
+ /* first byte of str is now past the leading '-' or '--' */
+ if (str[0] == 'n' && str[1] == 'o' && str[2] == '-') {
+ /* str is of the form '--no-foo' - it's a flag disable */
+ str += 3;
+ cl->cur_arg = find_arg(cl, str);
+ if (cl->cur_arg == NULL) {
+ return print_usage_and_die(cl);
+ }
if (cl->cur_arg->type != ARGTYPE_BOOL) {
- fprintf(stderr, "%s is not a flag argument\n", arg);
- print_usage_and_die(cl);
+ fprintf(stderr, "%s is not a flag argument\n", str);
+ return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = 0;
- return; /* early out */
+ return 1; /* early out */
}
- eq = strchr(arg, '=');
+ eq = strchr(str, '=');
if (eq != NULL) {
/* copy the string into a temp buffer and extract the name */
- tmp = arg_name = gpr_malloc((size_t)(eq - arg + 1));
- memcpy(arg_name, arg, (size_t)(eq - arg));
- arg_name[eq - arg] = 0;
+ tmp = arg_name = gpr_malloc((size_t)(eq - str + 1));
+ memcpy(arg_name, str, (size_t)(eq - str));
+ arg_name[eq - str] = 0;
} else {
- arg_name = arg;
+ arg_name = str;
}
cl->cur_arg = find_arg(cl, arg_name);
+ if (cl->cur_arg == NULL) {
+ return print_usage_and_die(cl);
+ }
if (eq != NULL) {
- /* arg was of the type --foo=value, parse the value */
- value_state(cl, eq + 1);
+ /* str was of the type --foo=value, parse the value */
+ r = value_state(cl, eq + 1);
} else if (cl->cur_arg->type != ARGTYPE_BOOL) {
/* flag types don't have a '--foo value' variant, other types do */
cl->state = value_state;
@@ -305,19 +325,23 @@ static void normal_state(gpr_cmdline *cl, char *arg) {
*(int *)cl->cur_arg->value = 1;
}
} else {
- extra_state(cl, arg);
+ r = extra_state(cl, str);
}
gpr_free(tmp);
+ return r;
}
-void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
+int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
int i;
GPR_ASSERT(argc >= 1);
cl->argv0 = argv[0];
for (i = 1; i < argc; i++) {
- cl->state(cl, argv[i]);
+ if (!cl->state(cl, argv[i])) {
+ return 0;
+ }
}
+ return 1;
}
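
Note: with the new survive-failure mode, bad flags no longer exit(1); gpr_cmdline_parse reports success instead. A minimal sketch (the "port" flag and the helper are hypothetical, and the add_int signature is assumed from the existing cmdline API):

#include <grpc/support/cmdline.h>

static int parse_flags(int argc, char **argv, int *port) {
  int ok;
  gpr_cmdline *cl = gpr_cmdline_create("example server");
  gpr_cmdline_add_int(cl, "port", "Port to listen on", port);
  gpr_cmdline_set_survive_failure(cl); /* print usage and return 0 on error */
  ok = gpr_cmdline_parse(cl, argc, argv);
  gpr_cmdline_destroy(cl);
  return ok;
}
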
diff --git a/src/core/support/cpu_iphone.c b/src/core/support/cpu_iphone.c
index d412a6d7ee..82b49b47bc 100644
--- a/src/core/support/cpu_iphone.c
+++ b/src/core/support/cpu_iphone.c
@@ -36,9 +36,7 @@
#ifdef GPR_CPU_IPHONE
/* Probably 2 instead of 1, but see comment on gpr_cpu_current_cpu. */
-unsigned gpr_cpu_num_cores(void) {
- return 1;
-}
+unsigned gpr_cpu_num_cores(void) { return 1; }
/* Most code that's using this is using it to shard across work queues. So
unless profiling shows it's a problem or there appears a way to detect the
@@ -46,8 +44,6 @@ unsigned gpr_cpu_num_cores(void) {
Note that the interface in cpu.h lets gpr_cpu_num_cores return 0, but doing
it makes it impossible for gpr_cpu_current_cpu to satisfy its stated range,
and some code might be relying on it. */
-unsigned gpr_cpu_current_cpu(void) {
- return 0;
-}
+unsigned gpr_cpu_current_cpu(void) { return 0; }
#endif /* GPR_CPU_IPHONE */
diff --git a/src/core/support/cpu_linux.c b/src/core/support/cpu_linux.c
index 282d4daab1..7af6a8f009 100644
--- a/src/core/support/cpu_linux.c
+++ b/src/core/support/cpu_linux.c
@@ -33,7 +33,7 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
-#endif /* _GNU_SOURCE */
+#endif /* _GNU_SOURCE */
#include <grpc/support/port_platform.h>
diff --git a/src/core/support/cpu_posix.c b/src/core/support/cpu_posix.c
index 99484e37fb..8f01c284ca 100644
--- a/src/core/support/cpu_posix.c
+++ b/src/core/support/cpu_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,11 +44,11 @@
static __thread char magic_thread_local;
-static int ncpus = 0;
+static long ncpus = 0;
static void init_ncpus() {
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- if (ncpus < 1) {
+ if (ncpus < 1 || ncpus > INT32_MAX) {
gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1");
ncpus = 1;
}
@@ -57,7 +57,7 @@ static void init_ncpus() {
unsigned gpr_cpu_num_cores(void) {
static gpr_once once = GPR_ONCE_INIT;
gpr_once_init(&once, init_ncpus);
- return ncpus;
+ return (unsigned)ncpus;
}
/* This is a cheap, but good enough, pointer hash for sharding things: */
@@ -71,7 +71,7 @@ unsigned gpr_cpu_current_cpu(void) {
most code that's using this is using it to shard across work queues though,
so here we use thread identity instead to achieve a similar though not
identical effect */
- return shard_ptr(&magic_thread_local);
+ return (unsigned)shard_ptr(&magic_thread_local);
}
#endif /* GPR_CPU_POSIX */
diff --git a/src/core/support/cpu_windows.c b/src/core/support/cpu_windows.c
index 107a7b85f0..ce32eb0a9d 100644
--- a/src/core/support/cpu_windows.c
+++ b/src/core/support/cpu_windows.c
@@ -34,7 +34,6 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_WIN32
-#include <windows.h>
#include <grpc/support/log.h>
unsigned gpr_cpu_num_cores(void) {
diff --git a/src/core/support/env.h b/src/core/support/env.h
index 4f2e394d14..2902456947 100644
--- a/src/core/support/env.h
+++ b/src/core/support/env.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_ENV_H
-#define GRPC_INTERNAL_CORE_SUPPORT_ENV_H
+#ifndef GRPC_CORE_SUPPORT_ENV_H
+#define GRPC_CORE_SUPPORT_ENV_H
#include <stdio.h>
@@ -57,4 +57,4 @@ void gpr_setenv(const char *name, const char *value);
}
#endif
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_ENV_H */
+#endif /* GRPC_CORE_SUPPORT_ENV_H */
diff --git a/src/core/support/env_linux.c b/src/core/support/env_linux.c
index 2e03365e33..fe51f846b7 100644
--- a/src/core/support/env_linux.c
+++ b/src/core/support/env_linux.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,16 +42,43 @@
#include "src/core/support/env.h"
+#include <dlfcn.h>
+#include <features.h>
#include <stdlib.h>
+#include <string.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
#include "src/core/support/string.h"
char *gpr_getenv(const char *name) {
+#if defined(GPR_BACKWARDS_COMPATIBILITY_MODE)
+ typedef char *(*getenv_type)(const char *);
+ static getenv_type getenv_func = NULL;
+ /* Check to see which getenv variant is supported (go from most
+ * to least secure) */
+ const char *names[] = {"secure_getenv", "__secure_getenv", "getenv"};
+ for (size_t i = 0; getenv_func == NULL && i < GPR_ARRAY_SIZE(names); i++) {
+ getenv_func = (getenv_type)dlsym(RTLD_DEFAULT, names[i]);
+ if (getenv_func != NULL && strstr(names[i], "secure") == NULL) {
+ gpr_log(GPR_DEBUG,
+ "Warning: insecure environment read function '%s' used",
+ names[i]);
+ }
+ }
+ char *result = getenv_func(name);
+ return result == NULL ? result : gpr_strdup(result);
+#elif __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17)
char *result = secure_getenv(name);
return result == NULL ? result : gpr_strdup(result);
+#else
+ gpr_log(GPR_DEBUG, "Warning: insecure environment read function '%s' used",
+ "getenv");
+ char *result = getenv(name);
+ return result == NULL ? result : gpr_strdup(result);
+#endif
}
void gpr_setenv(const char *name, const char *value) {
diff --git a/src/core/support/env_win32.c b/src/core/support/env_win32.c
index 6b1ff102b0..10258283ba 100644
--- a/src/core/support/env_win32.c
+++ b/src/core/support/env_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,12 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
+#ifdef __MINGW32__
+errno_t getenv_s(size_t *size_needed, char *buffer, size_t size,
+ const char *varname);
+#else
#include <stdlib.h>
+#endif
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -47,14 +52,17 @@
char *gpr_getenv(const char *name) {
size_t size;
char *result = NULL;
- char *duplicated;
errno_t err;
- err = _dupenv_s(&result, &size, name);
- if (err) return NULL;
- duplicated = gpr_strdup(result);
- free(result);
- return duplicated;
+ err = getenv_s(&size, NULL, 0, name);
+ if (err || (size == 0)) return NULL;
+ result = gpr_malloc(size);
+ err = getenv_s(&size, result, size, name);
+ if (err) {
+ gpr_free(result);
+ return NULL;
+ }
+ return result;
}
void gpr_setenv(const char *name, const char *value) {
diff --git a/src/core/support/histogram.c b/src/core/support/histogram.c
index 9029703891..20ed2b14b1 100644
--- a/src/core/support/histogram.c
+++ b/src/core/support/histogram.c
@@ -66,7 +66,7 @@ struct gpr_histogram {
/* number of buckets */
size_t num_buckets;
/* the buckets themselves */
- gpr_uint32 *buckets;
+ uint32_t *buckets;
};
/* determine a bucket index given a value - does no bounds checking */
@@ -102,8 +102,8 @@ gpr_histogram *gpr_histogram_create(double resolution,
h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
GPR_ASSERT(h->num_buckets > 1);
GPR_ASSERT(h->num_buckets < 100000000);
- h->buckets = gpr_malloc(sizeof(gpr_uint32) * h->num_buckets);
- memset(h->buckets, 0, sizeof(gpr_uint32) * h->num_buckets);
+ h->buckets = gpr_malloc(sizeof(uint32_t) * h->num_buckets);
+ memset(h->buckets, 0, sizeof(uint32_t) * h->num_buckets);
return h;
}
@@ -125,7 +125,7 @@ void gpr_histogram_add(gpr_histogram *h, double x) {
h->buckets[bucket_for(h, x)]++;
}
-int gpr_histogram_merge(gpr_histogram *dst, gpr_histogram *src) {
+int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src) {
if ((dst->num_buckets != src->num_buckets) ||
(dst->multiplier != src->multiplier)) {
/* Fail because these histograms don't match */
@@ -137,7 +137,7 @@ int gpr_histogram_merge(gpr_histogram *dst, gpr_histogram *src) {
return 1;
}
-void gpr_histogram_merge_contents(gpr_histogram *dst, const gpr_uint32 *data,
+void gpr_histogram_merge_contents(gpr_histogram *dst, const uint32_t *data,
size_t data_count, double min_seen,
double max_seen, double sum,
double sum_of_squares, double count) {
@@ -191,15 +191,18 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
break;
}
}
- return (bucket_start(h, (double)lower_idx) + bucket_start(h, (double)upper_idx)) / 2.0;
+ return (bucket_start(h, (double)lower_idx) +
+ bucket_start(h, (double)upper_idx)) /
+ 2.0;
} else {
/* treat values as uniform throughout the bucket, and find where this value
should lie */
lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1));
- return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
- (count_so_far - count_below) /
- h->buckets[lower_idx],
+ return GPR_CLAMP(upper_bound -
+ (upper_bound - lower_bound) *
+ (count_so_far - count_below) /
+ h->buckets[lower_idx],
h->min_seen, h->max_seen);
}
}
@@ -209,7 +212,7 @@ double gpr_histogram_percentile(gpr_histogram *h, double percentile) {
}
double gpr_histogram_mean(gpr_histogram *h) {
- GPR_ASSERT(h->count);
+ GPR_ASSERT(h->count != 0);
return h->sum / h->count;
}
@@ -235,7 +238,7 @@ double gpr_histogram_sum_of_squares(gpr_histogram *h) {
return h->sum_of_squares;
}
-const gpr_uint32 *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) {
+const uint32_t *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) {
*size = h->num_buckets;
return h->buckets;
}
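
Note: gpr_histogram_merge now takes a const source; it still only succeeds when both histograms share the same resolution and maximum. A sketch of merging per-thread latency histograms; the creation parameters and the other histogram calls are assumed from the existing API:

#include <stddef.h>

#include <grpc/support/histogram.h>
#include <grpc/support/log.h>

static double merged_p99(gpr_histogram **per_thread, size_t n) {
  size_t i;
  double p99;
  gpr_histogram *total = gpr_histogram_create(0.01, 60e9);
  for (i = 0; i < n; i++) {
    /* merge returns 0 if resolution/max differ, hence identical create args */
    GPR_ASSERT(gpr_histogram_merge(total, per_thread[i]));
  }
  p99 = gpr_histogram_percentile(total, 99.0);
  gpr_histogram_destroy(total);
  return p99;
}
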
diff --git a/src/core/support/host_port.c b/src/core/support/host_port.c
index 0906ebc2a3..23f65b1581 100644
--- a/src/core/support/host_port.c
+++ b/src/core/support/host_port.c
@@ -50,7 +50,7 @@ int gpr_join_host_port(char **out, const char *host, int port) {
}
}
-void gpr_split_host_port(const char *name, char **host, char **port) {
+int gpr_split_host_port(const char *name, char **host, char **port) {
const char *host_start;
size_t host_len;
const char *port_start;
@@ -63,7 +63,7 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
const char *rbracket = strchr(name, ']');
if (rbracket == NULL) {
/* Unmatched [ */
- return;
+ return 0;
}
if (rbracket[1] == '\0') {
/* ]<end> */
@@ -73,14 +73,14 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
port_start = rbracket + 2;
} else {
/* ]<invalid> */
- return;
+ return 0;
}
host_start = name + 1;
host_len = (size_t)(rbracket - host_start);
if (memchr(host_start, ':', host_len) == NULL) {
/* Require all bracketed hosts to contain a colon, because a hostname or
- IPv4 address should never use brackets. */
- return;
+ IPv4 address should never use brackets. */
+ return 0;
}
} else {
const char *colon = strchr(name, ':');
@@ -105,4 +105,6 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
if (port_start != NULL) {
*port = gpr_strdup(port_start);
}
+
+ return 1;
}
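
Note: gpr_split_host_port now reports whether the input could be split at all, so callers can distinguish a malformed address from a host that simply has no port. A short sketch of the new calling convention:

#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>

static void split_example(const char *name) {
  char *host = NULL;
  char *port = NULL;
  if (!gpr_split_host_port(name, &host, &port)) {
    gpr_log(GPR_ERROR, "malformed address: %s", name);
    return;
  }
  /* port stays NULL for inputs like "localhost" that carry no port */
  gpr_log(GPR_INFO, "host=%s port=%s", host, port != NULL ? port : "(none)");
  gpr_free(host);
  gpr_free(port);
}
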
diff --git a/src/core/support/file.c b/src/core/support/load_file.c
index c1361d8a9e..650bd62ccb 100644
--- a/src/core/support/file.c
+++ b/src/core/support/load_file.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/support/file.h"
+#include "src/core/support/load_file.h"
#include <errno.h>
#include <string.h>
@@ -40,6 +40,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include "src/core/support/block_annotate.h"
#include "src/core/support/string.h"
gpr_slice gpr_load_file(const char *filename, int add_null_terminator,
@@ -48,9 +49,11 @@ gpr_slice gpr_load_file(const char *filename, int add_null_terminator,
size_t contents_size = 0;
char *error_msg = NULL;
gpr_slice result = gpr_empty_slice();
- FILE *file = fopen(filename, "rb");
+ FILE *file;
size_t bytes_read = 0;
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ file = fopen(filename, "rb");
if (file == NULL) {
gpr_asprintf(&error_msg, "Could not open file %s (error = %s).", filename,
strerror(errno));
@@ -83,5 +86,6 @@ end:
if (success != NULL) *success = 0;
}
if (file != NULL) fclose(file);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
return result;
}
diff --git a/src/core/support/file.h b/src/core/support/load_file.h
index 1dafe390e3..5896654e9a 100644
--- a/src/core/support/file.h
+++ b/src/core/support/load_file.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_FILE_H
-#define GRPC_INTERNAL_CORE_SUPPORT_FILE_H
+#ifndef GRPC_CORE_SUPPORT_LOAD_FILE_H
+#define GRPC_CORE_SUPPORT_LOAD_FILE_H
#include <stdio.h>
@@ -42,22 +42,14 @@
extern "C" {
#endif
-/* File utility functions */
-
/* Loads the content of a file into a slice. add_null_terminator will add
a NULL terminator if non-zero. The success parameter, if not NULL,
will be set to 1 in case of success and 0 in case of failure. */
gpr_slice gpr_load_file(const char *filename, int add_null_terminator,
int *success);
-/* Creates a temporary file from a prefix.
- If tmp_filename is not NULL, *tmp_filename is assigned the name of the
- created file and it is the responsibility of the caller to gpr_free it
- unless an error occurs in which case it will be set to NULL. */
-FILE *gpr_tmpfile(const char *prefix, char **tmp_filename);
-
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_FILE_H */
+#endif /* GRPC_CORE_SUPPORT_LOAD_FILE_H */
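
Note: file.{c,h} becomes load_file.{c,h} and gpr_tmpfile moves out of this header. A short sketch of the remaining API, which is now bracketed internally by the blocking-region annotations added above:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include "src/core/support/load_file.h"

static void load_example(const char *path) {
  int success = 0;
  gpr_slice contents =
      gpr_load_file(path, 1 /* add_null_terminator */, &success);
  if (!success) {
    gpr_log(GPR_ERROR, "failed to load %s", path);
  } else {
    gpr_log(GPR_INFO, "loaded %lu bytes",
            (unsigned long)GPR_SLICE_LENGTH(contents));
  }
  gpr_slice_unref(contents);
}
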
diff --git a/src/core/support/log.c b/src/core/support/log.c
index f52c2035b9..04156a5b1f 100644
--- a/src/core/support/log.c
+++ b/src/core/support/log.c
@@ -32,6 +32,7 @@
*/
#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
#include <stdio.h>
#include <string.h>
@@ -48,7 +49,7 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
case GPR_LOG_SEVERITY_ERROR:
return "E";
}
- return "UNKNOWN";
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
diff --git a/src/core/support/log_linux.c b/src/core/support/log_linux.c
index 48349d2c83..d66b7a3cc0 100644
--- a/src/core/support/log_linux.c
+++ b/src/core/support/log_linux.c
@@ -43,7 +43,9 @@
#ifdef GPR_LINUX
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include <stdio.h>
#include <stdarg.h>
@@ -71,28 +73,33 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
void gpr_default_log(gpr_log_func_args *args) {
char *final_slash;
+ char *prefix;
const char *display_file;
char time_buffer[64];
- gpr_timespec now = gpr_now();
+ time_t timer;
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
+ timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
display_file = args->file;
else
display_file = final_slash + 1;
- if (!localtime_r(&now.tv_sec, &tm)) {
+ if (!localtime_r(&timer, &tm)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
strcpy(time_buffer, "error:strftime");
}
- fprintf(stderr, "%s%s.%09d %7ld %s:%d] %s\n",
- gpr_log_severity_string(args->severity), time_buffer,
- (int)(now.tv_nsec), gettid(), display_file, args->line,
- args->message);
+ gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]",
+ gpr_log_severity_string(args->severity), time_buffer,
+ (int)(now.tv_nsec), gettid(), display_file, args->line);
+
+ fprintf(stderr, "%-60s %s\n", prefix, args->message);
+ gpr_free(prefix);
}
#endif
diff --git a/src/core/support/log_posix.c b/src/core/support/log_posix.c
index afca792c40..3ff171f99c 100644
--- a/src/core/support/log_posix.c
+++ b/src/core/support/log_posix.c
@@ -45,7 +45,7 @@
#include <time.h>
#include <pthread.h>
-static gpr_intptr gettid(void) { return (gpr_intptr)pthread_self(); }
+static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...) {
@@ -62,9 +62,9 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
- message = allocated = gpr_malloc(ret + 1);
+ message = allocated = gpr_malloc((size_t)ret + 1);
va_start(args, format);
- vsnprintf(message, ret + 1, format, args);
+ vsnprintf(message, (size_t)(ret + 1), format, args);
va_end(args);
}
gpr_log_message(file, line, severity, message);
@@ -75,16 +75,18 @@ void gpr_default_log(gpr_log_func_args *args) {
char *final_slash;
const char *display_file;
char time_buffer[64];
- gpr_timespec now = gpr_now();
+ time_t timer;
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
+ timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
display_file = args->file;
else
display_file = final_slash + 1;
- if (!localtime_r(&now.tv_sec, &tm)) {
+ if (!localtime_r(&timer, &tm)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
diff --git a/src/core/support/log_win32.c b/src/core/support/log_win32.c
index d249be7d2e..e18e667fe5 100644
--- a/src/core/support/log_win32.c
+++ b/src/core/support/log_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,11 +81,21 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
/* Simple starter implementation */
void gpr_default_log(gpr_log_func_args *args) {
+ char *final_slash;
+ const char *display_file;
char time_buffer[64];
- gpr_timespec now = gpr_now();
+ time_t timer;
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
- if (localtime_s(&tm, &now.tv_sec)) {
+ timer = (time_t)now.tv_sec;
+ final_slash = strrchr(args->file, '\\');
+ if (final_slash == NULL)
+ display_file = args->file;
+ else
+ display_file = final_slash + 1;
+
+ if (localtime_s(&tm, &timer)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
@@ -94,17 +104,18 @@ void gpr_default_log(gpr_log_func_args *args) {
fprintf(stderr, "%s%s.%09u %5lu %s:%d] %s\n",
gpr_log_severity_string(args->severity), time_buffer,
- (int)(now.tv_nsec), GetCurrentThreadId(), args->file, args->line,
+ (int)(now.tv_nsec), GetCurrentThreadId(), display_file, args->line,
args->message);
+ fflush(stderr);
}
-char *gpr_format_message(DWORD messageid) {
+char *gpr_format_message(int messageid) {
LPTSTR tmessage;
char *message;
DWORD status = FormatMessage(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, messageid, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ NULL, (DWORD)messageid, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR)(&tmessage), 0, NULL);
if (status == 0) return gpr_strdup("Unable to retrieve error string");
message = gpr_tchar_to_char(tmessage);
diff --git a/src/core/support/murmur_hash.c b/src/core/support/murmur_hash.c
index 37fdca82ba..a5261c0cc0 100644
--- a/src/core/support/murmur_hash.c
+++ b/src/core/support/murmur_hash.c
@@ -46,19 +46,19 @@
handle aligned reads, do the conversion here */
#define GETBLOCK32(p, i) (p)[(i)]
-gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
- const gpr_uint8 *data = (const gpr_uint8 *)key;
+uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) {
+ const uint8_t *data = (const uint8_t *)key;
const size_t nblocks = len / 4;
int i;
- gpr_uint32 h1 = seed;
- gpr_uint32 k1;
+ uint32_t h1 = seed;
+ uint32_t k1;
- const gpr_uint32 c1 = 0xcc9e2d51;
- const gpr_uint32 c2 = 0x1b873593;
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
- const gpr_uint32 *blocks = ((const gpr_uint32 *)key) + nblocks;
- const gpr_uint8 *tail = (const gpr_uint8 *)(data + nblocks * 4);
+ const uint32_t *blocks = ((const uint32_t *)key) + nblocks;
+ const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
/* body */
for (i = -(int)nblocks; i; i++) {
@@ -78,9 +78,9 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
/* tail */
switch (len & 3) {
case 3:
- k1 ^= ((gpr_uint32)tail[2]) << 16;
+ k1 ^= ((uint32_t)tail[2]) << 16;
case 2:
- k1 ^= ((gpr_uint32)tail[1]) << 8;
+ k1 ^= ((uint32_t)tail[1]) << 8;
case 1:
k1 ^= tail[0];
k1 *= c1;
@@ -90,7 +90,7 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
};
/* finalization */
- h1 ^= (gpr_uint32)len;
+ h1 ^= (uint32_t)len;
FMIX32(h1);
return h1;
}
diff --git a/src/core/support/murmur_hash.h b/src/core/support/murmur_hash.h
index 85ab2fe4bf..0f0b399e5d 100644
--- a/src/core/support/murmur_hash.h
+++ b/src/core/support/murmur_hash.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,14 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_MURMUR_HASH_H
-#define GRPC_INTERNAL_CORE_SUPPORT_MURMUR_HASH_H
+#ifndef GRPC_CORE_SUPPORT_MURMUR_HASH_H
+#define GRPC_CORE_SUPPORT_MURMUR_HASH_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
/* compute the hash of key (length len) */
-gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed);
+uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed);
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_MURMUR_HASH_H */
+#endif /* GRPC_CORE_SUPPORT_MURMUR_HASH_H */
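As an illustrative aside (not part of the patch), a minimal caller of the retyped gpr_murmur_hash3 shown above might look like this; the seed value 0xdeadbeef is an arbitrary example:

#include <string.h>
#include "src/core/support/murmur_hash.h"

static uint32_t hash_name(const char *name) {
  /* hash the bytes of the string, excluding the terminating NUL */
  return gpr_murmur_hash3(name, strlen(name), 0xdeadbeef);
}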
diff --git a/src/core/support/slice.c b/src/core/support/slice.c
index a2d62fc1e5..b9a7c77bda 100644
--- a/src/core/support/slice.c
+++ b/src/core/support/slice.c
@@ -57,6 +57,21 @@ void gpr_slice_unref(gpr_slice slice) {
}
}
+/* gpr_slice_from_static_string support structure - a refcount that does
+ nothing */
+static void noop_ref_or_unref(void *unused) {}
+
+static gpr_slice_refcount noop_refcount = {noop_ref_or_unref,
+ noop_ref_or_unref};
+
+gpr_slice gpr_slice_from_static_string(const char *s) {
+ gpr_slice slice;
+ slice.refcount = &noop_refcount;
+ slice.data.refcounted.bytes = (uint8_t *)s;
+ slice.data.refcounted.length = strlen(s);
+ return slice;
+}
+
/* gpr_slice_new support structures - we create a refcount object extended
with the user provided data pointer & destroy function */
typedef struct new_slice_refcount {
@@ -188,13 +203,13 @@ gpr_slice gpr_slice_malloc(size_t length) {
/* The slices refcount points back to the allocated block. */
slice.refcount = &rc->base;
/* The data bytes are placed immediately after the refcount struct */
- slice.data.refcounted.bytes = (gpr_uint8 *)(rc + 1);
+ slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
/* And the length of the block is set to the requested length */
slice.data.refcounted.length = length;
} else {
/* small slice: just inline the data */
slice.refcount = NULL;
- slice.data.inlined.length = (gpr_uint8)length;
+ slice.data.inlined.length = (uint8_t)length;
}
return slice;
}
@@ -217,7 +232,7 @@ gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
/* Enforce preconditions */
GPR_ASSERT(source.data.inlined.length >= end);
subset.refcount = NULL;
- subset.data.inlined.length = (gpr_uint8)(end - begin);
+ subset.data.inlined.length = (uint8_t)(end - begin);
memcpy(subset.data.inlined.bytes, source.data.inlined.bytes + begin,
end - begin);
}
@@ -229,7 +244,7 @@ gpr_slice gpr_slice_sub(gpr_slice source, size_t begin, size_t end) {
if (end - begin <= sizeof(subset.data.inlined.bytes)) {
subset.refcount = NULL;
- subset.data.inlined.length = (gpr_uint8)(end - begin);
+ subset.data.inlined.length = (uint8_t)(end - begin);
memcpy(subset.data.inlined.bytes, GPR_SLICE_START_PTR(source) + begin,
end - begin);
} else {
@@ -247,17 +262,17 @@ gpr_slice gpr_slice_split_tail(gpr_slice *source, size_t split) {
/* inlined data, copy it out */
GPR_ASSERT(source->data.inlined.length >= split);
tail.refcount = NULL;
- tail.data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
+ tail.data.inlined.length = (uint8_t)(source->data.inlined.length - split);
memcpy(tail.data.inlined.bytes, source->data.inlined.bytes + split,
tail.data.inlined.length);
- source->data.inlined.length = (gpr_uint8)split;
+ source->data.inlined.length = (uint8_t)split;
} else {
size_t tail_length = source->data.refcounted.length - split;
GPR_ASSERT(source->data.refcounted.length >= split);
if (tail_length < sizeof(tail.data.inlined.bytes)) {
/* Copy out the bytes - it'll be cheaper than refcounting */
tail.refcount = NULL;
- tail.data.inlined.length = (gpr_uint8)tail_length;
+ tail.data.inlined.length = (uint8_t)tail_length;
memcpy(tail.data.inlined.bytes, source->data.refcounted.bytes + split,
tail_length);
} else {
@@ -282,16 +297,17 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
GPR_ASSERT(source->data.inlined.length >= split);
head.refcount = NULL;
- head.data.inlined.length = (gpr_uint8)split;
+ head.data.inlined.length = (uint8_t)split;
memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split);
- source->data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
+ source->data.inlined.length =
+ (uint8_t)(source->data.inlined.length - split);
memmove(source->data.inlined.bytes, source->data.inlined.bytes + split,
source->data.inlined.length);
} else if (split < sizeof(head.data.inlined.bytes)) {
GPR_ASSERT(source->data.refcounted.length >= split);
head.refcount = NULL;
- head.data.inlined.length = (gpr_uint8)split;
+ head.data.inlined.length = (uint8_t)split;
memcpy(head.data.inlined.bytes, source->data.refcounted.bytes, split);
source->data.refcounted.bytes += split;
source->data.refcounted.length -= split;
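A short sketch of how the new static-string slices behave (illustrative only): ref/unref go through the no-op refcount added above, so nothing is allocated or freed.

#include <grpc/support/slice.h>

static void static_slice_demo(void) {
  gpr_slice s = gpr_slice_from_static_string("content-type");
  gpr_slice copy = gpr_slice_ref(s); /* no allocation; refcount is a no-op */
  gpr_slice_unref(copy);
  gpr_slice_unref(s); /* the string literal itself is never freed */
}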
diff --git a/src/core/support/slice_buffer.c b/src/core/support/slice_buffer.c
index 6e6c72a2bf..66f111d767 100644
--- a/src/core/support/slice_buffer.c
+++ b/src/core/support/slice_buffer.c
@@ -31,6 +31,7 @@
*
*/
+#include <grpc/support/port_platform.h>
#include <grpc/support/slice_buffer.h>
#include <string.h>
@@ -69,9 +70,9 @@ void gpr_slice_buffer_destroy(gpr_slice_buffer *sb) {
}
}
-gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, unsigned n) {
+uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t n) {
gpr_slice *back;
- gpr_uint8 *out;
+ uint8_t *out;
sb->length += n;
@@ -81,7 +82,7 @@ gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, unsigned n) {
if ((back->data.inlined.length + n) > sizeof(back->data.inlined.bytes))
goto add_new;
out = back->data.inlined.bytes + back->data.inlined.length;
- back->data.inlined.length = (gpr_uint8)(back->data.inlined.length + n);
+ back->data.inlined.length = (uint8_t)(back->data.inlined.length + n);
return out;
add_new:
@@ -89,7 +90,7 @@ add_new:
back = &sb->slices[sb->count];
sb->count++;
back->refcount = NULL;
- back->data.inlined.length = (gpr_uint8)n;
+ back->data.inlined.length = (uint8_t)n;
return back->data.inlined.bytes;
}
@@ -116,7 +117,8 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
GPR_SLICE_INLINED_SIZE) {
memcpy(back->data.inlined.bytes + back->data.inlined.length,
s.data.inlined.bytes, s.data.inlined.length);
- back->data.inlined.length = (gpr_uint8)(back->data.inlined.length + s.data.inlined.length);
+ back->data.inlined.length =
+ (uint8_t)(back->data.inlined.length + s.data.inlined.length);
} else {
size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length;
memcpy(back->data.inlined.bytes + back->data.inlined.length,
@@ -126,7 +128,7 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
back = &sb->slices[n];
sb->count = n + 1;
back->refcount = NULL;
- back->data.inlined.length = (gpr_uint8)(s.data.inlined.length - cp1);
+ back->data.inlined.length = (uint8_t)(s.data.inlined.length - cp1);
memcpy(back->data.inlined.bytes, s.data.inlined.bytes + cp1,
s.data.inlined.length - cp1);
}
@@ -206,3 +208,75 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
src->count = 0;
src->length = 0;
}
+
+void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
+ gpr_slice_buffer *dst) {
+ size_t src_idx;
+ size_t output_len = dst->length + n;
+ size_t new_input_len = src->length - n;
+ GPR_ASSERT(src->length >= n);
+ if (src->length == n) {
+ gpr_slice_buffer_move_into(src, dst);
+ return;
+ }
+ src_idx = 0;
+ while (src_idx < src->capacity) {
+ gpr_slice slice = src->slices[src_idx];
+ size_t slice_len = GPR_SLICE_LENGTH(slice);
+ if (n > slice_len) {
+ gpr_slice_buffer_add(dst, slice);
+ n -= slice_len;
+ src_idx++;
+ } else if (n == slice_len) {
+ gpr_slice_buffer_add(dst, slice);
+ src_idx++;
+ break;
+ } else { /* n < slice_len */
+ src->slices[src_idx] = gpr_slice_split_tail(&slice, n);
+ GPR_ASSERT(GPR_SLICE_LENGTH(slice) == n);
+ GPR_ASSERT(GPR_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
+ gpr_slice_buffer_add(dst, slice);
+ break;
+ }
+ }
+ GPR_ASSERT(dst->length == output_len);
+ memmove(src->slices, src->slices + src_idx,
+ sizeof(gpr_slice) * (src->count - src_idx));
+ src->count -= src_idx;
+ src->length = new_input_len;
+ GPR_ASSERT(src->count > 0);
+}
+
+void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
+ gpr_slice_buffer *garbage) {
+ GPR_ASSERT(n <= sb->length);
+ sb->length -= n;
+ for (;;) {
+ size_t idx = sb->count - 1;
+ gpr_slice slice = sb->slices[idx];
+ size_t slice_len = GPR_SLICE_LENGTH(slice);
+ if (slice_len > n) {
+ sb->slices[idx] = gpr_slice_split_head(&slice, slice_len - n);
+ gpr_slice_buffer_add_indexed(garbage, slice);
+ return;
+ } else if (slice_len == n) {
+ gpr_slice_buffer_add_indexed(garbage, slice);
+ sb->count = idx;
+ return;
+ } else {
+ gpr_slice_buffer_add_indexed(garbage, slice);
+ n -= slice_len;
+ sb->count = idx;
+ }
+ }
+}
+
+gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *sb) {
+ gpr_slice slice;
+ GPR_ASSERT(sb->count > 0);
+ slice = sb->slices[0];
+ memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(gpr_slice));
+ sb->count--;
+ sb->length -= GPR_SLICE_LENGTH(slice);
+ return slice;
+}
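The new move/trim/take helpers make it straightforward to carve a prefix off a buffer. A minimal sketch, assuming the caller already knows header_bytes does not exceed the buffer length (the implementation asserts this):

#include <grpc/support/slice_buffer.h>

static void split_off_prefix(gpr_slice_buffer *incoming, size_t header_bytes) {
  gpr_slice_buffer header;
  gpr_slice_buffer_init(&header);
  /* moves the first header_bytes bytes of incoming into header */
  gpr_slice_buffer_move_first(incoming, header_bytes, &header);
  /* ... consume the header slices ... */
  gpr_slice_buffer_destroy(&header);
}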
diff --git a/src/core/support/stack_lockfree.c b/src/core/support/stack_lockfree.c
new file mode 100644
index 0000000000..9daecd2e18
--- /dev/null
+++ b/src/core/support/stack_lockfree.c
@@ -0,0 +1,185 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/support/stack_lockfree.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+
+/* The lockfree node structure is a single architecture-level
+ word that allows for an atomic CAS to set it up. */
+struct lockfree_node_contents {
+ /* next thing to look at. Actual index for head, next index otherwise */
+ uint16_t index;
+#ifdef GPR_ARCH_64
+ uint16_t pad;
+ uint32_t aba_ctr;
+#else
+#ifdef GPR_ARCH_32
+ uint16_t aba_ctr;
+#else
+#error Unsupported bit width architecture
+#endif
+#endif
+};
+
+/* Use a union to make sure that these are in the same bits as an atm word */
+typedef union lockfree_node {
+ gpr_atm atm;
+ struct lockfree_node_contents contents;
+} lockfree_node;
+
+#define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries are aligned to 8 bytes */
+#define INVALID_ENTRY_INDEX \
+ ((1 << 16) - 1) /* reserve this entry as invalid \
+ */
+
+struct gpr_stack_lockfree {
+ lockfree_node *entries;
+ lockfree_node head; /* An atomic entry describing curr head */
+
+#ifndef NDEBUG
+ /* Bitmap of pushed entries to check for double-push or pop */
+ gpr_atm pushed[(INVALID_ENTRY_INDEX + 1) / (8 * sizeof(gpr_atm))];
+#endif
+};
+
+gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
+ gpr_stack_lockfree *stack;
+ stack = gpr_malloc(sizeof(*stack));
+ /* Since we only allocate 16 bits to represent an entry number,
+ * make sure that we are within the desired range */
+ /* Reserve the highest entry number as a dummy */
+ GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
+ stack->entries = gpr_malloc_aligned(entries * sizeof(stack->entries[0]),
+ ENTRY_ALIGNMENT_BITS);
+ /* Clear out all entries */
+ memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
+ memset(&stack->head, 0, sizeof(stack->head));
+#ifndef NDEBUG
+ memset(&stack->pushed, 0, sizeof(stack->pushed));
+#endif
+
+ GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents));
+
+ /* Point the head at reserved dummy entry */
+ stack->head.contents.index = INVALID_ENTRY_INDEX;
+/* Fill in the pad and aba_ctr to avoid confusing memcheck tools */
+#ifdef GPR_ARCH_64
+ stack->head.contents.pad = 0;
+#endif
+ stack->head.contents.aba_ctr = 0;
+ return stack;
+}
+
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
+ gpr_free_aligned(stack->entries);
+ gpr_free(stack);
+}
+
+int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
+ lockfree_node head;
+ lockfree_node newhead;
+ lockfree_node curent;
+ lockfree_node newent;
+
+ /* First fill in the entry's index and aba ctr for new head */
+ newhead.contents.index = (uint16_t)entry;
+#ifdef GPR_ARCH_64
+ /* Fill in the pad to avoid confusing memcheck tools */
+ newhead.contents.pad = 0;
+#endif
+
+ /* Also post-increment the aba_ctr */
+ curent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
+ newhead.contents.aba_ctr = ++curent.contents.aba_ctr;
+ gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm);
+
+#ifndef NDEBUG
+ /* Check for double push */
+ {
+ int pushed_index = entry / (int)(8 * sizeof(gpr_atm));
+ int pushed_bit = entry % (int)(8 * sizeof(gpr_atm));
+ gpr_atm old_val;
+
+ old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
+ ((gpr_atm)1 << pushed_bit));
+ GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) == 0);
+ }
+#endif
+
+ do {
+ /* Atomically get the existing head value for use */
+ head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
+ /* Point to it */
+ newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
+ newent.contents.index = head.contents.index;
+ gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
+ } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
+ /* Use rel_cas above to make sure that entry index is set properly */
+ return head.contents.index == INVALID_ENTRY_INDEX;
+}
+
+int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
+ lockfree_node head;
+ lockfree_node newhead;
+
+ do {
+ head.atm = gpr_atm_acq_load(&(stack->head.atm));
+ if (head.contents.index == INVALID_ENTRY_INDEX) {
+ return -1;
+ }
+ newhead.atm =
+ gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
+
+ } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
+#ifndef NDEBUG
+ /* Check for valid pop */
+ {
+ int pushed_index = head.contents.index / (8 * sizeof(gpr_atm));
+ int pushed_bit = head.contents.index % (8 * sizeof(gpr_atm));
+ gpr_atm old_val;
+
+ old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
+ -((gpr_atm)1 << pushed_bit));
+ GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) != 0);
+ }
+#endif
+
+ return head.contents.index;
+}
diff --git a/src/core/support/stack_lockfree.h b/src/core/support/stack_lockfree.h
new file mode 100644
index 0000000000..d6fd06d67c
--- /dev/null
+++ b/src/core/support/stack_lockfree.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SUPPORT_STACK_LOCKFREE_H
+#define GRPC_CORE_SUPPORT_STACK_LOCKFREE_H
+
+#include <stddef.h>
+
+typedef struct gpr_stack_lockfree gpr_stack_lockfree;
+
+/* This stack must specify the maximum number of entries to track.
+ The current implementation only allows up to 65534 entries */
+gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries);
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack);
+
+/* Pass in a valid entry number for the next stack entry */
+/* Returns 1 if this is the first element on the stack, 0 otherwise */
+int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
+
+/* Returns -1 on empty or the actual entry number */
+int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
+
+#endif /* GRPC_CORE_SUPPORT_STACK_LOCKFREE_H */
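The stack stores small integer entry indices rather than pointers. A sketch of typical use (the indices here are hypothetical):

#include "src/core/support/stack_lockfree.h"

static void stack_demo(void) {
  gpr_stack_lockfree *s = gpr_stack_lockfree_create(128);
  int first = gpr_stack_lockfree_push(s, 3); /* returns 1: stack was empty */
  gpr_stack_lockfree_push(s, 7);             /* returns 0 */
  (void)first;
  while (gpr_stack_lockfree_pop(s) != -1) {
    /* entries come back LIFO: 7, then 3 */
  }
  gpr_stack_lockfree_destroy(s);
}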
diff --git a/src/core/support/string.c b/src/core/support/string.c
index 6a80ccc841..1f541de40f 100644
--- a/src/core/support/string.c
+++ b/src/core/support/string.c
@@ -38,6 +38,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
@@ -61,14 +62,14 @@ typedef struct {
size_t capacity;
size_t length;
char *data;
-} hexout;
+} dump_out;
-static hexout hexout_create(void) {
- hexout r = {0, 0, NULL};
+static dump_out dump_out_create(void) {
+ dump_out r = {0, 0, NULL};
return r;
}
-static void hexout_append(hexout *out, char c) {
+static void dump_out_append(dump_out *out, char c) {
if (out->length == out->capacity) {
out->capacity = GPR_MAX(8, 2 * out->capacity);
out->data = gpr_realloc(out->data, out->capacity);
@@ -76,44 +77,64 @@ static void hexout_append(hexout *out, char c) {
out->data[out->length++] = c;
}
-char *gpr_hexdump(const char *buf, size_t len, gpr_uint32 flags) {
+static void hexdump(dump_out *out, const char *buf, size_t len) {
static const char hex[16] = "0123456789abcdef";
- hexout out = hexout_create();
- const gpr_uint8 *const beg = (const gpr_uint8 *)buf;
- const gpr_uint8 *const end = beg + len;
- const gpr_uint8 *cur;
+ const uint8_t *const beg = (const uint8_t *)buf;
+ const uint8_t *const end = beg + len;
+ const uint8_t *cur;
for (cur = beg; cur != end; ++cur) {
- if (cur != beg) hexout_append(&out, ' ');
- hexout_append(&out, hex[*cur >> 4]);
- hexout_append(&out, hex[*cur & 0xf]);
+ if (cur != beg) dump_out_append(out, ' ');
+ dump_out_append(out, hex[*cur >> 4]);
+ dump_out_append(out, hex[*cur & 0xf]);
}
+}
- if (flags & GPR_HEXDUMP_PLAINTEXT) {
- if (len) hexout_append(&out, ' ');
- hexout_append(&out, '\'');
- for (cur = beg; cur != end; ++cur) {
- hexout_append(&out, isprint(*cur) ? *(char*)cur : '.');
- }
- hexout_append(&out, '\'');
+static void asciidump(dump_out *out, const char *buf, size_t len) {
+ const uint8_t *const beg = (const uint8_t *)buf;
+ const uint8_t *const end = beg + len;
+ const uint8_t *cur;
+ int out_was_empty = (out->length == 0);
+ if (!out_was_empty) {
+ dump_out_append(out, ' ');
+ dump_out_append(out, '\'');
}
+ for (cur = beg; cur != end; ++cur) {
+ dump_out_append(out, (char)(isprint(*cur) ? *(char *)cur : '.'));
+ }
+ if (!out_was_empty) {
+ dump_out_append(out, '\'');
+ }
+}
- hexout_append(&out, 0);
-
+char *gpr_dump(const char *buf, size_t len, uint32_t flags) {
+ dump_out out = dump_out_create();
+ if (flags & GPR_DUMP_HEX) {
+ hexdump(&out, buf, len);
+ }
+ if (flags & GPR_DUMP_ASCII) {
+ asciidump(&out, buf, len);
+ }
+ dump_out_append(&out, 0);
return out.data;
}
-int gpr_parse_bytes_to_uint32(const char *buf, size_t len, gpr_uint32 *result) {
- gpr_uint32 out = 0;
- gpr_uint32 new;
+char *gpr_dump_slice(gpr_slice s, uint32_t flags) {
+ return gpr_dump((const char *)GPR_SLICE_START_PTR(s), GPR_SLICE_LENGTH(s),
+ flags);
+}
+
+int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) {
+ uint32_t out = 0;
+ uint32_t new;
size_t i;
if (len == 0) return 0; /* must have some bytes */
for (i = 0; i < len; i++) {
if (buf[i] < '0' || buf[i] > '9') return 0; /* bad char */
- new = 10 * out + (gpr_uint32)(buf[i] - '0');
+ new = 10 * out + (uint32_t)(buf[i] - '0');
if (new < out) return 0; /* overflow */
out = new;
}
@@ -132,8 +153,8 @@ void gpr_reverse_bytes(char *str, int len) {
}
int gpr_ltoa(long value, char *string) {
+ long sign;
int i = 0;
- int neg = value < 0;
if (value == 0) {
string[0] = '0';
@@ -141,29 +162,63 @@ int gpr_ltoa(long value, char *string) {
return 1;
}
- if (neg) value = -value;
+ sign = value < 0 ? -1 : 1;
while (value) {
- string[i++] = (char)('0' + value % 10);
+ string[i++] = (char)('0' + sign * (value % 10));
value /= 10;
}
- if (neg) string[i++] = '-';
+ if (sign < 0) string[i++] = '-';
+ gpr_reverse_bytes(string, i);
+ string[i] = 0;
+ return i;
+}
+
+int int64_ttoa(int64_t value, char *string) {
+ int64_t sign;
+ int i = 0;
+
+ if (value == 0) {
+ string[0] = '0';
+ string[1] = 0;
+ return 1;
+ }
+
+ sign = value < 0 ? -1 : 1;
+ while (value) {
+ string[i++] = (char)('0' + sign * (value % 10));
+ value /= 10;
+ }
+ if (sign < 0) string[i++] = '-';
gpr_reverse_bytes(string, i);
string[i] = 0;
return i;
}
char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
+ return gpr_strjoin_sep(strs, nstrs, "", final_length);
+}
+
+char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
+ size_t *final_length) {
+ const size_t sep_len = strlen(sep);
size_t out_length = 0;
size_t i;
char *out;
for (i = 0; i < nstrs; i++) {
out_length += strlen(strs[i]);
}
- out_length += 1; /* null terminator */
+ out_length += 1; /* null terminator */
+ if (nstrs > 0) {
+ out_length += sep_len * (nstrs - 1); /* separators */
+ }
out = gpr_malloc(out_length);
out_length = 0;
for (i = 0; i < nstrs; i++) {
- size_t slen = strlen(strs[i]);
+ const size_t slen = strlen(strs[i]);
+ if (i != 0) {
+ memcpy(out + out_length, sep, sep_len);
+ out_length += sep_len;
+ }
memcpy(out + out_length, strs[i], slen);
out_length += slen;
}
@@ -174,10 +229,52 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
return out;
}
-void gpr_strvec_init(gpr_strvec *sv) {
- memset(sv, 0, sizeof(*sv));
+/** Finds the initial (\a begin) and final (\a end) offsets of the next
+ * substring from \a str + \a read_offset until the next \a sep or the end of \a
+ * str.
+ *
+ * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
+static int slice_find_separator_offset(const gpr_slice str, const char *sep,
+ const size_t read_offset, size_t *begin,
+ size_t *end) {
+ size_t i;
+ const uint8_t *str_ptr = GPR_SLICE_START_PTR(str) + read_offset;
+ const size_t str_len = GPR_SLICE_LENGTH(str) - read_offset;
+ const size_t sep_len = strlen(sep);
+ if (str_len < sep_len) {
+ return 0;
+ }
+
+ for (i = 0; i <= str_len - sep_len; i++) {
+ if (memcmp(str_ptr + i, sep, sep_len) == 0) {
+ *begin = read_offset;
+ *end = read_offset + i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst) {
+ const size_t sep_len = strlen(sep);
+ size_t begin, end;
+
+ GPR_ASSERT(sep_len > 0);
+
+ if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) {
+ do {
+ gpr_slice_buffer_add_indexed(dst, gpr_slice_sub(str, begin, end));
+ } while (slice_find_separator_offset(str, sep, end + sep_len, &begin,
+ &end) != 0);
+ gpr_slice_buffer_add_indexed(
+ dst, gpr_slice_sub(str, end + sep_len, GPR_SLICE_LENGTH(str)));
+ } else { /* no sep found, add whole input */
+ gpr_slice_buffer_add_indexed(dst, gpr_slice_ref(str));
+ }
}
+void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); }
+
void gpr_strvec_destroy(gpr_strvec *sv) {
size_t i;
for (i = 0; i < sv->count; i++) {
@@ -189,11 +286,11 @@ void gpr_strvec_destroy(gpr_strvec *sv) {
void gpr_strvec_add(gpr_strvec *sv, char *str) {
if (sv->count == sv->capacity) {
sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2);
- sv->strs = gpr_realloc(sv->strs, sizeof(char*) * sv->capacity);
+ sv->strs = gpr_realloc(sv->strs, sizeof(char *) * sv->capacity);
}
sv->strs[sv->count++] = str;
}
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
- return gpr_strjoin((const char**)sv->strs, sv->count, final_length);
+ return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
}
diff --git a/src/core/support/string.h b/src/core/support/string.h
index 31e9fcb5e9..a367ed7cd8 100644
--- a/src/core/support/string.h
+++ b/src/core/support/string.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,14 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_STRING_H
-#define GRPC_INTERNAL_CORE_SUPPORT_STRING_H
+#ifndef GRPC_CORE_SUPPORT_STRING_H
+#define GRPC_CORE_SUPPORT_STRING_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
+#include <grpc/support/slice_buffer.h>
+#include <grpc/support/slice.h>
#ifdef __cplusplus
extern "C" {
@@ -44,17 +46,21 @@ extern "C" {
/* String utility functions */
-/* flag to include plaintext after a hexdump */
-#define GPR_HEXDUMP_PLAINTEXT 0x00000001
+/* Flags for gpr_dump function. */
+#define GPR_DUMP_HEX 0x00000001
+#define GPR_DUMP_ASCII 0x00000002
-/* Converts array buf, of length len, into a hexadecimal dump. Result should
- be freed with gpr_free() */
-char *gpr_hexdump(const char *buf, size_t len, gpr_uint32 flags);
+/* Converts array buf, of length len, into a C string according to the flags.
+ Result should be freed with gpr_free() */
+char *gpr_dump(const char *buf, size_t len, uint32_t flags);
+
+/* Calls gpr_dump on a slice. */
+char *gpr_dump_slice(gpr_slice slice, uint32_t flags);
/* Parses an array of bytes into an integer (base 10). Returns 1 on success,
0 on failure. */
int gpr_parse_bytes_to_uint32(const char *data, size_t length,
- gpr_uint32 *result);
+ uint32_t *result);
/* Minimum buffer size for calling ltoa */
#define GPR_LTOA_MIN_BUFSIZE (3 * sizeof(long))
@@ -64,6 +70,16 @@ int gpr_parse_bytes_to_uint32(const char *data, size_t length,
output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */
int gpr_ltoa(long value, char *output);
+/* Minimum buffer size for calling int64toa */
+#define GPR_INT64TOA_MIN_BUFSIZE (3 * sizeof(int64_t))
+
+/* Convert an int64 to a string in base 10; returns the length of the
+output string (or 0 on failure).
+output must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long.
+NOTE: This function ensures sufficient bit width even on Win x64,
+where long is 32bit in size. */
+int int64_ttoa(int64_t value, char *output);
+
/* Reverse a run of bytes */
void gpr_reverse_bytes(char *str, int len);
@@ -72,6 +88,16 @@ void gpr_reverse_bytes(char *str, int len);
if it is non-null. */
char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length);
+/* Join a set of strings using a separator, returning the resulting string.
+ Total combined length (excluding null terminator) is returned in total_length
+ if it is non-null. */
+char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
+ size_t *total_length);
+
+/** Split \a str by the separator \a sep. Results are stored in \a dst, which
+ * should be a properly initialized instance. */
+void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst);
+
/* A vector of strings... for building up a final string one piece at a time */
typedef struct {
char **strs;
@@ -92,4 +118,4 @@ char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
}
#endif
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_STRING_H */
+#endif /* GRPC_CORE_SUPPORT_STRING_H */
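For orientation, a sketch of the renamed dump API and the new separator-aware join (the example strings are arbitrary; both results are heap strings the caller must free):

#include <string.h>
#include <grpc/support/alloc.h>
#include "src/core/support/string.h"

static void dump_and_join_demo(void) {
  const char *parts[] = {"grpc", "core", "support"};
  char *joined = gpr_strjoin_sep(parts, 3, "/", NULL); /* "grpc/core/support" */
  char *dumped =
      gpr_dump(joined, strlen(joined), GPR_DUMP_HEX | GPR_DUMP_ASCII);
  gpr_free(dumped);
  gpr_free(joined);
}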
diff --git a/src/core/support/string_win32.c b/src/core/support/string_win32.c
index 6d1d6337a9..3b1f702cf1 100644
--- a/src/core/support/string_win32.c
+++ b/src/core/support/string_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,6 @@
#ifdef GPR_WIN32
-#include <windows.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
@@ -82,31 +81,29 @@ int gpr_asprintf(char **strp, const char *format, ...) {
}
#if defined UNICODE || defined _UNICODE
-LPTSTR gpr_char_to_tchar(LPCSTR input) {
+LPTSTR
+gpr_char_to_tchar(LPCSTR input) {
LPTSTR ret;
int needed = MultiByteToWideChar(CP_UTF8, 0, input, -1, NULL, 0);
- if (needed == 0) return NULL;
- ret = gpr_malloc(needed * sizeof(TCHAR));
+ if (needed <= 0) return NULL;
+ ret = gpr_malloc((unsigned)needed * sizeof(TCHAR));
MultiByteToWideChar(CP_UTF8, 0, input, -1, ret, needed);
return ret;
}
-LPSTR gpr_tchar_to_char(LPCTSTR input) {
+LPSTR
+gpr_tchar_to_char(LPCTSTR input) {
LPSTR ret;
int needed = WideCharToMultiByte(CP_UTF8, 0, input, -1, NULL, 0, NULL, NULL);
- if (needed == 0) return NULL;
- ret = gpr_malloc(needed);
+ if (needed <= 0) return NULL;
+ ret = gpr_malloc((unsigned)needed);
WideCharToMultiByte(CP_UTF8, 0, input, -1, ret, needed, NULL, NULL);
return ret;
}
#else
-char *gpr_tchar_to_char(LPTSTR input) {
- return gpr_strdup(input);
-}
+char *gpr_tchar_to_char(LPTSTR input) { return gpr_strdup(input); }
-char *gpr_char_to_tchar(LPTSTR input) {
- return gpr_strdup(input);
-}
+char *gpr_char_to_tchar(LPTSTR input) { return gpr_strdup(input); }
#endif
#endif /* GPR_WIN32 */
diff --git a/src/core/support/string_win32.h b/src/core/support/string_win32.h
index 0bc3247d9d..c9ae8d9932 100644
--- a/src/core/support/string_win32.h
+++ b/src/core/support/string_win32.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,17 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_STRING_WIN32_H
-#define GRPC_INTERNAL_CORE_SUPPORT_STRING_WIN32_H
+#ifndef GRPC_CORE_SUPPORT_STRING_WIN32_H
+#define GRPC_CORE_SUPPORT_STRING_WIN32_H
#include <grpc/support/port_platform.h>
#ifdef GPR_WIN32
-#include <windows.h>
-
/* These allocate new strings using gpr_malloc to convert from and to utf-8. */
LPTSTR gpr_char_to_tchar(LPCSTR input);
LPSTR gpr_tchar_to_char(LPCTSTR input);
-#endif /* GPR_WIN32 */
+#endif /* GPR_WIN32 */
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_STRING_WIN32_H */
+#endif /* GRPC_CORE_SUPPORT_STRING_WIN32_H */
diff --git a/src/core/support/subprocess_windows.c b/src/core/support/subprocess_windows.c
new file mode 100644
index 0000000000..2b25ef063a
--- /dev/null
+++ b/src/core/support/subprocess_windows.c
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_WINDOWS_SUBPROCESS
+
+#include <windows.h>
+#include <string.h>
+#include <tchar.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/subprocess.h>
+#include "src/core/support/string.h"
+#include "src/core/support/string_win32.h"
+
+struct gpr_subprocess {
+ PROCESS_INFORMATION pi;
+ int joined;
+ int interrupted;
+};
+
+const char *gpr_subprocess_binary_extension() { return ".exe"; }
+
+gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
+ gpr_subprocess *r;
+
+ STARTUPINFO si;
+ PROCESS_INFORMATION pi;
+
+ char *args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL);
+ TCHAR *args_tchar;
+
+ args_tchar = gpr_char_to_tchar(args);
+ gpr_free(args);
+
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+ memset(&pi, 0, sizeof(pi));
+
+ if (!CreateProcess(NULL, args_tchar, NULL, NULL, FALSE,
+ CREATE_NEW_PROCESS_GROUP, NULL, NULL, &si, &pi)) {
+ gpr_free(args_tchar);
+ return NULL;
+ }
+ gpr_free(args_tchar);
+
+ r = gpr_malloc(sizeof(gpr_subprocess));
+ memset(r, 0, sizeof(*r));
+ r->pi = pi;
+ return r;
+}
+
+void gpr_subprocess_destroy(gpr_subprocess *p) {
+ if (p) {
+ if (!p->joined) {
+ gpr_subprocess_interrupt(p);
+ gpr_subprocess_join(p);
+ }
+ if (p->pi.hProcess) {
+ CloseHandle(p->pi.hProcess);
+ }
+ if (p->pi.hThread) {
+ CloseHandle(p->pi.hThread);
+ }
+ gpr_free(p);
+ }
+}
+
+int gpr_subprocess_join(gpr_subprocess *p) {
+ DWORD dwExitCode;
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ if (dwExitCode == STILL_ACTIVE) {
+ if (WaitForSingleObject(p->pi.hProcess, INFINITE) == WAIT_OBJECT_0) {
+ p->joined = 1;
+ goto getExitCode;
+ }
+ return -1; // failed to join
+ } else {
+ goto getExitCode;
+ }
+ } else {
+ return -1; // failed to get exit code
+ }
+
+getExitCode:
+ if (p->interrupted) {
+ return 0;
+ }
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ return (int)dwExitCode;
+ } else {
+ return -1; // failed to get exit code
+ }
+}
+
+void gpr_subprocess_interrupt(gpr_subprocess *p) {
+ DWORD dwExitCode;
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ if (dwExitCode == STILL_ACTIVE) {
+ gpr_log(GPR_INFO, "sending ctrl-break");
+ GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, p->pi.dwProcessId);
+ p->joined = 1;
+ p->interrupted = 1;
+ }
+ }
+ return;
+}
+
+#endif /* GPR_WINDOWS_SUBPROCESS */
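A hedged sketch of the portable gpr_subprocess calls this file implements for Windows (the helper binary name and arguments are invented for the example):

#include <grpc/support/subprocess.h>

static int run_helper(void) {
  const char *args[] = {"helper.exe", "--port", "12345"};
  gpr_subprocess *p = gpr_subprocess_create(3, args);
  int status;
  if (p == NULL) return -1;
  status = gpr_subprocess_join(p); /* waits for exit, returns the exit code */
  gpr_subprocess_destroy(p);
  return status;
}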
diff --git a/src/core/support/sync.c b/src/core/support/sync.c
index 856b5adb86..69e3e39c5c 100644
--- a/src/core/support/sync.c
+++ b/src/core/support/sync.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,7 +59,7 @@ static void event_initialize(void) {
/* Hash ev into an element of sync_array[]. */
static struct sync_array_s *hash(gpr_event *ev) {
- return &sync_array[((gpr_uintptr)ev) % event_sync_partitions];
+ return &sync_array[((uintptr_t)ev) % event_sync_partitions];
}
void gpr_event_init(gpr_event *ev) {
@@ -94,25 +94,15 @@ void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline) {
return result;
}
-void *gpr_event_cancellable_wait(gpr_event *ev, gpr_timespec abs_deadline,
- gpr_cancellable *c) {
- void *result = (void *)gpr_atm_acq_load(&ev->state);
- if (result == NULL) {
- struct sync_array_s *s = hash(ev);
- gpr_mu_lock(&s->mu);
- do {
- result = (void *)gpr_atm_acq_load(&ev->state);
- } while (result == NULL &&
- !gpr_cv_cancellable_wait(&s->cv, &s->mu, abs_deadline, c));
- gpr_mu_unlock(&s->mu);
- }
- return result;
-}
-
void gpr_ref_init(gpr_refcount *r, int n) { gpr_atm_rel_store(&r->count, n); }
void gpr_ref(gpr_refcount *r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); }
+void gpr_ref_non_zero(gpr_refcount *r) {
+ gpr_atm prior = gpr_atm_no_barrier_fetch_add(&r->count, 1);
+ GPR_ASSERT(prior > 0);
+}
+
void gpr_refn(gpr_refcount *r, int n) {
gpr_atm_no_barrier_fetch_add(&r->count, n);
}
@@ -123,15 +113,15 @@ int gpr_unref(gpr_refcount *r) {
return prior == 1;
}
-void gpr_stats_init(gpr_stats_counter *c, gpr_intptr n) {
+void gpr_stats_init(gpr_stats_counter *c, intptr_t n) {
gpr_atm_rel_store(&c->value, n);
}
-void gpr_stats_inc(gpr_stats_counter *c, gpr_intptr inc) {
+void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc) {
gpr_atm_no_barrier_fetch_add(&c->value, inc);
}
-gpr_intptr gpr_stats_read(const gpr_stats_counter *c) {
+intptr_t gpr_stats_read(const gpr_stats_counter *c) {
/* don't need acquire-load, but we have no no-barrier load yet */
return gpr_atm_acq_load(&c->value);
}
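gpr_ref_non_zero is intended for taking an additional reference to an object that must still be alive. A small sketch (the widget type is hypothetical):

#include <grpc/support/sync.h>

typedef struct {
  gpr_refcount refs;
} widget;

static void widget_hold(widget *w) {
  /* aborts via GPR_ASSERT if the count had already dropped to zero */
  gpr_ref_non_zero(&w->refs);
}

static int widget_release(widget *w) {
  return gpr_unref(&w->refs); /* 1 when the last reference is dropped */
}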
diff --git a/src/core/support/sync_posix.c b/src/core/support/sync_posix.c
index 0ccbd4923f..d3c483f1b5 100644
--- a/src/core/support/sync_posix.c
+++ b/src/core/support/sync_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,34 +40,48 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/profiling/timers.h"
-void gpr_mu_init(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
+void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
-void gpr_mu_destroy(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
+void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
-void gpr_mu_lock(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_lock(mu) == 0); }
+void gpr_mu_lock(gpr_mu* mu) {
+ GPR_TIMER_BEGIN("gpr_mu_lock", 0);
+ GPR_ASSERT(pthread_mutex_lock(mu) == 0);
+ GPR_TIMER_END("gpr_mu_lock", 0);
+}
-void gpr_mu_unlock(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_unlock(mu) == 0); }
+void gpr_mu_unlock(gpr_mu* mu) {
+ GPR_TIMER_BEGIN("gpr_mu_unlock", 0);
+ GPR_ASSERT(pthread_mutex_unlock(mu) == 0);
+ GPR_TIMER_END("gpr_mu_unlock", 0);
+}
-int gpr_mu_trylock(gpr_mu *mu) {
- int err = pthread_mutex_trylock(mu);
+int gpr_mu_trylock(gpr_mu* mu) {
+ int err;
+ GPR_TIMER_BEGIN("gpr_mu_trylock", 0);
+ err = pthread_mutex_trylock(mu);
GPR_ASSERT(err == 0 || err == EBUSY);
+ GPR_TIMER_END("gpr_mu_trylock", 0);
return err == 0;
}
/*----------------------------------------*/
-void gpr_cv_init(gpr_cv *cv) { GPR_ASSERT(pthread_cond_init(cv, NULL) == 0); }
+void gpr_cv_init(gpr_cv* cv) { GPR_ASSERT(pthread_cond_init(cv, NULL) == 0); }
-void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
+void gpr_cv_destroy(gpr_cv* cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
-int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
+int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
int err = 0;
- if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
+ if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
+ 0) {
err = pthread_cond_wait(cv, mu);
} else {
struct timespec abs_deadline_ts;
- abs_deadline_ts.tv_sec = abs_deadline.tv_sec;
+ abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
+ abs_deadline_ts.tv_sec = (time_t)abs_deadline.tv_sec;
abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);
}
@@ -75,15 +89,15 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
return err == ETIMEDOUT;
}
-void gpr_cv_signal(gpr_cv *cv) { GPR_ASSERT(pthread_cond_signal(cv) == 0); }
+void gpr_cv_signal(gpr_cv* cv) { GPR_ASSERT(pthread_cond_signal(cv) == 0); }
-void gpr_cv_broadcast(gpr_cv *cv) {
+void gpr_cv_broadcast(gpr_cv* cv) {
GPR_ASSERT(pthread_cond_broadcast(cv) == 0);
}
/*----------------------------------------*/
-void gpr_once_init(gpr_once *once, void (*init_function)(void)) {
+void gpr_once_init(gpr_once* once, void (*init_function)(void)) {
GPR_ASSERT(pthread_once(once, init_function) == 0);
}
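With clock types now checked, a gpr_cv_wait deadline is best built by adding a GPR_TIMESPAN to a realtime now. A minimal sketch:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* mu must already be held by the caller */
static int wait_up_to_100ms(gpr_cv *cv, gpr_mu *mu) {
  gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                       gpr_time_from_millis(100, GPR_TIMESPAN));
  return gpr_cv_wait(cv, mu, deadline); /* nonzero on timeout */
}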
diff --git a/src/core/support/sync_win32.c b/src/core/support/sync_win32.c
index cc31d9b052..41998ebcb6 100644
--- a/src/core/support/sync_win32.c
+++ b/src/core/support/sync_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,9 +37,6 @@
#ifdef GPR_WIN32
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x0600
-#include <windows.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -86,17 +83,23 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int timeout = 0;
DWORD timeout_max_ms;
mu->locked = 0;
- if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
+ if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
+ 0) {
SleepConditionVariableCS(cv, &mu->cs, INFINITE);
} else {
- gpr_timespec now = gpr_now();
- gpr_int64 now_ms = now.tv_sec * 1000 + now.tv_nsec / 1000000;
- gpr_int64 deadline_ms =
- abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;
+ abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
+ gpr_timespec now = gpr_now(abs_deadline.clock_type);
+ int64_t now_ms = (int64_t)now.tv_sec * 1000 + now.tv_nsec / 1000000;
+ int64_t deadline_ms =
+ (int64_t)abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;
if (now_ms >= deadline_ms) {
timeout = 1;
} else {
- timeout_max_ms = (DWORD)min(deadline_ms - now_ms, INFINITE - 1);
+ if ((deadline_ms - now_ms) >= INFINITE) {
+ timeout_max_ms = INFINITE - 1;
+ } else {
+ timeout_max_ms = (DWORD)(deadline_ms - now_ms);
+ }
timeout = (SleepConditionVariableCS(cv, &mu->cs, timeout_max_ms) == 0 &&
GetLastError() == ERROR_TIMEOUT);
}
diff --git a/src/core/support/thd.c b/src/core/support/thd.c
index ec308f3119..41daeb5d0e 100644
--- a/src/core/support/thd.c
+++ b/src/core/support/thd.c
@@ -37,9 +37,7 @@
#include <grpc/support/thd.h>
-enum {
- GPR_THD_JOINABLE = 1
-};
+enum { GPR_THD_JOINABLE = 1 };
gpr_thd_options gpr_thd_options_default(void) {
gpr_thd_options options;
@@ -47,20 +45,20 @@ gpr_thd_options gpr_thd_options_default(void) {
return options;
}
-void gpr_thd_options_set_detached(gpr_thd_options *options) {
+void gpr_thd_options_set_detached(gpr_thd_options* options) {
options->flags &= ~GPR_THD_JOINABLE;
}
-void gpr_thd_options_set_joinable(gpr_thd_options *options) {
+void gpr_thd_options_set_joinable(gpr_thd_options* options) {
options->flags |= GPR_THD_JOINABLE;
}
-int gpr_thd_options_is_detached(const gpr_thd_options *options) {
+int gpr_thd_options_is_detached(const gpr_thd_options* options) {
if (!options) return 1;
return (options->flags & GPR_THD_JOINABLE) == 0;
}
-int gpr_thd_options_is_joinable(const gpr_thd_options *options) {
+int gpr_thd_options_is_joinable(const gpr_thd_options* options) {
if (!options) return 0;
return (options->flags & GPR_THD_JOINABLE) == GPR_THD_JOINABLE;
}
diff --git a/src/core/support/thd_internal.h b/src/core/support/thd_internal.h
index 4683c37742..33b904e59b 100644
--- a/src/core/support/thd_internal.h
+++ b/src/core/support/thd_internal.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,9 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SUPPORT_THD_INTERNAL_H
-#define GRPC_INTERNAL_CORE_SUPPORT_THD_INTERNAL_H
+#ifndef GRPC_CORE_SUPPORT_THD_INTERNAL_H
+#define GRPC_CORE_SUPPORT_THD_INTERNAL_H
/* Internal interfaces between modules within the gpr support library. */
-#endif /* GRPC_INTERNAL_CORE_SUPPORT_THD_INTERNAL_H */
+#endif /* GRPC_CORE_SUPPORT_THD_INTERNAL_H */
diff --git a/src/core/support/thd_posix.c b/src/core/support/thd_posix.c
index fa4eb50556..653a1c88c1 100644
--- a/src/core/support/thd_posix.c
+++ b/src/core/support/thd_posix.c
@@ -53,7 +53,7 @@ struct thd_arg {
/* Body of every thread started via gpr_thd_new. */
static void *thread_body(void *v) {
struct thd_arg a = *(struct thd_arg *)v;
- gpr_free(v);
+ free(v);
(*a.body)(a.arg);
return NULL;
}
@@ -63,31 +63,32 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
int thread_started;
pthread_attr_t attr;
pthread_t p;
- struct thd_arg *a = gpr_malloc(sizeof(*a));
+ /* don't use gpr_malloc as we may cause an infinite recursion with
+ * the profiling code */
+ struct thd_arg *a = malloc(sizeof(*a));
+ GPR_ASSERT(a != NULL);
a->body = thd_body;
a->arg = arg;
GPR_ASSERT(pthread_attr_init(&attr) == 0);
if (gpr_thd_options_is_detached(options)) {
- GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+ GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) ==
+ 0);
} else {
- GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == 0);
+ GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) ==
+ 0);
}
thread_started = (pthread_create(&p, &attr, &thread_body, a) == 0);
GPR_ASSERT(pthread_attr_destroy(&attr) == 0);
if (!thread_started) {
- gpr_free(a);
+ free(a);
}
*t = (gpr_thd_id)p;
return thread_started;
}
-gpr_thd_id gpr_thd_currentid(void) {
- return (gpr_thd_id)pthread_self();
-}
+gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)pthread_self(); }
-void gpr_thd_join(gpr_thd_id t) {
- pthread_join((pthread_t)t, NULL);
-}
+void gpr_thd_join(gpr_thd_id t) { pthread_join((pthread_t)t, NULL); }
#endif /* GPR_POSIX_SYNC */
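A sketch of starting and joining a thread through the options API touched above (thread_main is a placeholder worker):

#include <grpc/support/thd.h>

static void thread_main(void *arg) { (void)arg; /* ... do work ... */ }

static void run_joinable_thread(void) {
  gpr_thd_id tid;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  if (gpr_thd_new(&tid, thread_main, NULL, &options)) {
    gpr_thd_join(tid);
  }
}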
diff --git a/src/core/support/thd_win32.c b/src/core/support/thd_win32.c
index 3cc798293a..a9db180c1b 100644
--- a/src/core/support/thd_win32.c
+++ b/src/core/support/thd_win32.c
@@ -37,7 +37,6 @@
#ifdef GPR_WIN32
-#include <windows.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -106,9 +105,7 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
return handle != NULL;
}
-gpr_thd_id gpr_thd_currentid(void) {
- return (gpr_thd_id)g_thd_info;
-}
+gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; }
void gpr_thd_join(gpr_thd_id t) {
struct thd_info *info = (struct thd_info *)t;
diff --git a/src/core/support/time.c b/src/core/support/time.c
index d47b08b266..423d12ffc0 100644
--- a/src/core/support/time.c
+++ b/src/core/support/time.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,6 +41,7 @@
int gpr_time_cmp(gpr_timespec a, gpr_timespec b) {
int cmp = (a.tv_sec > b.tv_sec) - (a.tv_sec < b.tv_sec);
+ GPR_ASSERT(a.clock_type == b.clock_type);
if (cmp == 0) {
cmp = (a.tv_nsec > b.tv_nsec) - (a.tv_nsec < b.tv_nsec);
}
@@ -55,86 +56,94 @@ gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b) {
return gpr_time_cmp(a, b) > 0 ? a : b;
}
-/* There's no standard TIME_T_MIN and TIME_T_MAX, so we construct them. The
- following assumes that signed types are two's-complement and that bytes are
- 8 bits. */
-
-/* The top bit of integral type t. */
-#define TOP_BIT_OF_TYPE(t) (((gpr_uintmax)1) << ((8 * sizeof(t)) - 1))
-
-/* Return whether integral type t is signed. */
-#define TYPE_IS_SIGNED(t) (((t)1) > (t) ~(t)0)
+gpr_timespec gpr_time_0(gpr_clock_type type) {
+ gpr_timespec out;
+ out.tv_sec = 0;
+ out.tv_nsec = 0;
+ out.clock_type = type;
+ return out;
+}
-/* The minimum and maximum value of integral type t. */
-#define TYPE_MIN(t) ((t)(TYPE_IS_SIGNED(t) ? TOP_BIT_OF_TYPE(t) : 0))
-#define TYPE_MAX(t) \
- ((t)(TYPE_IS_SIGNED(t) ? (TOP_BIT_OF_TYPE(t) - 1) \
- : ((TOP_BIT_OF_TYPE(t) - 1) << 1) + 1))
+gpr_timespec gpr_inf_future(gpr_clock_type type) {
+ gpr_timespec out;
+ out.tv_sec = INT64_MAX;
+ out.tv_nsec = 0;
+ out.clock_type = type;
+ return out;
+}
-const gpr_timespec gpr_time_0 = {0, 0};
-const gpr_timespec gpr_inf_future = {TYPE_MAX(time_t), 0};
-const gpr_timespec gpr_inf_past = {TYPE_MIN(time_t), 0};
+gpr_timespec gpr_inf_past(gpr_clock_type type) {
+ gpr_timespec out;
+ out.tv_sec = INT64_MIN;
+ out.tv_nsec = 0;
+ out.clock_type = type;
+ return out;
+}
/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single
function for maintainability. Similarly for _seconds, _minutes, and _hours */
-gpr_timespec gpr_time_from_nanos(long ns) {
+gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) {
gpr_timespec result;
- if (ns == LONG_MAX) {
- result = gpr_inf_future;
- } else if (ns == LONG_MIN) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (ns == INT64_MAX) {
+ result = gpr_inf_future(type);
+ } else if (ns == INT64_MIN) {
+ result = gpr_inf_past(type);
} else if (ns >= 0) {
result.tv_sec = ns / GPR_NS_PER_SEC;
- result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
+ result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
- result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
+ result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
}
return result;
}
-gpr_timespec gpr_time_from_micros(long us) {
+gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
gpr_timespec result;
- if (us == LONG_MAX) {
- result = gpr_inf_future;
- } else if (us == LONG_MIN) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (us == INT64_MAX) {
+ result = gpr_inf_future(type);
+ } else if (us == INT64_MIN) {
+ result = gpr_inf_past(type);
} else if (us >= 0) {
result.tv_sec = us / 1000000;
- result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
+ result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
- result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
+ result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
}
return result;
}
-gpr_timespec gpr_time_from_millis(long ms) {
+gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) {
gpr_timespec result;
- if (ms == LONG_MAX) {
- result = gpr_inf_future;
- } else if (ms == LONG_MIN) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (ms == INT64_MAX) {
+ result = gpr_inf_future(type);
+ } else if (ms == INT64_MIN) {
+ result = gpr_inf_past(type);
} else if (ms >= 0) {
result.tv_sec = ms / 1000;
- result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
+ result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
- result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
+ result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
}
return result;
}
-gpr_timespec gpr_time_from_seconds(long s) {
+gpr_timespec gpr_time_from_seconds(int64_t s, gpr_clock_type type) {
gpr_timespec result;
- if (s == LONG_MAX) {
- result = gpr_inf_future;
- } else if (s == LONG_MIN) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (s == INT64_MAX) {
+ result = gpr_inf_future(type);
+ } else if (s == INT64_MIN) {
+ result = gpr_inf_past(type);
} else {
result.tv_sec = s;
result.tv_nsec = 0;
@@ -142,12 +151,13 @@ gpr_timespec gpr_time_from_seconds(long s) {
return result;
}
-gpr_timespec gpr_time_from_minutes(long m) {
+gpr_timespec gpr_time_from_minutes(int64_t m, gpr_clock_type type) {
gpr_timespec result;
- if (m >= LONG_MAX / 60) {
- result = gpr_inf_future;
- } else if (m <= LONG_MIN / 60) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (m >= INT64_MAX / 60) {
+ result = gpr_inf_future(type);
+ } else if (m <= INT64_MIN / 60) {
+ result = gpr_inf_past(type);
} else {
result.tv_sec = m * 60;
result.tv_nsec = 0;
@@ -155,12 +165,13 @@ gpr_timespec gpr_time_from_minutes(long m) {
return result;
}
-gpr_timespec gpr_time_from_hours(long h) {
+gpr_timespec gpr_time_from_hours(int64_t h, gpr_clock_type type) {
gpr_timespec result;
- if (h >= LONG_MAX / 3600) {
- result = gpr_inf_future;
- } else if (h <= LONG_MIN / 3600) {
- result = gpr_inf_past;
+ result.clock_type = type;
+ if (h >= INT64_MAX / 3600) {
+ result = gpr_inf_future(type);
+ } else if (h <= INT64_MIN / 3600) {
+ result = gpr_inf_past(type);
} else {
result.tv_sec = h * 3600;
result.tv_nsec = 0;
@@ -170,24 +181,26 @@ gpr_timespec gpr_time_from_hours(long h) {
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec sum;
- int inc = 0;
+ int64_t inc = 0;
+ GPR_ASSERT(b.clock_type == GPR_TIMESPAN);
+ sum.clock_type = a.clock_type;
sum.tv_nsec = a.tv_nsec + b.tv_nsec;
if (sum.tv_nsec >= GPR_NS_PER_SEC) {
sum.tv_nsec -= GPR_NS_PER_SEC;
inc++;
}
- if (a.tv_sec == TYPE_MAX(time_t) || a.tv_sec == TYPE_MIN(time_t)) {
+ if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) {
sum = a;
- } else if (b.tv_sec == TYPE_MAX(time_t) ||
- (b.tv_sec >= 0 && a.tv_sec >= TYPE_MAX(time_t) - b.tv_sec)) {
- sum = gpr_inf_future;
- } else if (b.tv_sec == TYPE_MIN(time_t) ||
- (b.tv_sec <= 0 && a.tv_sec <= TYPE_MIN(time_t) - b.tv_sec)) {
- sum = gpr_inf_past;
+ } else if (b.tv_sec == INT64_MAX ||
+ (b.tv_sec >= 0 && a.tv_sec >= INT64_MAX - b.tv_sec)) {
+ sum = gpr_inf_future(sum.clock_type);
+ } else if (b.tv_sec == INT64_MIN ||
+ (b.tv_sec <= 0 && a.tv_sec <= INT64_MIN - b.tv_sec)) {
+ sum = gpr_inf_past(sum.clock_type);
} else {
sum.tv_sec = a.tv_sec + b.tv_sec;
- if (inc != 0 && sum.tv_sec == TYPE_MAX(time_t) - 1) {
- sum = gpr_inf_future;
+ if (inc != 0 && sum.tv_sec == INT64_MAX - 1) {
+ sum = gpr_inf_future(sum.clock_type);
} else {
sum.tv_sec += inc;
}
@@ -197,24 +210,30 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
gpr_timespec diff;
- int dec = 0;
+ int64_t dec = 0;
+ if (b.clock_type == GPR_TIMESPAN) {
+ diff.clock_type = a.clock_type;
+ } else {
+ GPR_ASSERT(a.clock_type == b.clock_type);
+ diff.clock_type = GPR_TIMESPAN;
+ }
diff.tv_nsec = a.tv_nsec - b.tv_nsec;
if (diff.tv_nsec < 0) {
diff.tv_nsec += GPR_NS_PER_SEC;
dec++;
}
- if (a.tv_sec == TYPE_MAX(time_t) || a.tv_sec == TYPE_MIN(time_t)) {
+ if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) {
diff = a;
- } else if (b.tv_sec == TYPE_MIN(time_t) ||
- (b.tv_sec <= 0 && a.tv_sec >= TYPE_MAX(time_t) + b.tv_sec)) {
- diff = gpr_inf_future;
- } else if (b.tv_sec == TYPE_MAX(time_t) ||
- (b.tv_sec >= 0 && a.tv_sec <= TYPE_MIN(time_t) + b.tv_sec)) {
- diff = gpr_inf_past;
+ } else if (b.tv_sec == INT64_MIN ||
+ (b.tv_sec <= 0 && a.tv_sec >= INT64_MAX + b.tv_sec)) {
+ diff = gpr_inf_future(GPR_CLOCK_REALTIME);
+ } else if (b.tv_sec == INT64_MAX ||
+ (b.tv_sec >= 0 && a.tv_sec <= INT64_MIN + b.tv_sec)) {
+ diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec = a.tv_sec - b.tv_sec;
- if (dec != 0 && diff.tv_sec == TYPE_MIN(time_t) + 1) {
- diff = gpr_inf_past;
+ if (dec != 0 && diff.tv_sec == INT64_MIN + 1) {
+ diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec -= dec;
}
@@ -225,6 +244,9 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold) {
int cmp_ab;
+ GPR_ASSERT(a.clock_type == b.clock_type);
+ GPR_ASSERT(threshold.clock_type == GPR_TIMESPAN);
+
cmp_ab = gpr_time_cmp(a, b);
if (cmp_ab == 0) return 1;
if (cmp_ab < 0) {
@@ -234,7 +256,7 @@ int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold) {
}
}
-gpr_int32 gpr_time_to_millis(gpr_timespec t) {
+int32_t gpr_time_to_millis(gpr_timespec t) {
if (t.tv_sec >= 2147483) {
if (t.tv_sec == 2147483 && t.tv_nsec < 648 * GPR_NS_PER_MS) {
return 2147483 * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS;
@@ -245,10 +267,38 @@ gpr_int32 gpr_time_to_millis(gpr_timespec t) {
care?) */
return -2147483647;
} else {
- return (gpr_int32)(t.tv_sec * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS);
+ return (int32_t)(t.tv_sec * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS);
}
}
double gpr_timespec_to_micros(gpr_timespec t) {
return (double)t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3;
}
+
+gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) {
+ if (t.clock_type == clock_type) {
+ return t;
+ }
+
+ if (t.tv_nsec == 0) {
+ if (t.tv_sec == INT64_MAX) {
+ t.clock_type = clock_type;
+ return t;
+ }
+ if (t.tv_sec == INT64_MIN) {
+ t.clock_type = clock_type;
+ return t;
+ }
+ }
+
+ if (clock_type == GPR_TIMESPAN) {
+ return gpr_time_sub(t, gpr_now(t.clock_type));
+ }
+
+ if (t.clock_type == GPR_TIMESPAN) {
+ return gpr_time_add(gpr_now(clock_type), t);
+ }
+
+ return gpr_time_add(gpr_now(clock_type),
+ gpr_time_sub(t, gpr_now(t.clock_type)));
+}
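
For context, the new gpr_convert_clock_type above reroutes a timestamp through GPR_TIMESPAN arithmetic so that a value taken on one clock can be used against another. A minimal usage sketch (illustrative only, not part of the patch; it relies solely on the <grpc/support/time.h> functions visible in this diff):

#include <grpc/support/time.h>
#include <stdio.h>

int main(void) {
  /* a deadline one hour from now, expressed on the realtime clock */
  gpr_timespec deadline =
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                   gpr_time_from_hours(1, GPR_TIMESPAN));
  /* convert it to the monotonic clock before handing it to timer code */
  gpr_timespec mono = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  printf("realtime: %lld.%09d  monotonic: %lld.%09d\n",
         (long long)deadline.tv_sec, deadline.tv_nsec,
         (long long)mono.tv_sec, mono.tv_nsec);
  return 0;
}
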
diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c
index afb58ef231..36d75e7da2 100644
--- a/src/core/support/time_posix.c
+++ b/src/core/support/time_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,45 +32,112 @@
*/
#include <grpc/support/port_platform.h>
+#include <src/core/support/time_precise.h>
#ifdef GPR_POSIX_TIME
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+#include <grpc/support/log.h>
#include <grpc/support/time.h>
+#include "src/core/support/block_annotate.h"
static struct timespec timespec_from_gpr(gpr_timespec gts) {
struct timespec rv;
- rv.tv_sec = gts.tv_sec;
+ if (sizeof(time_t) < sizeof(int64_t)) {
+ /* fine to assert, as this is only used in gpr_sleep_until */
+ GPR_ASSERT(gts.tv_sec <= INT32_MAX && gts.tv_sec >= INT32_MIN);
+ }
+ rv.tv_sec = (time_t)gts.tv_sec;
rv.tv_nsec = gts.tv_nsec;
return rv;
}
#if _POSIX_TIMERS > 0
-static gpr_timespec gpr_from_timespec(struct timespec ts) {
+static gpr_timespec gpr_from_timespec(struct timespec ts,
+ gpr_clock_type clock_type) {
+ /*
+ * timespec.tv_sec can have smaller size than gpr_timespec.tv_sec,
+ * but we are only using this function to implement gpr_now
+ * so there's no need to handle "infinity" values.
+ */
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
- rv.tv_nsec = (int)ts.tv_nsec;
+ rv.tv_nsec = (int32_t)ts.tv_nsec;
+ rv.clock_type = clock_type;
return rv;
}
-gpr_timespec gpr_now(void) {
+/** maps gpr_clock_type --> clockid_t for clock_gettime */
+static const clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC,
+ CLOCK_REALTIME};
+
+void gpr_time_init(void) { gpr_precise_clock_init(); }
+
+gpr_timespec gpr_now(gpr_clock_type clock_type) {
struct timespec now;
- clock_gettime(CLOCK_REALTIME, &now);
- return gpr_from_timespec(now);
+ GPR_ASSERT(clock_type != GPR_TIMESPAN);
+ if (clock_type == GPR_CLOCK_PRECISE) {
+ gpr_timespec ret;
+ gpr_precise_clock_now(&ret);
+ return ret;
+ } else {
+#if defined(GPR_BACKWARDS_COMPATIBILITY_MODE) && defined(__linux__)
+ /* avoid ABI problems by invoking syscalls directly */
+ syscall(SYS_clock_gettime, clockid_for_gpr_clock[clock_type], &now);
+#else
+ clock_gettime(clockid_for_gpr_clock[clock_type], &now);
+#endif
+ return gpr_from_timespec(now, clock_type);
+ }
}
#else
/* For some reason Apple's OSes haven't implemented clock_gettime. */
#include <sys/time.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+
+static double g_time_scale;
+static uint64_t g_time_start;
-gpr_timespec gpr_now(void) {
+void gpr_time_init(void) {
+ mach_timebase_info_data_t tb = {0, 1};
+ gpr_precise_clock_init();
+ mach_timebase_info(&tb);
+ g_time_scale = tb.numer;
+ g_time_scale /= tb.denom;
+ g_time_start = mach_absolute_time();
+}
+
+gpr_timespec gpr_now(gpr_clock_type clock) {
gpr_timespec now;
struct timeval now_tv;
- gettimeofday(&now_tv, NULL);
- now.tv_sec = now_tv.tv_sec;
- now.tv_nsec = now_tv.tv_usec * 1000;
+ double now_dbl;
+
+ now.clock_type = clock;
+ switch (clock) {
+ case GPR_CLOCK_REALTIME:
+ gettimeofday(&now_tv, NULL);
+ now.tv_sec = now_tv.tv_sec;
+ now.tv_nsec = now_tv.tv_usec * 1000;
+ break;
+ case GPR_CLOCK_MONOTONIC:
+ now_dbl = (mach_absolute_time() - g_time_start) * g_time_scale;
+ now.tv_sec = (int64_t)(now_dbl * 1e-9);
+ now.tv_nsec = (int32_t)(now_dbl - ((double)now.tv_sec) * 1e9);
+ break;
+ case GPR_CLOCK_PRECISE:
+ gpr_precise_clock_now(&now);
+ break;
+ case GPR_TIMESPAN:
+ abort();
+ }
+
return now;
}
#endif
@@ -79,18 +146,22 @@ void gpr_sleep_until(gpr_timespec until) {
gpr_timespec now;
gpr_timespec delta;
struct timespec delta_ts;
+ int ns_result;
for (;;) {
/* We could simplify by using clock_nanosleep instead, but it might be
* slightly less portable. */
- now = gpr_now();
+ now = gpr_now(until.clock_type);
if (gpr_time_cmp(until, now) <= 0) {
return;
}
delta = gpr_time_sub(until, now);
delta_ts = timespec_from_gpr(delta);
- if (nanosleep(&delta_ts, NULL) == 0) {
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ ns_result = nanosleep(&delta_ts, NULL);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ if (ns_result == 0) {
break;
}
}
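
The clock-aware gpr_now / gpr_sleep_until pair above can be exercised with a small sketch like the following (pause_briefly is a made-up helper; the calls themselves are exactly the ones introduced in this diff):

#include <grpc/support/time.h>

/* sleep for roughly 50 milliseconds on the monotonic clock */
static void pause_briefly(void) {
  gpr_timespec delay;
  delay.tv_sec = 0;
  delay.tv_nsec = 50 * 1000 * 1000;
  delay.clock_type = GPR_TIMESPAN; /* gpr_time_add asserts this */
  gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), delay));
}
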
diff --git a/src/core/profiling/timers_preciseclock.h b/src/core/support/time_precise.c
index 163d52b797..a2cf74bc84 100644
--- a/src/core/profiling/timers_preciseclock.h
+++ b/src/core/support/time_precise.c
@@ -31,65 +31,59 @@
*
*/
-#ifndef GRPC_CORE_PROFILING_TIMERS_PRECISECLOCK_H
-#define GRPC_CORE_PROFILING_TIMERS_PRECISECLOCK_H
-
-#include <grpc/support/sync.h>
+#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <stdio.h>
#ifdef GRPC_TIMERS_RDTSC
-typedef long long int grpc_precise_clock;
#if defined(__i386__)
-static void grpc_precise_clock_now(grpc_precise_clock *clk) {
- grpc_precise_clock ret;
+static void gpr_get_cycle_counter(long long int *clk) {
+ long long int ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
}
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
-static void grpc_precise_clock_now(grpc_precise_clock *clk) {
+static void gpr_get_cycle_counter(long long int *clk) {
unsigned long long low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- *clk = (high << 32) | low;
+ *clk = (long long)(high << 32) | (long long)low;
}
#endif
-static gpr_once precise_clock_init = GPR_ONCE_INIT;
-static double cycles_per_second = 0.0;
-static void grpc_precise_clock_init() {
- time_t start = time(NULL);
- grpc_precise_clock start_time;
- grpc_precise_clock end_time;
+
+static double cycles_per_second = 0;
+static long long int start_cycle;
+void gpr_precise_clock_init(void) {
+ time_t start;
+ long long end_cycle;
+ gpr_log(GPR_DEBUG, "Calibrating timers");
+ start = time(NULL);
while (time(NULL) == start)
;
- grpc_precise_clock_now(&start_time);
- while (time(NULL) == start + 1)
+ gpr_get_cycle_counter(&start_cycle);
+ while (time(NULL) <= start + 10)
;
- grpc_precise_clock_now(&end_time);
- cycles_per_second = end_time - start_time;
+ gpr_get_cycle_counter(&end_cycle);
+ cycles_per_second = (double)(end_cycle - start_cycle) / 10.0;
+ gpr_log(GPR_DEBUG, "... cycles_per_second = %f\n", cycles_per_second);
}
-static double grpc_precise_clock_scaling_factor() {
- gpr_once_init(&precise_clock_init, grpc_precise_clock_init);
- return 1e6 / cycles_per_second;
-}
-#define GRPC_PRECISE_CLOCK_FORMAT "%f"
-#define GRPC_PRECISE_CLOCK_PRINTF_ARGS(clk) \
- (*(clk)*grpc_precise_clock_scaling_factor())
-#else
-typedef struct grpc_precise_clock grpc_precise_clock;
-struct grpc_precise_clock {
- gpr_timespec clock;
-};
-static void grpc_precise_clock_now(grpc_precise_clock* clk) {
- clk->clock = gpr_now();
+
+void gpr_precise_clock_now(gpr_timespec *clk) {
+ long long int counter;
+ double secs;
+ gpr_get_cycle_counter(&counter);
+ secs = (double)(counter - start_cycle) / cycles_per_second;
+ clk->clock_type = GPR_CLOCK_PRECISE;
+ clk->tv_sec = (int64_t)secs;
+ clk->tv_nsec = (int32_t)(1e9 * (secs - (double)clk->tv_sec));
}
-#define GRPC_PRECISE_CLOCK_FORMAT "%ld.%09d"
-#define GRPC_PRECISE_CLOCK_PRINTF_ARGS(clk) \
- (clk)->clock.tv_sec, (clk)->clock.tv_nsec
-static void grpc_precise_clock_print(const grpc_precise_clock* clk, FILE* fp) {
- fprintf(fp, "%ld.%09d", clk->clock.tv_sec, clk->clock.tv_nsec);
+
+#else /* GRPC_TIMERS_RDTSC */
+void gpr_precise_clock_init(void) {}
+
+void gpr_precise_clock_now(gpr_timespec *clk) {
+ *clk = gpr_now(GPR_CLOCK_REALTIME);
+ clk->clock_type = GPR_CLOCK_PRECISE;
}
#endif /* GRPC_TIMERS_RDTSC */
-
-#endif /* GRPC_CORE_PROFILING_TIMERS_PRECISECLOCK_H */
diff --git a/src/core/support/time_precise.h b/src/core/support/time_precise.h
new file mode 100644
index 0000000000..871c99a623
--- /dev/null
+++ b/src/core/support/time_precise.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SUPPORT_TIME_PRECISE_H
+#define GRPC_CORE_SUPPORT_TIME_PRECISE_H
+
+#include <grpc/support/time.h>
+
+void gpr_precise_clock_init(void);
+void gpr_precise_clock_now(gpr_timespec *clk);
+
+#endif /* GRPC_CORE_SUPPORT_TIME_PRECISE_H */
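
A rough sketch of how the two declarations above could be used to time a region of code (illustrative; inside the library gpr_precise_clock_init is normally reached via gpr_time_init, and GRPC_TIMERS_RDTSC decides whether the cycle counter or the realtime clock backs it):

#include "src/core/support/time_precise.h"
#include <grpc/support/time.h>
#include <stdio.h>

int main(void) {
  gpr_timespec start, end, elapsed;
  gpr_precise_clock_init(); /* calibrates the cycle counter when enabled */
  gpr_precise_clock_now(&start);
  /* ... region under measurement ... */
  gpr_precise_clock_now(&end);
  elapsed = gpr_time_sub(end, start); /* same clock type, so a GPR_TIMESPAN */
  printf("elapsed: %lld.%09d s\n", (long long)elapsed.tv_sec, elapsed.tv_nsec);
  return 0;
}
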
diff --git a/src/core/support/time_win32.c b/src/core/support/time_win32.c
index f4443b5c2d..8af957e6f4 100644
--- a/src/core/support/time_win32.c
+++ b/src/core/support/time_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,35 +37,73 @@
#ifdef GPR_WIN32
+#include <grpc/support/log.h>
#include <grpc/support/time.h>
+#include <src/core/support/time_precise.h>
#include <sys/timeb.h>
-#include <windows.h>
+#include <process.h>
+#include <limits.h>
-gpr_timespec gpr_now(void) {
+#include "src/core/support/block_annotate.h"
+
+static LARGE_INTEGER g_start_time;
+static double g_time_scale;
+
+void gpr_time_init(void) {
+ LARGE_INTEGER frequency;
+ QueryPerformanceFrequency(&frequency);
+ QueryPerformanceCounter(&g_start_time);
+ g_time_scale = 1.0 / (double)frequency.QuadPart;
+}
+
+gpr_timespec gpr_now(gpr_clock_type clock) {
gpr_timespec now_tv;
+ LONGLONG diff;
struct _timeb now_tb;
- _ftime_s(&now_tb);
- now_tv.tv_sec = now_tb.time;
- now_tv.tv_nsec = now_tb.millitm * 1000000;
+ LARGE_INTEGER timestamp;
+ double now_dbl;
+ now_tv.clock_type = clock;
+ switch (clock) {
+ case GPR_CLOCK_REALTIME:
+ _ftime_s(&now_tb);
+ now_tv.tv_sec = (int64_t)now_tb.time;
+ now_tv.tv_nsec = now_tb.millitm * 1000000;
+ break;
+ case GPR_CLOCK_MONOTONIC:
+ case GPR_CLOCK_PRECISE:
+ QueryPerformanceCounter(&timestamp);
+ diff = timestamp.QuadPart - g_start_time.QuadPart;
+ now_dbl = (double)diff * g_time_scale;
+ now_tv.tv_sec = (int64_t)now_dbl;
+ now_tv.tv_nsec = (int32_t)((now_dbl - (double)now_tv.tv_sec) * 1e9);
+ break;
+ case GPR_TIMESPAN:
+ abort();
+ break;
+ }
return now_tv;
}
void gpr_sleep_until(gpr_timespec until) {
gpr_timespec now;
gpr_timespec delta;
- DWORD sleep_millis;
+ int64_t sleep_millis;
for (;;) {
/* We could simplify by using clock_nanosleep instead, but it might be
* slightly less portable. */
- now = gpr_now();
+ now = gpr_now(until.clock_type);
if (gpr_time_cmp(until, now) <= 0) {
return;
}
delta = gpr_time_sub(until, now);
- sleep_millis = (DWORD)delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
- Sleep(sleep_millis);
+ sleep_millis =
+ delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
+ GPR_ASSERT((sleep_millis >= 0) && (sleep_millis <= INT_MAX));
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ Sleep((DWORD)sleep_millis);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
}
}
diff --git a/src/core/support/tls_pthread.c b/src/core/support/tls_pthread.c
index f2e76a553f..9683a6e547 100644
--- a/src/core/support/tls_pthread.c
+++ b/src/core/support/tls_pthread.c
@@ -37,8 +37,8 @@
#include <grpc/support/tls.h>
-gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value) {
- GPR_ASSERT(0 == pthread_setspecific(tls->key, (void*)value));
+intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value) {
+ GPR_ASSERT(0 == pthread_setspecific(tls->key, (void *)value));
return value;
}
diff --git a/src/core/support/tmpfile.h b/src/core/support/tmpfile.h
new file mode 100644
index 0000000000..df6f8692bb
--- /dev/null
+++ b/src/core/support/tmpfile.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SUPPORT_TMPFILE_H
+#define GRPC_CORE_SUPPORT_TMPFILE_H
+
+#include <stdio.h>
+
+#include <grpc/support/slice.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Creates a temporary file from a prefix.
+ If tmp_filename is not NULL, *tmp_filename is assigned the name of the
+ created file and it is the responsibility of the caller to gpr_free it
+ unless an error occurs in which case it will be set to NULL. */
+FILE *gpr_tmpfile(const char *prefix, char **tmp_filename);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_SUPPORT_TMPFILE_H */
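
A small usage sketch for the gpr_tmpfile contract documented above (write_scratch_file and the prefix string are hypothetical):

#include "src/core/support/tmpfile.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>

static void write_scratch_file(void) {
  char *tmp_name = NULL;
  FILE *f = gpr_tmpfile("grpc_scratch", &tmp_name);
  if (f == NULL) return;      /* on error tmp_name is set to NULL */
  GPR_ASSERT(tmp_name != NULL);
  fputs("hello\n", f);
  fclose(f);
  remove(tmp_name);           /* remove the on-disk file */
  gpr_free(tmp_name);         /* the caller owns the returned name */
}
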
diff --git a/src/core/support/file_posix.c b/src/core/support/tmpfile_posix.c
index c11c07148a..b16eeacf9d 100644
--- a/src/core/support/file_posix.c
+++ b/src/core/support/tmpfile_posix.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@
#ifdef GPR_POSIX_FILE
-#include "src/core/support/file.h"
+#include "src/core/support/tmpfile.h"
#include <errno.h>
#include <stdlib.h>
diff --git a/src/core/support/file_win32.c b/src/core/support/tmpfile_win32.c
index 355744f79a..3000f0029f 100644
--- a/src/core/support/file_win32.c
+++ b/src/core/support/tmpfile_win32.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,8 +44,8 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/support/file.h"
#include "src/core/support/string_win32.h"
+#include "src/core/support/tmpfile.h"
FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) {
FILE *result = NULL;
diff --git a/src/core/support/wrap_memcpy.c b/src/core/support/wrap_memcpy.c
new file mode 100644
index 0000000000..15c289f7b8
--- /dev/null
+++ b/src/core/support/wrap_memcpy.c
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+/* Provide a wrapped memcpy for targets that need to be backwards
+ * compatible with older libc's.
+ *
+ * Enable by setting LDFLAGS=-Wl,-wrap,memcpy when linking.
+ */
+
+#ifdef __linux__
+#ifdef __x86_64__
+__asm__(".symver memcpy,memcpy@GLIBC_2.2.5");
+void *__wrap_memcpy(void *destination, const void *source, size_t num) {
+ return memcpy(destination, source, num);
+}
+#else /* !__x86_64__ */
+void *__wrap_memcpy(void *destination, const void *source, size_t num) {
+ return memmove(destination, source, num);
+}
+#endif
+#endif
diff --git a/src/core/surface/alarm.c b/src/core/surface/alarm.c
new file mode 100644
index 0000000000..8169ede065
--- /dev/null
+++ b/src/core/surface/alarm.c
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/timer.h"
+#include "src/core/surface/completion_queue.h"
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+struct grpc_alarm {
+ grpc_timer alarm;
+ grpc_cq_completion completion;
+ /** completion queue where events about this alarm will be posted */
+ grpc_completion_queue *cq;
+ /** user supplied tag */
+ void *tag;
+};
+
+static void do_nothing_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_cq_completion *c) {}
+
+static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_alarm *alarm = arg;
+ grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, success,
+ do_nothing_end_completion, NULL, &alarm->completion);
+}
+
+grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
+ void *tag) {
+ grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_CQ_INTERNAL_REF(cq, "alarm");
+ alarm->cq = cq;
+ alarm->tag = tag;
+
+ grpc_cq_begin_op(cq, tag);
+ grpc_timer_init(&exec_ctx, &alarm->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ alarm_cb, alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_exec_ctx_finish(&exec_ctx);
+ return alarm;
+}
+
+void grpc_alarm_cancel(grpc_alarm *alarm) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_timer_cancel(&exec_ctx, &alarm->alarm);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_alarm_destroy(grpc_alarm *alarm) {
+ grpc_alarm_cancel(alarm);
+ GRPC_CQ_INTERNAL_UNREF(alarm->cq, "alarm");
+ gpr_free(alarm);
+}
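
The new public alarm object above posts its tag to a completion queue once the deadline is reached (or earlier, if cancelled). A hedged usage sketch, assuming the completion-queue API of this era (grpc_completion_queue_create/_next/_shutdown/_destroy) and gpr_time_from_millis:

#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

static void alarm_example(void) {
  grpc_completion_queue *cq;
  grpc_alarm *alarm;
  grpc_event ev;
  void *tag = (void *)0x1; /* arbitrary placeholder tag */

  grpc_init();
  cq = grpc_completion_queue_create(NULL);
  alarm = grpc_alarm_create(
      cq, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_time_from_millis(100, GPR_TIMESPAN)),
      tag);
  ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_MONOTONIC),
                                  NULL);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE && ev.tag == tag);
  /* ev.success is 1 when the alarm expired, 0 when it was cancelled */
  grpc_alarm_destroy(alarm);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_MONOTONIC),
                                    NULL).type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}
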
diff --git a/src/core/surface/surface_trace.c b/src/core/surface/api_trace.c
index 57a0053162..9f0b900d46 100644
--- a/src/core/surface/surface_trace.c
+++ b/src/core/surface/api_trace.c
@@ -31,6 +31,6 @@
*
*/
-#include "src/core/surface/surface_trace.h"
+#include "src/core/surface/api_trace.h"
-int grpc_surface_trace = 0;
+int grpc_api_trace = 0;
diff --git a/src/core/surface/api_trace.h b/src/core/surface/api_trace.h
new file mode 100644
index 0000000000..29a9b2d79c
--- /dev/null
+++ b/src/core/surface/api_trace.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SURFACE_API_TRACE_H
+#define GRPC_CORE_SURFACE_API_TRACE_H
+
+#include "src/core/debug/trace.h"
+#include <grpc/support/log.h>
+
+extern int grpc_api_trace;
+
+/* Provide unwrapping macros because we're in C89 and variadic macros weren't
+ introduced until C99... */
+#define GRPC_API_TRACE_UNWRAP0()
+#define GRPC_API_TRACE_UNWRAP1(a) , a
+#define GRPC_API_TRACE_UNWRAP2(a, b) , a, b
+#define GRPC_API_TRACE_UNWRAP3(a, b, c) , a, b, c
+#define GRPC_API_TRACE_UNWRAP4(a, b, c, d) , a, b, c, d
+#define GRPC_API_TRACE_UNWRAP5(a, b, c, d, e) , a, b, c, d, e
+#define GRPC_API_TRACE_UNWRAP6(a, b, c, d, e, f) , a, b, c, d, e, f
+#define GRPC_API_TRACE_UNWRAP7(a, b, c, d, e, f, g) , a, b, c, d, e, f, g
+#define GRPC_API_TRACE_UNWRAP8(a, b, c, d, e, f, g, h) , a, b, c, d, e, f, g, h
+#define GRPC_API_TRACE_UNWRAP9(a, b, c, d, e, f, g, h, i) \
+ , a, b, c, d, e, f, g, h, i
+#define GRPC_API_TRACE_UNWRAP10(a, b, c, d, e, f, g, h, i, j) \
+ , a, b, c, d, e, f, g, h, i, j
+
+/* Due to the limitations of C89's preprocessor, the arity of the var-arg list
+ 'nargs' must be specified. */
+#define GRPC_API_TRACE(fmt, nargs, args) \
+ if (grpc_api_trace) { \
+ gpr_log(GPR_INFO, fmt GRPC_API_TRACE_UNWRAP##nargs args); \
+ }
+
+#endif /* GRPC_CORE_SURFACE_API_TRACE_H */
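
A call-site sketch for the macro above (my_api_function and its arguments are hypothetical; the middle argument must state the arity of the parenthesised list):

#include "src/core/surface/api_trace.h"

void my_api_function(void *foo, int n) {
  /* when grpc_api_trace is non-zero this expands to
     gpr_log(GPR_INFO, "my_api_function(foo=%p, n=%d)", foo, n); */
  GRPC_API_TRACE("my_api_function(foo=%p, n=%d)", 2, (foo, n));
}
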
diff --git a/src/core/surface/byte_buffer.c b/src/core/surface/byte_buffer.c
index 4817e00454..fb39c4531d 100644
--- a/src/core/surface/byte_buffer.c
+++ b/src/core/surface/byte_buffer.c
@@ -55,15 +55,27 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
return bb;
}
+grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
+ grpc_byte_buffer_reader *reader) {
+ grpc_byte_buffer *bb = malloc(sizeof(grpc_byte_buffer));
+ gpr_slice slice;
+ bb->type = GRPC_BB_RAW;
+ bb->data.raw.compression = GRPC_COMPRESS_NONE;
+ gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
+
+ while (grpc_byte_buffer_reader_next(reader, &slice)) {
+ gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slice);
+ }
+ return bb;
+}
+
grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
switch (bb->type) {
case GRPC_BB_RAW:
return grpc_raw_byte_buffer_create(bb->data.raw.slice_buffer.slices,
bb->data.raw.slice_buffer.count);
}
- gpr_log(GPR_INFO, "should never get here");
- abort();
- return NULL;
+ GPR_UNREACHABLE_CODE(return NULL);
}
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
@@ -81,6 +93,5 @@ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
case GRPC_BB_RAW:
return bb->data.raw.slice_buffer.length;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
diff --git a/src/core/surface/byte_buffer_reader.c b/src/core/surface/byte_buffer_reader.c
index 283db83833..4679854227 100644
--- a/src/core/surface/byte_buffer_reader.c
+++ b/src/core/surface/byte_buffer_reader.c
@@ -31,6 +31,7 @@
*
*/
+#include <string.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/compression.h>
@@ -103,3 +104,20 @@ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
}
return 0;
}
+
+gpr_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) {
+ gpr_slice in_slice;
+ size_t bytes_read = 0;
+ const size_t input_size = grpc_byte_buffer_length(reader->buffer_out);
+ gpr_slice out_slice = gpr_slice_malloc(input_size);
+ uint8_t *const outbuf = GPR_SLICE_START_PTR(out_slice); /* just an alias */
+
+ while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) {
+ const size_t slice_length = GPR_SLICE_LENGTH(in_slice);
+ memcpy(&(outbuf[bytes_read]), GPR_SLICE_START_PTR(in_slice), slice_length);
+ bytes_read += slice_length;
+ gpr_slice_unref(in_slice);
+ GPR_ASSERT(bytes_read <= input_size);
+ }
+ return out_slice;
+}
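
A short sketch of the new reader helper in use (flatten is a hypothetical caller; the returned slice is released by the caller with gpr_slice_unref):

#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/slice.h>

/* flatten a possibly multi-slice / compressed byte buffer into one slice */
static gpr_slice flatten(grpc_byte_buffer *bb) {
  grpc_byte_buffer_reader reader;
  gpr_slice all;
  grpc_byte_buffer_reader_init(&reader, bb);
  all = grpc_byte_buffer_reader_readall(&reader);
  grpc_byte_buffer_reader_destroy(&reader);
  return all;
}
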
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index 181617fff8..6f1cd1df10 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +30,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
-
-#include "src/core/census/grpc_context.h"
-#include "src/core/surface/call.h"
-#include "src/core/channel/channel_stack.h"
-#include "src/core/iomgr/alarm.h"
-#include "src/core/profiling/timers.h"
-#include "src/core/support/string.h"
-#include "src/core/surface/byte_buffer_queue.h"
-#include "src/core/surface/channel.h"
-#include "src/core/surface/completion_queue.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
#include <assert.h>
-
+#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-typedef enum { REQ_INITIAL = 0, REQ_READY, REQ_DONE } req_state;
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
-typedef enum {
- SEND_NOTHING,
- SEND_INITIAL_METADATA,
- SEND_BUFFERED_INITIAL_METADATA,
- SEND_MESSAGE,
- SEND_BUFFERED_MESSAGE,
- SEND_TRAILING_METADATA_AND_FINISH,
- SEND_FINISH
-} send_action;
+#include "src/core/channel/channel_stack.h"
+#include "src/core/compression/algorithm_metadata.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/call.h"
+#include "src/core/surface/channel.h"
+#include "src/core/surface/completion_queue.h"
+#include "src/core/transport/static_metadata.h"
+
+/** The maximum number of concurrent batches possible.
+ Based upon the maximum number of individually queueable ops in the batch
+ api:
+ - initial metadata send
+ - message send
+ - status/close send (depending on client/server)
+ - initial metadata recv
+ - message recv
+ - status/close recv (depending on client/server) */
+#define MAX_CONCURRENT_BATCHES 6
typedef struct {
grpc_ioreq_completion_func on_complete;
@@ -67,24 +71,7 @@ typedef struct {
int success;
} completed_request;
-/* See request_set in grpc_call below for a description */
-#define REQSET_EMPTY 'X'
-#define REQSET_DONE 'Y'
-
-#define MAX_SEND_INITIAL_METADATA_COUNT 3
-
-typedef struct {
- /* Overall status of the operation: starts OK, may degrade to
- non-OK */
- int success;
- /* Completion function to call at the end of the operation */
- grpc_ioreq_completion_func on_complete;
- void *user_data;
- /* a bit mask of which request ops are needed (1u << opid) */
- gpr_uint16 need_mask;
- /* a bit mask of which request ops are now completed */
- gpr_uint16 complete_mask;
-} reqinfo_master;
+#define MAX_SEND_EXTRA_METADATA_COUNT 3
/* Status data for a request can come from several sources; this
enumerates them all, and acts as a priority sorting for which
@@ -105,7 +92,7 @@ typedef enum {
} status_source;
typedef struct {
- gpr_uint8 is_set;
+ uint8_t is_set;
grpc_status_code code;
grpc_mdstr *details;
} received_status;
@@ -129,127 +116,111 @@ typedef enum {
WRITE_STATE_WRITE_CLOSED
} write_state;
+typedef struct batch_control {
+ grpc_call *call;
+ grpc_cq_completion cq_completion;
+ grpc_closure finish_batch;
+ void *notify_tag;
+ gpr_refcount steps_to_complete;
+
+ uint8_t send_initial_metadata;
+ uint8_t send_message;
+ uint8_t send_final_op;
+ uint8_t recv_initial_metadata;
+ uint8_t recv_message;
+ uint8_t recv_final_op;
+ uint8_t is_notify_tag_closure;
+ uint8_t success;
+} batch_control;
+
struct grpc_call {
grpc_completion_queue *cq;
grpc_channel *channel;
- grpc_mdctx *metadata_context;
+ grpc_call *parent;
+ grpc_call *first_child;
/* TODO(ctiller): share with cq if possible? */
gpr_mu mu;
- /* how far through the stream have we read? */
- read_state read_state;
- /* how far through the stream have we written? */
- write_state write_state;
/* client or server call */
- gpr_uint8 is_client;
+ uint8_t is_client;
/* is the alarm set */
- gpr_uint8 have_alarm;
- /* are we currently performing a send operation */
- gpr_uint8 sending;
- /* are we currently performing a recv operation */
- gpr_uint8 receiving;
- /* are we currently completing requests */
- gpr_uint8 completing;
+ uint8_t have_alarm;
/** has grpc_call_destroy been called */
- gpr_uint8 destroy_called;
- /* pairs with completed_requests */
- gpr_uint8 num_completed_requests;
- /* are we currently reading a message? */
- gpr_uint8 reading_message;
- /* have we bound a pollset yet? */
- gpr_uint8 bound_pollset;
- /* is an error status set */
- gpr_uint8 error_status_set;
- /** should the alarm be cancelled */
- gpr_uint8 cancel_alarm;
-
- /* flags with bits corresponding to write states allowing us to determine
- what was sent */
- gpr_uint16 last_send_contains;
- /* cancel with this status on the next outgoing transport op */
- grpc_status_code cancel_with_status;
-
- /* Active ioreqs.
- request_set and request_data contain one element per active ioreq
- operation.
-
- request_set[op] is an integer specifying a set of operations to which
- the request belongs:
- - if it is < GRPC_IOREQ_OP_COUNT, then this operation is pending
- completion, and the integer represents to which group of operations
- the ioreq belongs. Each group is represented by one master, and the
- integer in request_set is an index into masters to find the master
- data.
- - if it is REQSET_EMPTY, the ioreq op is inactive and available to be
- started
- - finally, if request_set[op] is REQSET_DONE, then the operation is
- complete and unavailable to be started again
-
- request_data[op] is the request data as supplied by the initiator of
- a request, and is valid iff request_set[op] <= GRPC_IOREQ_OP_COUNT.
- The set fields are as per the request type specified by op.
-
- Finally, one element of masters is set per active _set_ of ioreq
- operations. It describes work left outstanding, result status, and
- what work to perform upon operation completion. As one ioreq of each
- op type can be active at once, by convention we choose the first element
- of the group to be the master -- ie the master of in-progress operation
- op is masters[request_set[op]]. This allows constant time allocation
- and a strong upper bound of a count of masters to be calculated. */
- gpr_uint8 request_set[GRPC_IOREQ_OP_COUNT];
- grpc_ioreq_data request_data[GRPC_IOREQ_OP_COUNT];
- gpr_uint32 request_flags[GRPC_IOREQ_OP_COUNT];
- reqinfo_master masters[GRPC_IOREQ_OP_COUNT];
-
- /* Dynamic array of ioreq's that have completed: the count of
- elements is queued in num_completed_requests.
- This list is built up under lock(), and flushed entirely during
- unlock().
- We know the upper bound of the number of elements as we can only
- have one ioreq of each type active at once. */
- completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
- /* Incoming buffer of messages */
- grpc_byte_buffer_queue incoming_queue;
+ uint8_t destroy_called;
+ /** flag indicating that cancellation is inherited */
+ uint8_t cancellation_is_inherited;
+ /** bitmask of live batches */
+ uint8_t used_batches;
+ /** which ops are in-flight */
+ uint8_t sent_initial_metadata;
+ uint8_t sending_message;
+ uint8_t sent_final_op;
+ uint8_t received_initial_metadata;
+ uint8_t receiving_message;
+ uint8_t received_final_op;
+
+ /* have we received initial metadata */
+ bool has_initial_md_been_received;
+
+ batch_control active_batches[MAX_CONCURRENT_BATCHES];
+
+ /* first idx: is_receiving, second idx: is_trailing */
+ grpc_metadata_batch metadata_batch[2][2];
+
/* Buffered read metadata waiting to be returned to the application.
Element 0 is initial metadata, element 1 is trailing metadata. */
- grpc_metadata_array buffered_metadata[2];
- /* All metadata received - unreffed at once at the end of the call */
- grpc_mdelem **owned_metadata;
- size_t owned_metadata_count;
- size_t owned_metadata_capacity;
+ grpc_metadata_array *buffered_metadata[2];
/* Received call statuses from various sources */
received_status status[STATUS_SOURCE_COUNT];
- /* Compression level for the call */
- grpc_compression_level compression_level;
+ /* Compression algorithm for the call */
+ grpc_compression_algorithm compression_algorithm;
+ /* Supported encodings (compression algorithms), a bitset */
+ uint32_t encodings_accepted_by_peer;
/* Contexts for various subsystems (security, tracing, ...). */
grpc_call_context_element context[GRPC_CONTEXT_COUNT];
/* Deadline alarm - if have_alarm is non-zero */
- grpc_alarm alarm;
-
- /* Call refcount - to keep the call alive during asynchronous operations */
- gpr_refcount internal_refcount;
+ grpc_timer alarm;
- grpc_linked_mdelem send_initial_metadata[MAX_SEND_INITIAL_METADATA_COUNT];
- grpc_linked_mdelem status_link;
- grpc_linked_mdelem details_link;
- size_t send_initial_metadata_count;
+ /* for the client, extra metadata is initial metadata; for the
+ server, it's trailing metadata */
+ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT];
+ int send_extra_metadata_count;
gpr_timespec send_deadline;
- grpc_stream_op_buffer send_ops;
- grpc_stream_op_buffer recv_ops;
- grpc_stream_state recv_state;
-
- gpr_slice_buffer incoming_message;
- gpr_uint32 incoming_message_length;
- gpr_uint32 incoming_message_flags;
- grpc_iomgr_closure destroy_closure;
- grpc_iomgr_closure on_done_recv;
- grpc_iomgr_closure on_done_send;
- grpc_iomgr_closure on_done_bind;
+ /** siblings: children of the same parent form a list, and this list is
+ protected under
+ parent->mu */
+ grpc_call *sibling_next;
+ grpc_call *sibling_prev;
+
+ grpc_slice_buffer_stream sending_stream;
+ grpc_byte_stream *receiving_stream;
+ grpc_byte_buffer **receiving_buffer;
+ gpr_slice receiving_slice;
+ grpc_closure receiving_slice_ready;
+ grpc_closure receiving_stream_ready;
+ grpc_closure receiving_initial_metadata_ready;
+ uint32_t test_only_last_message_flags;
+
+ union {
+ struct {
+ grpc_status_code *status;
+ char **status_details;
+ size_t *status_details_capacity;
+ } client;
+ struct {
+ int *cancelled;
+ } server;
+ } final_op;
+
+ struct {
+ void *bctlp;
+ bool success;
+ } saved_receiving_stream_ready_ctx;
};
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
@@ -259,319 +230,301 @@ struct grpc_call {
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline);
-static void call_on_done_recv(void *call, int success);
-static void call_on_done_send(void *call, int success);
-static int fill_send_ops(grpc_call *call, grpc_transport_op *op);
-static void execute_op(grpc_call *call, grpc_transport_op *op);
-static void recv_metadata(grpc_call *call, grpc_metadata_batch *metadata);
-static void finish_read_ops(grpc_call *call);
-static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline);
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op);
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
const char *description);
-static void finished_loose_op(void *call, int success);
-
-static void lock(grpc_call *call);
-static void unlock(grpc_call *call);
-
-grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
+ bool success);
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success);
+
+grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
+ uint32_t propagation_mask,
+ grpc_completion_queue *cq,
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline) {
- size_t i;
- grpc_transport_op initial_op;
- grpc_transport_op *initial_op_ptr = NULL;
+ size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
- grpc_call *call =
- gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call *call;
+ GPR_TIMER_BEGIN("grpc_call_create", 0);
+ call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
call->channel = channel;
call->cq = cq;
- if (cq) {
- GRPC_CQ_INTERNAL_REF(cq, "bind");
- }
+ call->parent = parent_call;
call->is_client = server_transport_data == NULL;
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- call->request_set[i] = REQSET_EMPTY;
- }
if (call->is_client) {
- call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
- call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
- call->context[GRPC_CONTEXT_TRACING].value = grpc_census_context_create();
- call->context[GRPC_CONTEXT_TRACING].destroy = grpc_census_context_destroy;
+ GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
+ for (i = 0; i < add_initial_metadata_count; i++) {
+ call->send_extra_metadata[i].md = add_initial_metadata[i];
+ }
+ call->send_extra_metadata_count = (int)add_initial_metadata_count;
+ } else {
+ GPR_ASSERT(add_initial_metadata_count == 0);
+ call->send_extra_metadata_count = 0;
}
- GPR_ASSERT(add_initial_metadata_count < MAX_SEND_INITIAL_METADATA_COUNT);
- for (i = 0; i < add_initial_metadata_count; i++) {
- call->send_initial_metadata[i].md = add_initial_metadata[i];
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ }
}
- call->send_initial_metadata_count = add_initial_metadata_count;
call->send_deadline = send_deadline;
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
- call->metadata_context = grpc_channel_get_metadata_context(channel);
- grpc_sopb_init(&call->send_ops);
- grpc_sopb_init(&call->recv_ops);
- gpr_slice_buffer_init(&call->incoming_message);
- grpc_iomgr_closure_init(&call->on_done_recv, call_on_done_recv, call);
- grpc_iomgr_closure_init(&call->on_done_send, call_on_done_send, call);
- grpc_iomgr_closure_init(&call->on_done_bind, finished_loose_op, call);
- /* dropped in destroy and when READ_STATE_STREAM_CLOSED received */
- gpr_ref_init(&call->internal_refcount, 2);
- /* server hack: start reads immediately so we can get initial metadata.
- TODO(ctiller): figure out a cleaner solution */
- if (!call->is_client) {
- memset(&initial_op, 0, sizeof(initial_op));
- initial_op.recv_ops = &call->recv_ops;
- initial_op.recv_state = &call->recv_state;
- initial_op.on_done_recv = &call->on_done_recv;
- initial_op.context = call->context;
- call->receiving = 1;
- GRPC_CALL_INTERNAL_REF(call, "receiving");
- initial_op_ptr = &initial_op;
- }
- grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
+ /* initial refcount dropped by grpc_call_destroy */
+ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
+ call->context, server_transport_data,
CALL_STACK_FROM_CALL(call));
- if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
- set_deadline_alarm(call, send_deadline);
+ if (cq != NULL) {
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
}
+ if (parent_call != NULL) {
+ GRPC_CALL_INTERNAL_REF(parent_call, "child");
+ GPR_ASSERT(call->is_client);
+ GPR_ASSERT(!parent_call->is_client);
+
+ gpr_mu_lock(&parent_call->mu);
+
+ if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
+ send_deadline = gpr_time_min(
+ gpr_convert_clock_type(send_deadline,
+ parent_call->send_deadline.clock_type),
+ parent_call->send_deadline);
+ }
+ /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
+ * GRPC_PROPAGATE_STATS_CONTEXT */
+ /* TODO(ctiller): This should change to use the appropriate census start_op
+ * call. */
+ if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
+ GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ parent_call->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
+ } else {
+ GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ }
+ if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
+ call->cancellation_is_inherited = 1;
+ }
+
+ if (parent_call->first_child == NULL) {
+ parent_call->first_child = call;
+ call->sibling_next = call->sibling_prev = call;
+ } else {
+ call->sibling_next = parent_call->first_child;
+ call->sibling_prev = parent_call->first_child->sibling_prev;
+ call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
+ call;
+ }
+
+ gpr_mu_unlock(&parent_call->mu);
+ }
+ if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
+ 0) {
+ set_deadline_alarm(&exec_ctx, call, send_deadline);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_create", 0);
return call;
}
-void grpc_call_set_completion_queue(grpc_call *call,
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq) {
- lock(call);
+ GPR_ASSERT(cq);
call->cq = cq;
- if (cq) {
- GRPC_CQ_INTERNAL_REF(cq, "bind");
- }
- unlock(call);
-}
-
-grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
- return call->cq;
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
}
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
-void grpc_call_internal_ref(grpc_call *c, const char *reason) {
- gpr_log(GPR_DEBUG, "CALL: ref %p %d -> %d [%s]", c,
- c->internal_refcount.count, c->internal_refcount.count + 1, reason);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
#else
-void grpc_call_internal_ref(grpc_call *c) {
+#define REF_REASON ""
+#define REF_ARG
#endif
- gpr_ref(&c->internal_refcount);
+void grpc_call_internal_ref(grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
+}
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
-static void destroy_call(void *call, int ignored_success) {
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, bool success) {
size_t i;
+ int ii;
grpc_call *c = call;
- grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c));
- GRPC_CHANNEL_INTERNAL_UNREF(c->channel, "call");
+ GPR_TIMER_BEGIN("destroy_call", 0);
+ for (i = 0; i < 2; i++) {
+ grpc_metadata_batch_destroy(
+ &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
+ }
+ if (c->receiving_stream != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
+ }
+ grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (c->status[i].details) {
- grpc_mdstr_unref(c->status[i].details);
+ GRPC_MDSTR_UNREF(c->status[i].details);
}
}
- for (i = 0; i < c->owned_metadata_count; i++) {
- grpc_mdelem_unref(c->owned_metadata[i]);
- }
- gpr_free(c->owned_metadata);
- for (i = 0; i < GPR_ARRAY_SIZE(c->buffered_metadata); i++) {
- gpr_free(c->buffered_metadata[i].metadata);
- }
- for (i = 0; i < c->send_initial_metadata_count; i++) {
- grpc_mdelem_unref(c->send_initial_metadata[i].md);
+ for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
+ GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
}
for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
if (c->context[i].destroy) {
c->context[i].destroy(c->context[i].value);
}
}
- grpc_sopb_destroy(&c->send_ops);
- grpc_sopb_destroy(&c->recv_ops);
- grpc_bbq_destroy(&c->incoming_queue);
- gpr_slice_buffer_destroy(&c->incoming_message);
if (c->cq) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
gpr_free(c);
-}
-
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
-void grpc_call_internal_unref(grpc_call *c, const char *reason,
- int allow_immediate_deletion) {
- gpr_log(GPR_DEBUG, "CALL: unref %p %d -> %d [%s]", c,
- c->internal_refcount.count, c->internal_refcount.count - 1, reason);
-#else
-void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
-#endif
- if (gpr_unref(&c->internal_refcount)) {
- if (allow_immediate_deletion) {
- destroy_call(c, 1);
- } else {
- c->destroy_closure.cb = destroy_call;
- c->destroy_closure.cb_arg = c;
- grpc_iomgr_add_callback(&c->destroy_closure);
- }
- }
+ GPR_TIMER_END("destroy_call", 0);
}
static void set_status_code(grpc_call *call, status_source source,
- gpr_uint32 status) {
+ uint32_t status) {
if (call->status[source].is_set) return;
call->status[source].is_set = 1;
- call->status[source].code = status;
- call->error_status_set = status != GRPC_STATUS_OK;
-
- if (status != GRPC_STATUS_OK && !grpc_bbq_empty(&call->incoming_queue)) {
- grpc_bbq_flush(&call->incoming_queue);
- }
-}
+ call->status[source].code = (grpc_status_code)status;
-static void set_decode_compression_level(grpc_call *call,
- grpc_compression_level clevel) {
- call->compression_level = clevel;
+ /* TODO(ctiller): what to do about the flush that was previously here */
}
-static void set_status_details(grpc_call *call, status_source source,
- grpc_mdstr *status) {
- if (call->status[source].details != NULL) {
- grpc_mdstr_unref(call->status[source].details);
- }
- call->status[source].details = status;
+static void set_compression_algorithm(grpc_call *call,
+ grpc_compression_algorithm algo) {
+ call->compression_algorithm = algo;
}
-static int is_op_live(grpc_call *call, grpc_ioreq_op op) {
- gpr_uint8 set = call->request_set[op];
- reqinfo_master *master;
- if (set >= GRPC_IOREQ_OP_COUNT) return 0;
- master = &call->masters[set];
- return (master->complete_mask & (1u << op)) == 0;
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call) {
+ grpc_compression_algorithm algorithm;
+ gpr_mu_lock(&call->mu);
+ algorithm = call->compression_algorithm;
+ gpr_mu_unlock(&call->mu);
+ return algorithm;
}
-static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); }
-
-static int need_more_data(grpc_call *call) {
- if (call->read_state == READ_STATE_STREAM_CLOSED) return 0;
- /* TODO(ctiller): this needs some serious cleanup */
- return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) ||
- (is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) &&
- grpc_bbq_empty(&call->incoming_queue)) ||
- is_op_live(call, GRPC_IOREQ_RECV_TRAILING_METADATA) ||
- is_op_live(call, GRPC_IOREQ_RECV_STATUS) ||
- is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) ||
- (is_op_live(call, GRPC_IOREQ_RECV_CLOSE) &&
- grpc_bbq_empty(&call->incoming_queue)) ||
- (call->write_state == WRITE_STATE_INITIAL && !call->is_client) ||
- (call->cancel_with_status != GRPC_STATUS_OK) ||
- call->destroy_called;
+uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
+ uint32_t flags;
+ gpr_mu_lock(&call->mu);
+ flags = call->test_only_last_message_flags;
+ gpr_mu_unlock(&call->mu);
+ return flags;
}
-static void unlock(grpc_call *call) {
- grpc_transport_op op;
- completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
- int completing_requests = 0;
- int start_op = 0;
- int i;
- int cancel_alarm = 0;
-
- memset(&op, 0, sizeof(op));
-
- op.cancel_with_status = call->cancel_with_status;
- start_op = op.cancel_with_status != GRPC_STATUS_OK;
- call->cancel_with_status = GRPC_STATUS_OK; /* reset */
+static void destroy_encodings_accepted_by_peer(void *p) { return; }
- cancel_alarm = call->cancel_alarm;
- call->cancel_alarm = 0;
-
- if (!call->receiving && need_more_data(call)) {
- op.recv_ops = &call->recv_ops;
- op.recv_state = &call->recv_state;
- op.on_done_recv = &call->on_done_recv;
- call->receiving = 1;
- GRPC_CALL_INTERNAL_REF(call, "receiving");
- start_op = 1;
+static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
+ size_t i;
+ grpc_compression_algorithm algorithm;
+ gpr_slice_buffer accept_encoding_parts;
+ gpr_slice accept_encoding_slice;
+ void *accepted_user_data;
+
+ accepted_user_data =
+ grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
+ if (accepted_user_data != NULL) {
+ call->encodings_accepted_by_peer =
+ (uint32_t)(((uintptr_t)accepted_user_data) - 1);
+ return;
}
- if (!call->sending) {
- if (fill_send_ops(call, &op)) {
- call->sending = 1;
- GRPC_CALL_INTERNAL_REF(call, "sending");
- start_op = 1;
+ accept_encoding_slice = mdel->value->slice;
+ gpr_slice_buffer_init(&accept_encoding_parts);
+ gpr_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
+
+ /* No need to zero call->encodings_accepted_by_peer: grpc_call_create already
+ * zeroes the whole grpc_call */
+ /* Always support no compression */
+ GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
+ for (i = 0; i < accept_encoding_parts.count; i++) {
+ const gpr_slice *accept_encoding_entry_slice =
+ &accept_encoding_parts.slices[i];
+ if (grpc_compression_algorithm_parse(
+ (const char *)GPR_SLICE_START_PTR(*accept_encoding_entry_slice),
+ GPR_SLICE_LENGTH(*accept_encoding_entry_slice), &algorithm)) {
+ GPR_BITSET(&call->encodings_accepted_by_peer, algorithm);
+ } else {
+ char *accept_encoding_entry_str =
+ gpr_dump_slice(*accept_encoding_entry_slice, GPR_DUMP_ASCII);
+ gpr_log(GPR_ERROR,
+ "Invalid entry in accept encoding metadata: '%s'. Ignoring.",
+ accept_encoding_entry_str);
+ gpr_free(accept_encoding_entry_str);
}
}
- if (!call->bound_pollset && call->cq && (!call->is_client || start_op)) {
- call->bound_pollset = 1;
- op.bind_pollset = grpc_cq_pollset(call->cq);
- start_op = 1;
- }
+ gpr_slice_buffer_destroy(&accept_encoding_parts);
- if (!call->completing && call->num_completed_requests != 0) {
- completing_requests = call->num_completed_requests;
- memcpy(completed_requests, call->completed_requests,
- sizeof(completed_requests));
- call->num_completed_requests = 0;
- call->completing = 1;
- GRPC_CALL_INTERNAL_REF(call, "completing");
- }
+ grpc_mdelem_set_user_data(
+ mdel, destroy_encodings_accepted_by_peer,
+ (void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
+}
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
+ uint32_t encodings_accepted_by_peer;
+ gpr_mu_lock(&call->mu);
+ encodings_accepted_by_peer = call->encodings_accepted_by_peer;
gpr_mu_unlock(&call->mu);
+ return encodings_accepted_by_peer;
+}
- if (cancel_alarm) {
- grpc_alarm_cancel(&call->alarm);
- }
-
- if (start_op) {
- execute_op(call, &op);
- }
-
- if (completing_requests > 0) {
- for (i = 0; i < completing_requests; i++) {
- completed_requests[i].on_complete(call, completed_requests[i].success,
- completed_requests[i].user_data);
- }
- lock(call);
- call->completing = 0;
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "completing", 0);
+static void set_status_details(grpc_call *call, status_source source,
+ grpc_mdstr *status) {
+ if (call->status[source].details != NULL) {
+ GRPC_MDSTR_UNREF(call->status[source].details);
}
+ call->status[source].details = status;
}
-static void get_final_status(grpc_call *call, grpc_ioreq_data out) {
+static void get_final_status(grpc_call *call,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data) {
int i;
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
- out.recv_status.set_value(call->status[i].code,
- out.recv_status.user_data);
+ set_value(call->status[i].code, set_value_user_data);
return;
}
}
if (call->is_client) {
- out.recv_status.set_value(GRPC_STATUS_UNKNOWN, out.recv_status.user_data);
+ set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
} else {
- out.recv_status.set_value(GRPC_STATUS_OK, out.recv_status.user_data);
+ set_value(GRPC_STATUS_OK, set_value_user_data);
}
}
-static void get_final_details(grpc_call *call, grpc_ioreq_data out) {
+static void get_final_details(grpc_call *call, char **out_details,
+ size_t *out_details_capacity) {
int i;
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
if (call->status[i].details) {
gpr_slice details = call->status[i].details->slice;
size_t len = GPR_SLICE_LENGTH(details);
- if (len + 1 > *out.recv_status_details.details_capacity) {
- *out.recv_status_details.details_capacity = GPR_MAX(
- len + 1, *out.recv_status_details.details_capacity * 3 / 2);
- *out.recv_status_details.details =
- gpr_realloc(*out.recv_status_details.details,
- *out.recv_status_details.details_capacity);
+ if (len + 1 > *out_details_capacity) {
+ *out_details_capacity =
+ GPR_MAX(len + 1, *out_details_capacity * 3 / 2);
+ *out_details = gpr_realloc(*out_details, *out_details_capacity);
}
- memcpy(*out.recv_status_details.details, GPR_SLICE_START_PTR(details),
- len);
- (*out.recv_status_details.details)[len] = 0;
+ memcpy(*out_details, GPR_SLICE_START_PTR(details), len);
+ (*out_details)[len] = 0;
} else {
goto no_details;
}
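
The hunk above parses the grpc-accept-encoding header into a bitset and caches
the result as mdelem user data, storing the value offset by one so that an
all-zero bitset can still be told apart from the NULL "not yet parsed" marker
(note the "+ 1" on store and the "- 1" on load). A minimal standalone sketch of
that offset-by-one caching pattern, using illustrative names rather than the
real gRPC metadata types:

    /* Sketch only: a stand-in for an mdelem user-data slot, where NULL means
     * "nothing cached yet". */
    #include <stdint.h>
    #include <stdio.h>

    static void *user_data_slot = NULL;

    static void cache_bitset(uint32_t bitset) {
      /* store bitset + 1 so a legitimate value of 0 is never mistaken for NULL */
      user_data_slot = (void *)(uintptr_t)(bitset + 1);
    }

    static int lookup_bitset(uint32_t *out) {
      if (user_data_slot == NULL) return 0; /* not parsed/cached yet */
      *out = (uint32_t)((uintptr_t)user_data_slot - 1);
      return 1;
    }

    int main(void) {
      uint32_t accepted = 0;
      accepted |= 1u << 0; /* e.g. GRPC_COMPRESS_NONE */
      accepted |= 1u << 2; /* e.g. one of the compressed encodings */
      cache_bitset(accepted);

      uint32_t fetched;
      if (lookup_bitset(&fetched)) {
        printf("accepted-encoding bitset: 0x%x\n", (unsigned)fetched);
      }
      return 0;
    }
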
@@ -580,609 +533,253 @@ static void get_final_details(grpc_call *call, grpc_ioreq_data out) {
}
no_details:
- if (0 == *out.recv_status_details.details_capacity) {
- *out.recv_status_details.details_capacity = 8;
- *out.recv_status_details.details =
- gpr_malloc(*out.recv_status_details.details_capacity);
- }
- **out.recv_status_details.details = 0;
-}
-
-static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
- int success) {
- completed_request *cr;
- gpr_uint8 master_set = call->request_set[op];
- reqinfo_master *master;
- size_t i;
- /* ioreq is live: we need to do something */
- master = &call->masters[master_set];
- master->complete_mask |= 1u << op;
- if (!success) {
- master->success = 0;
- }
- if (master->complete_mask == master->need_mask) {
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- if (call->request_set[i] != master_set) {
- continue;
- }
- call->request_set[i] = REQSET_DONE;
- switch ((grpc_ioreq_op)i) {
- case GRPC_IOREQ_RECV_MESSAGE:
- case GRPC_IOREQ_SEND_MESSAGE:
- call->request_set[i] = REQSET_EMPTY;
- if (!master->success) {
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- }
- break;
- case GRPC_IOREQ_SEND_STATUS:
- if (call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details !=
- NULL) {
- grpc_mdstr_unref(
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details);
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
- NULL;
- }
- break;
- case GRPC_IOREQ_RECV_CLOSE:
- case GRPC_IOREQ_SEND_INITIAL_METADATA:
- case GRPC_IOREQ_SEND_TRAILING_METADATA:
- case GRPC_IOREQ_SEND_CLOSE:
- break;
- case GRPC_IOREQ_RECV_STATUS:
- get_final_status(call, call->request_data[GRPC_IOREQ_RECV_STATUS]);
- break;
- case GRPC_IOREQ_RECV_STATUS_DETAILS:
- get_final_details(call,
- call->request_data[GRPC_IOREQ_RECV_STATUS_DETAILS]);
- break;
- case GRPC_IOREQ_RECV_INITIAL_METADATA:
- GPR_SWAP(grpc_metadata_array, call->buffered_metadata[0],
- *call->request_data[GRPC_IOREQ_RECV_INITIAL_METADATA]
- .recv_metadata);
- break;
- case GRPC_IOREQ_RECV_TRAILING_METADATA:
- GPR_SWAP(grpc_metadata_array, call->buffered_metadata[1],
- *call->request_data[GRPC_IOREQ_RECV_TRAILING_METADATA]
- .recv_metadata);
- break;
- case GRPC_IOREQ_OP_COUNT:
- abort();
- break;
- }
- }
- cr = &call->completed_requests[call->num_completed_requests++];
- cr->success = master->success;
- cr->on_complete = master->on_complete;
- cr->user_data = master->user_data;
- }
-}
-
-static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op, int success) {
- if (is_op_live(call, op)) {
- finish_live_ioreq_op(call, op, success);
- }
-}
-
-static void early_out_write_ops(grpc_call *call) {
- switch (call->write_state) {
- case WRITE_STATE_WRITE_CLOSED:
- finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
- /* fallthrough */
- case WRITE_STATE_STARTED:
- finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, 0);
- /* fallthrough */
- case WRITE_STATE_INITIAL:
- /* do nothing */
- break;
- }
-}
-
-static void call_on_done_send(void *pc, int success) {
- grpc_call *call = pc;
- lock(call);
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
- call->write_state = WRITE_STATE_STARTED;
- }
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, success);
- }
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_CLOSE)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, success);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, success);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- }
- if (!success) {
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- early_out_write_ops(call);
- }
- call->send_ops.nops = 0;
- call->last_send_contains = 0;
- call->sending = 0;
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "sending", 0);
-}
-
-static void finish_message(grpc_call *call) {
- if (call->error_status_set == 0) {
- /* TODO(ctiller): this could be a lot faster if coded directly */
- grpc_byte_buffer *byte_buffer = grpc_raw_byte_buffer_create(
- call->incoming_message.slices, call->incoming_message.count);
- grpc_bbq_push(&call->incoming_queue, byte_buffer);
- }
- gpr_slice_buffer_reset_and_unref(&call->incoming_message);
- GPR_ASSERT(call->incoming_message.count == 0);
- call->reading_message = 0;
-}
-
-static int begin_message(grpc_call *call, grpc_begin_message msg) {
- /* can't begin a message when we're still reading a message */
- if (call->reading_message) {
- char *message = NULL;
- gpr_asprintf(
- &message, "Message terminated early; read %d bytes, expected %d",
- (int)call->incoming_message.length, (int)call->incoming_message_length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- }
- /* stash away parameters, and prepare for incoming slices */
- if (msg.length > grpc_channel_get_max_message_length(call->channel)) {
- char *message = NULL;
- gpr_asprintf(
- &message,
- "Maximum message length of %d exceeded by a message of length %d",
- grpc_channel_get_max_message_length(call->channel), msg.length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- } else if (msg.length > 0) {
- call->reading_message = 1;
- call->incoming_message_length = msg.length;
- call->incoming_message_flags = msg.flags;
- return 1;
- } else {
- finish_message(call);
- return 1;
+ if (0 == *out_details_capacity) {
+ *out_details_capacity = 8;
+ *out_details = gpr_malloc(*out_details_capacity);
}
+ **out_details = 0;
}
-static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
- if (GPR_SLICE_LENGTH(slice) == 0) {
- gpr_slice_unref(slice);
- return 1;
- }
- /* we have to be reading a message to know what to do here */
- if (!call->reading_message) {
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT,
- "Received payload data while not reading a message");
- return 0;
- }
- /* append the slice to the incoming buffer */
- gpr_slice_buffer_add(&call->incoming_message, slice);
- if (call->incoming_message.length > call->incoming_message_length) {
- /* if we got too many bytes, complain */
- char *message = NULL;
- gpr_asprintf(
- &message, "Receiving message overflow; read %d bytes, expected %d",
- (int)call->incoming_message.length, (int)call->incoming_message_length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- } else if (call->incoming_message.length == call->incoming_message_length) {
- finish_message(call);
- return 1;
- } else {
- return 1;
- }
+static grpc_linked_mdelem *linked_from_md(grpc_metadata *md) {
+ return (grpc_linked_mdelem *)&md->internal_data;
}
-static void call_on_done_recv(void *pc, int success) {
- grpc_call *call = pc;
- size_t i;
- GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
- lock(call);
- call->receiving = 0;
- if (success) {
- for (i = 0; success && i < call->recv_ops.nops; i++) {
- grpc_stream_op *op = &call->recv_ops.ops[i];
- switch (op->type) {
- case GRPC_NO_OP:
- break;
- case GRPC_OP_METADATA:
- recv_metadata(call, &op->data.metadata);
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- success = begin_message(call, op->data.begin_message);
- break;
- case GRPC_OP_SLICE:
- success = add_slice_to_message(call, op->data.slice);
- break;
+static int prepare_application_metadata(grpc_call *call, int count,
+ grpc_metadata *metadata,
+ int is_trailing,
+ int prepend_extra_metadata) {
+ int i;
+ grpc_metadata_batch *batch =
+ &call->metadata_batch[0 /* is_receiving */][is_trailing];
+ if (prepend_extra_metadata) {
+ if (call->send_extra_metadata_count == 0) {
+ prepend_extra_metadata = 0;
+ } else {
+ for (i = 0; i < call->send_extra_metadata_count; i++) {
+ GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
+ }
+ for (i = 1; i < call->send_extra_metadata_count; i++) {
+ call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
+ }
+ for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
+ call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
}
}
- if (!success) {
- grpc_stream_ops_unref_owned_objects(&call->recv_ops.ops[i],
- call->recv_ops.nops - i);
- }
- if (call->recv_state == GRPC_STREAM_RECV_CLOSED) {
- GPR_ASSERT(call->read_state <= READ_STATE_READ_CLOSED);
- call->read_state = READ_STATE_READ_CLOSED;
- }
- if (call->recv_state == GRPC_STREAM_CLOSED) {
- GPR_ASSERT(call->read_state <= READ_STATE_STREAM_CLOSED);
- call->read_state = READ_STATE_STREAM_CLOSED;
- call->cancel_alarm |= call->have_alarm;
- GRPC_CALL_INTERNAL_UNREF(call, "closed", 0);
- }
- finish_read_ops(call);
- } else {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 0);
}
- call->recv_ops.nops = 0;
- unlock(call);
-
- GRPC_CALL_INTERNAL_UNREF(call, "receiving", 0);
- GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
-}
-
-static int prepare_application_metadata(grpc_call *call, size_t count,
- grpc_metadata *metadata) {
- size_t i;
for (i = 0; i < count; i++) {
grpc_metadata *md = &metadata[i];
- grpc_metadata *next_md = (i == count - 1) ? NULL : &metadata[i + 1];
- grpc_metadata *prev_md = (i == 0) ? NULL : &metadata[i - 1];
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
- l->md = grpc_mdelem_from_string_and_buffer(call->metadata_context, md->key,
- (const gpr_uint8 *)md->value,
- md->value_length);
- if (!grpc_mdstr_is_legal_header(l->md->key)) {
- gpr_log(GPR_ERROR, "attempt to send invalid metadata key");
+ l->md = grpc_mdelem_from_string_and_buffer(
+ md->key, (const uint8_t *)md->value, md->value_length);
+ if (!grpc_header_key_is_legal(grpc_mdstr_as_c_string(l->md->key),
+ GRPC_MDSTR_LENGTH(l->md->key))) {
+ gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
+ grpc_mdstr_as_c_string(l->md->key));
return 0;
- } else if (!grpc_mdstr_is_bin_suffixed(l->md->key) &&
- !grpc_mdstr_is_legal_header(l->md->value)) {
+ } else if (!grpc_is_binary_header(grpc_mdstr_as_c_string(l->md->key),
+ GRPC_MDSTR_LENGTH(l->md->key)) &&
+ !grpc_header_nonbin_value_is_legal(
+ grpc_mdstr_as_c_string(l->md->value),
+ GRPC_MDSTR_LENGTH(l->md->value))) {
gpr_log(GPR_ERROR, "attempt to send invalid metadata value");
return 0;
}
- l->next = next_md ? (grpc_linked_mdelem *)&next_md->internal_data : NULL;
- l->prev = prev_md ? (grpc_linked_mdelem *)&prev_md->internal_data : NULL;
}
- return 1;
-}
-
-static grpc_mdelem_list chain_metadata_from_app(grpc_call *call, size_t count,
- grpc_metadata *metadata) {
- grpc_mdelem_list out;
- if (count == 0) {
- out.head = out.tail = NULL;
- return out;
+ for (i = 1; i < count; i++) {
+ linked_from_md(&metadata[i])->prev = linked_from_md(&metadata[i - 1]);
}
- out.head = (grpc_linked_mdelem *)&(metadata[0].internal_data);
- out.tail = (grpc_linked_mdelem *)&(metadata[count - 1].internal_data);
- return out;
-}
-
-/* Copy the contents of a byte buffer into stream ops */
-static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
- grpc_stream_op_buffer *sopb) {
- size_t i;
-
- switch (byte_buffer->type) {
- case GRPC_BB_RAW:
- for (i = 0; i < byte_buffer->data.raw.slice_buffer.count; i++) {
- gpr_slice slice = byte_buffer->data.raw.slice_buffer.slices[i];
- gpr_slice_ref(slice);
- grpc_sopb_add_slice(sopb, slice);
- }
- break;
+ for (i = 0; i < count - 1; i++) {
+ linked_from_md(&metadata[i])->next = linked_from_md(&metadata[i + 1]);
}
-}
-
-static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
- grpc_ioreq_data data;
- gpr_uint32 flags;
- grpc_metadata_batch mdb;
- size_t i;
- GPR_ASSERT(op->send_ops == NULL);
-
- switch (call->write_state) {
- case WRITE_STATE_INITIAL:
- if (!is_op_live(call, GRPC_IOREQ_SEND_INITIAL_METADATA)) {
- break;
- }
- data = call->request_data[GRPC_IOREQ_SEND_INITIAL_METADATA];
- mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
- data.send_metadata.metadata);
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = call->send_deadline;
- for (i = 0; i < call->send_initial_metadata_count; i++) {
- grpc_metadata_batch_link_head(&mdb, &call->send_initial_metadata[i]);
- }
- grpc_sopb_add_metadata(&call->send_ops, mdb);
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA;
- call->send_initial_metadata_count = 0;
- /* fall through intended */
- case WRITE_STATE_STARTED:
- if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
- data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
- flags = call->request_flags[GRPC_IOREQ_SEND_MESSAGE];
- grpc_sopb_add_begin_message(
- &call->send_ops, grpc_byte_buffer_length(data.send_message), flags);
- copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops);
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE;
- }
- if (is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
- op->is_last_send = 1;
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_CLOSE;
- if (!call->is_client) {
- /* send trailing metadata */
- data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
- mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
- data.send_metadata.metadata);
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future;
- /* send status */
- /* TODO(ctiller): cache common status values */
- data = call->request_data[GRPC_IOREQ_SEND_STATUS];
- grpc_metadata_batch_add_tail(
- &mdb, &call->status_link,
- grpc_channel_get_reffed_status_elem(call->channel,
- data.send_status.code));
- if (data.send_status.details) {
- grpc_metadata_batch_add_tail(
- &mdb, &call->details_link,
- grpc_mdelem_from_metadata_strings(
- call->metadata_context,
- grpc_mdstr_ref(
- grpc_channel_get_message_string(call->channel)),
- data.send_status.details));
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
- NULL;
- }
- grpc_sopb_add_metadata(&call->send_ops, mdb);
- }
- }
+ switch (prepend_extra_metadata * 2 + (count != 0)) {
+ case 0:
+ /* no prepend, no metadata => nothing to do */
+ batch->list.head = batch->list.tail = NULL;
break;
- case WRITE_STATE_WRITE_CLOSED:
+ case 1:
+ /* metadata, but no prepend */
+ batch->list.head = linked_from_md(&metadata[0]);
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
break;
- }
- if (op->send_ops) {
- op->on_done_send = &call->on_done_send;
- }
- return op->send_ops != NULL;
-}
-
-static grpc_call_error start_ioreq_error(grpc_call *call,
- gpr_uint32 mutated_ops,
- grpc_call_error ret) {
- size_t i;
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- if (mutated_ops & (1u << i)) {
- call->request_set[i] = REQSET_EMPTY;
- }
- }
- return ret;
-}
-
-static void finish_read_ops(grpc_call *call) {
- int empty;
-
- if (is_op_live(call, GRPC_IOREQ_RECV_MESSAGE)) {
- empty =
- (NULL == (*call->request_data[GRPC_IOREQ_RECV_MESSAGE].recv_message =
- grpc_bbq_pop(&call->incoming_queue)));
- if (!empty) {
- finish_live_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
- empty = grpc_bbq_empty(&call->incoming_queue);
- }
- } else {
- empty = grpc_bbq_empty(&call->incoming_queue);
- }
-
- switch (call->read_state) {
- case READ_STATE_STREAM_CLOSED:
- if (empty && !call->have_alarm) {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 1);
- }
- /* fallthrough */
- case READ_STATE_READ_CLOSED:
- if (empty) {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
- }
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 1);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 1);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 1);
- /* fallthrough */
- case READ_STATE_GOT_INITIAL_METADATA:
- finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 1);
- /* fallthrough */
- case READ_STATE_INITIAL:
- /* do nothing */
+ case 2:
+ /* prepend, but no md */
+ batch->list.head = &call->send_extra_metadata[0];
+ batch->list.tail =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
break;
- }
-}
-
-static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
- size_t nreqs,
- grpc_ioreq_completion_func completion,
- void *user_data) {
- size_t i;
- gpr_uint32 have_ops = 0;
- grpc_ioreq_op op;
- reqinfo_master *master;
- grpc_ioreq_data data;
- gpr_uint8 set;
-
- if (nreqs == 0) {
- return GRPC_CALL_OK;
+ case 3:
+ /* prepend AND md */
+ batch->list.head = &call->send_extra_metadata[0];
+ call->send_extra_metadata[call->send_extra_metadata_count - 1].next =
+ linked_from_md(&metadata[0]);
+ linked_from_md(&metadata[0])->prev =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
+ break;
+ default:
+ GPR_UNREACHABLE_CODE(return 0);
}
- set = reqs[0].op;
+ return 1;
+}
- for (i = 0; i < nreqs; i++) {
- op = reqs[i].op;
- if (call->request_set[op] < GRPC_IOREQ_OP_COUNT) {
- return start_ioreq_error(call, have_ops,
- GRPC_CALL_ERROR_TOO_MANY_OPERATIONS);
- } else if (call->request_set[op] == REQSET_DONE) {
- return start_ioreq_error(call, have_ops, GRPC_CALL_ERROR_ALREADY_INVOKED);
- }
- data = reqs[i].data;
- if (op == GRPC_IOREQ_SEND_INITIAL_METADATA ||
- op == GRPC_IOREQ_SEND_TRAILING_METADATA) {
- if (!prepare_application_metadata(call, data.send_metadata.count,
- data.send_metadata.metadata)) {
- return start_ioreq_error(call, have_ops,
- GRPC_CALL_ERROR_INVALID_METADATA);
- }
- }
- if (op == GRPC_IOREQ_SEND_STATUS) {
- set_status_code(call, STATUS_FROM_SERVER_STATUS,
- reqs[i].data.send_status.code);
- if (reqs[i].data.send_status.details) {
- set_status_details(call, STATUS_FROM_SERVER_STATUS,
- grpc_mdstr_ref(reqs[i].data.send_status.details));
+void grpc_call_destroy(grpc_call *c) {
+ int cancel;
+ grpc_call *parent = c->parent;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_call_destroy", 0);
+ GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
+
+ if (parent) {
+ gpr_mu_lock(&parent->mu);
+ if (c == parent->first_child) {
+ parent->first_child = c->sibling_next;
+ if (c == parent->first_child) {
+ parent->first_child = NULL;
}
+ c->sibling_prev->sibling_next = c->sibling_next;
+ c->sibling_next->sibling_prev = c->sibling_prev;
}
- have_ops |= 1u << op;
-
- call->request_data[op] = data;
- call->request_flags[op] = reqs[i].flags;
- call->request_set[op] = set;
+ gpr_mu_unlock(&parent->mu);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
}
- master = &call->masters[set];
- master->success = 1;
- master->need_mask = have_ops;
- master->complete_mask = 0;
- master->on_complete = completion;
- master->user_data = user_data;
-
- finish_read_ops(call);
- early_out_write_ops(call);
-
- return GRPC_CALL_OK;
-}
-
-grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
- grpc_ioreq_completion_func on_complete, void *user_data) {
- grpc_call_error err;
- lock(call);
- err = start_ioreq(call, reqs, nreqs, on_complete, user_data);
- unlock(call);
- return err;
-}
-
-void grpc_call_destroy(grpc_call *c) {
- int cancel;
- lock(c);
+ gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->destroy_called);
c->destroy_called = 1;
- c->cancel_alarm |= c->have_alarm;
- cancel = c->read_state != READ_STATE_STREAM_CLOSED;
- unlock(c);
- if (cancel) grpc_call_cancel(c);
- GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1);
+ if (c->have_alarm) {
+ grpc_timer_cancel(&exec_ctx, &c->alarm);
+ }
+ cancel = !c->received_final_op;
+ gpr_mu_unlock(&c->mu);
+ if (cancel) grpc_call_cancel(c, NULL);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_destroy", 0);
}
-grpc_call_error grpc_call_cancel(grpc_call *call) {
- return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled");
+grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
+ GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
+ GPR_ASSERT(!reserved);
+ return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
+ NULL);
}
grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
grpc_status_code status,
- const char *description) {
+ const char *description,
+ void *reserved) {
grpc_call_error r;
- lock(c);
- r = cancel_with_status(c, status, description);
- unlock(c);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE(
+ "grpc_call_cancel_with_status("
+ "c=%p, status=%d, description=%s, reserved=%p)",
+ 4, (c, (int)status, description, reserved));
+ GPR_ASSERT(reserved == NULL);
+ gpr_mu_lock(&c->mu);
+ r = cancel_with_status(&exec_ctx, c, status, description);
+ gpr_mu_unlock(&c->mu);
+ grpc_exec_ctx_finish(&exec_ctx);
return r;
}
-static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
+typedef struct cancel_closure {
+ grpc_closure closure;
+ grpc_call *call;
+ grpc_status_code status;
+} cancel_closure;
+
+static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
+ cancel_closure *cc = ccp;
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, cc->call, "cancel");
+ gpr_free(cc);
+}
+
+static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
+ grpc_transport_stream_op op;
+ cancel_closure *cc = ccp;
+ memset(&op, 0, sizeof(op));
+ op.cancel_with_status = cc->status;
+ /* reuse closure to catch completion */
+ grpc_closure_init(&cc->closure, done_cancel, cc);
+ op.on_complete = &cc->closure;
+ execute_op(exec_ctx, cc->call, &op);
+}
+
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
const char *description) {
grpc_mdstr *details =
- description ? grpc_mdstr_from_string(c->metadata_context, description)
- : NULL;
+ description ? grpc_mdstr_from_string(description) : NULL;
+ cancel_closure *cc = gpr_malloc(sizeof(*cc));
GPR_ASSERT(status != GRPC_STATUS_OK);
- set_status_code(c, STATUS_FROM_API_OVERRIDE, status);
+ set_status_code(c, STATUS_FROM_API_OVERRIDE, (uint32_t)status);
set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
- c->cancel_with_status = status;
+ grpc_closure_init(&cc->closure, send_cancel, cc);
+ cc->call = c;
+ cc->status = status;
+ GRPC_CALL_INTERNAL_REF(c, "cancel");
+ grpc_exec_ctx_enqueue(exec_ctx, &cc->closure, true, NULL);
return GRPC_CALL_OK;
}
-static void finished_loose_op(void *call, int success_ignored) {
- GRPC_CALL_INTERNAL_UNREF(call, "loose-op", 0);
-}
-
-typedef struct {
- grpc_call *call;
- grpc_iomgr_closure closure;
-} finished_loose_op_allocated_args;
-
-static void finished_loose_op_allocated(void *alloc, int success) {
- finished_loose_op_allocated_args *args = alloc;
- finished_loose_op(args->call, success);
- gpr_free(args);
-}
-
-static void execute_op(grpc_call *call, grpc_transport_op *op) {
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op) {
grpc_call_element *elem;
- GPR_ASSERT(op->on_consumed == NULL);
- if (op->cancel_with_status != GRPC_STATUS_OK || op->bind_pollset) {
- GRPC_CALL_INTERNAL_REF(call, "loose-op");
- if (op->bind_pollset) {
- op->on_consumed = &call->on_done_bind;
- } else {
- finished_loose_op_allocated_args *args = gpr_malloc(sizeof(*args));
- args->call = call;
- grpc_iomgr_closure_init(&args->closure, finished_loose_op_allocated, args);
- op->on_consumed = &args->closure;
- }
- }
-
+ GPR_TIMER_BEGIN("execute_op", 0);
elem = CALL_ELEM_FROM_CALL(call, 0);
op->context = call->context;
- elem->filter->start_transport_op(elem, op);
+ elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ GPR_TIMER_END("execute_op", 0);
+}
+
+char *grpc_call_get_peer(grpc_call *call) {
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ char *result;
+ GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
+ result = elem->filter->get_peer(&exec_ctx, elem);
+ if (result == NULL) {
+ result = grpc_channel_get_target(call->channel);
+ }
+ if (result == NULL) {
+ result = gpr_strdup("unknown");
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ return result;
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
return CALL_FROM_TOP_ELEM(elem);
}
-static void call_alarm(void *arg, int success) {
+static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_call *call = arg;
- lock(call);
+ gpr_mu_lock(&call->mu);
call->have_alarm = 0;
if (success) {
- cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED,
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_DEADLINE_EXCEEDED,
"Deadline Exceeded");
}
- finish_read_ops(call);
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "alarm", 1);
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "alarm");
}
-static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline) {
if (call->have_alarm) {
gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
assert(0);
@@ -1190,7 +787,9 @@ static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
}
GRPC_CALL_INTERNAL_REF(call, "alarm");
call->have_alarm = 1;
- grpc_alarm_init(&call->alarm, deadline, call_alarm, call, gpr_now());
+ call->send_deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+ grpc_timer_init(exec_ctx, &call->alarm, call->send_deadline, call_alarm, call,
+ gpr_now(GPR_CLOCK_MONOTONIC));
}
/* we offset status by a small amount when storing it into transport metadata
@@ -1199,11 +798,15 @@ static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
#define STATUS_OFFSET 1
static void destroy_status(void *ignored) {}
-static gpr_uint32 decode_status(grpc_mdelem *md) {
- gpr_uint32 status;
- void *user_data = grpc_mdelem_get_user_data(md, destroy_status);
- if (user_data) {
- status = ((gpr_uint32)(gpr_intptr)user_data) - STATUS_OFFSET;
+static uint32_t decode_status(grpc_mdelem *md) {
+ uint32_t status;
+ void *user_data;
+ if (md == GRPC_MDELEM_GRPC_STATUS_0) return 0;
+ if (md == GRPC_MDELEM_GRPC_STATUS_1) return 1;
+ if (md == GRPC_MDELEM_GRPC_STATUS_2) return 2;
+ user_data = grpc_mdelem_get_user_data(md, destroy_status);
+ if (user_data != NULL) {
+ status = ((uint32_t)(intptr_t)user_data) - STATUS_OFFSET;
} else {
if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
GPR_SLICE_LENGTH(md->value->slice),
@@ -1211,93 +814,83 @@ static gpr_uint32 decode_status(grpc_mdelem *md) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
grpc_mdelem_set_user_data(md, destroy_status,
- (void *)(gpr_intptr)(status + STATUS_OFFSET));
+ (void *)(intptr_t)(status + STATUS_OFFSET));
}
return status;
}
-/* just as for status above, we need to offset: metadata userdata can't hold a
- * zero (null), which in this case is used to signal no compression */
-#define COMPRESS_OFFSET 1
-static void destroy_compression(void *ignored) {}
-
-static gpr_uint32 decode_compression(grpc_mdelem *md) {
- grpc_compression_level clevel;
- void *user_data = grpc_mdelem_get_user_data(md, destroy_status);
- if (user_data) {
- clevel = ((grpc_compression_level)(gpr_intptr)user_data) - COMPRESS_OFFSET;
- } else {
- gpr_uint32 parsed_clevel_bytes;
- if (gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
- GPR_SLICE_LENGTH(md->value->slice),
- &parsed_clevel_bytes)) {
- /* the following cast is safe, as a gpr_uint32 should be able to hold all
- * possible values of the grpc_compression_level enum */
- clevel = (grpc_compression_level) parsed_clevel_bytes;
- } else {
- clevel = GRPC_COMPRESS_LEVEL_NONE; /* could not parse, no compression */
- }
- grpc_mdelem_set_user_data(md, destroy_compression,
- (void *)(gpr_intptr)(clevel + COMPRESS_OFFSET));
+static uint32_t decode_compression(grpc_mdelem *md) {
+ grpc_compression_algorithm algorithm =
+ grpc_compression_algorithm_from_mdstr(md->value);
+ if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) {
+ const char *md_c_str = grpc_mdstr_as_c_string(md->value);
+ gpr_log(GPR_ERROR, "Invalid compression algorithm: '%s'", md_c_str);
}
- return clevel;
+ return algorithm;
+}
+
+static grpc_mdelem *recv_common_filter(grpc_call *call, grpc_mdelem *elem) {
+ if (elem->key == GRPC_MDSTR_GRPC_STATUS) {
+ GPR_TIMER_BEGIN("status", 0);
+ set_status_code(call, STATUS_FROM_WIRE, decode_status(elem));
+ GPR_TIMER_END("status", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_MESSAGE) {
+ GPR_TIMER_BEGIN("status-details", 0);
+ set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(elem->value));
+ GPR_TIMER_END("status-details", 0);
+ return NULL;
+ }
+ return elem;
}
-static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
- grpc_linked_mdelem *l;
+static grpc_mdelem *publish_app_metadata(grpc_call *call, grpc_mdelem *elem,
+ int is_trailing) {
grpc_metadata_array *dest;
grpc_metadata *mdusr;
- int is_trailing;
- grpc_mdctx *mdctx = call->metadata_context;
-
- is_trailing = call->read_state >= READ_STATE_GOT_INITIAL_METADATA;
- for (l = md->list.head; l != NULL; l = l->next) {
- grpc_mdelem *md = l->md;
- grpc_mdstr *key = md->key;
- if (key == grpc_channel_get_status_string(call->channel)) {
- set_status_code(call, STATUS_FROM_WIRE, decode_status(md));
- } else if (key == grpc_channel_get_message_string(call->channel)) {
- set_status_details(call, STATUS_FROM_WIRE, grpc_mdstr_ref(md->value));
- } else if (key == grpc_channel_get_compresssion_level_string(call->channel)) {
- set_decode_compression_level(call, decode_compression(md));
- } else {
- dest = &call->buffered_metadata[is_trailing];
- if (dest->count == dest->capacity) {
- dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
- dest->metadata =
- gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
- }
- mdusr = &dest->metadata[dest->count++];
- mdusr->key = grpc_mdstr_as_c_string(md->key);
- mdusr->value = grpc_mdstr_as_c_string(md->value);
- mdusr->value_length = GPR_SLICE_LENGTH(md->value->slice);
- if (call->owned_metadata_count == call->owned_metadata_capacity) {
- call->owned_metadata_capacity =
- GPR_MAX(call->owned_metadata_capacity + 8,
- call->owned_metadata_capacity * 2);
- call->owned_metadata =
- gpr_realloc(call->owned_metadata,
- sizeof(grpc_mdelem *) * call->owned_metadata_capacity);
- }
- call->owned_metadata[call->owned_metadata_count++] = md;
- l->md = 0;
- }
- }
- if (gpr_time_cmp(md->deadline, gpr_inf_future) != 0) {
- set_deadline_alarm(call, md->deadline);
- }
- if (!is_trailing) {
- call->read_state = READ_STATE_GOT_INITIAL_METADATA;
- }
+ GPR_TIMER_BEGIN("publish_app_metadata", 0);
+ dest = call->buffered_metadata[is_trailing];
+ if (dest->count == dest->capacity) {
+ dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
+ dest->metadata =
+ gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ }
+ mdusr = &dest->metadata[dest->count++];
+ mdusr->key = grpc_mdstr_as_c_string(elem->key);
+ mdusr->value = grpc_mdstr_as_c_string(elem->value);
+ mdusr->value_length = GPR_SLICE_LENGTH(elem->value->slice);
+ GPR_TIMER_END("publish_app_metadata", 0);
+ return elem;
+}
- grpc_mdctx_lock(mdctx);
- for (l = md->list.head; l; l = l->next) {
- if (l->md) grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
+static grpc_mdelem *recv_initial_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ENCODING) {
+ GPR_TIMER_BEGIN("compression_algorithm", 0);
+ set_compression_algorithm(call, decode_compression(elem));
+ GPR_TIMER_END("compression_algorithm", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ACCEPT_ENCODING) {
+ GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
+ set_encodings_accepted_by_peer(call, elem);
+ GPR_TIMER_END("encodings_accepted_by_peer", 0);
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 0);
}
- for (l = md->garbage.head; l; l = l->next) {
- grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
+}
+
+static grpc_mdelem *recv_trailing_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 1);
}
- grpc_mdctx_unlock(mdctx);
}
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
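
The receive path above replaces the old single recv_metadata() walk with small
filter callbacks: grpc_metadata_batch_filter hands each element to
recv_initial_filter/recv_trailing_filter, which either consume it (returning
NULL, e.g. for grpc-status or grpc-encoding) or return it so that
publish_app_metadata exposes it to the application. A standalone sketch of that
filter contract, with a toy key/value batch instead of the real
grpc_metadata_batch:

    #include <stdio.h>
    #include <string.h>

    typedef struct kv { const char *key; const char *value; } kv;
    typedef kv *(*md_filter)(void *user_data, kv *elem);

    typedef struct toy_call { int status_seen; const char *message; } toy_call;

    /* Consume status/message into call state; pass everything else through. */
    static kv *recv_filter(void *user_data, kv *elem) {
      toy_call *call = user_data;
      if (strcmp(elem->key, "grpc-status") == 0) {
        call->status_seen = 1;
        return NULL;
      } else if (strcmp(elem->key, "grpc-message") == 0) {
        call->message = elem->value;
        return NULL;
      }
      return elem; /* published to the application */
    }

    /* Keep only the elements the filter hands back, compacting in place. */
    static size_t filter_batch(kv *batch, size_t count, md_filter f, void *ud) {
      size_t i, kept = 0;
      for (i = 0; i < count; i++) {
        kv *out = f(ud, &batch[i]);
        if (out != NULL) batch[kept++] = *out;
      }
      return kept;
    }

    int main(void) {
      kv batch[] = {{"grpc-status", "0"},
                    {"grpc-message", "OK"},
                    {"user-agent", "demo"}};
      toy_call call = {0, NULL};
      size_t kept = filter_batch(batch, 3, recv_filter, &call);
      printf("published %u element(s); first key: %s\n", (unsigned)kept,
             batch[0].key);
      return 0;
    }
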
@@ -1313,163 +906,565 @@ static void set_status_value_directly(grpc_status_code status, void *dest) {
}
static void set_cancelled_value(grpc_status_code status, void *dest) {
- *(grpc_status_code *)dest = (status != GRPC_STATUS_OK);
+ *(int *)dest = (status != GRPC_STATUS_OK);
}
-static void finish_batch(grpc_call *call, int success, void *tag) {
- grpc_cq_end_op(call->cq, tag, call, success);
+static int are_write_flags_valid(uint32_t flags) {
+ /* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
+ const uint32_t allowed_write_positions =
+ (GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
+ const uint32_t invalid_positions = ~allowed_write_positions;
+ return !(flags & invalid_positions);
}
-static void finish_batch_with_close(grpc_call *call, int success, void *tag) {
- grpc_cq_end_op(call->cq, tag, call, 1);
+static batch_control *allocate_batch_control(grpc_call *call) {
+ size_t i;
+ for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
+ if ((call->used_batches & (1 << i)) == 0) {
+ call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
+ return &call->active_batches[i];
+ }
+ }
+ return NULL;
}
-static int are_write_flags_valid(gpr_uint32 flags) {
- /* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
- const gpr_uint32 allowed_write_positions =
- (GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
- const gpr_uint32 invalid_positions = ~allowed_write_positions;
- return !(flags & invalid_positions);
+static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_cq_completion *storage) {
+ batch_control *bctl = user_data;
+ grpc_call *call = bctl->call;
+ gpr_mu_lock(&call->mu);
+ call->used_batches = (uint8_t)(
+ call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
-grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
- size_t nops, void *tag) {
- grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT];
- size_t in;
- size_t out;
- const grpc_op *op;
- grpc_ioreq *req;
- void (*finish_func)(grpc_call *, int, void *) = finish_batch;
+static void post_batch_completion(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ if (bctl->is_notify_tag_closure) {
+ grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success, NULL);
+ gpr_mu_lock(&call->mu);
+ bctl->call->used_batches =
+ (uint8_t)(bctl->call->used_batches &
+ ~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+ } else {
+ grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->success,
+ finish_batch_completion, bctl, &bctl->cq_completion);
+ }
+}
+
+static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ for (;;) {
+ size_t remaining = call->receiving_stream->length -
+ (*call->receiving_buffer)->data.raw.slice_buffer.length;
+ if (remaining == 0) {
+ call->receiving_message = 0;
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ return;
+ }
+ if (grpc_byte_stream_next(exec_ctx, call->receiving_stream,
+ &call->receiving_slice, remaining,
+ &call->receiving_slice_ready)) {
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+ } else {
+ return;
+ }
+ }
+}
- GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
- if (nops == 0) {
- grpc_cq_begin_op(call->cq, call);
- grpc_cq_end_op(call->cq, tag, call, 1);
- return GRPC_CALL_OK;
+ if (success) {
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+ continue_receiving_slices(exec_ctx, bctl);
+ } else {
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ grpc_byte_buffer_destroy(*call->receiving_buffer);
+ *call->receiving_buffer = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ }
+}
+
+static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl,
+ bool success) {
+ grpc_call *call = bctl->call;
+ if (call->receiving_stream == NULL) {
+ *call->receiving_buffer = NULL;
+ call->receiving_message = 0;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else if (call->receiving_stream->length >
+ grpc_channel_get_max_message_length(call->channel)) {
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_INTERNAL,
+ "Max message size exceeded");
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ *call->receiving_buffer = NULL;
+ call->receiving_message = 0;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else {
+ call->test_only_last_message_flags = call->receiving_stream->flags;
+ if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
+ (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
+ *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
+ NULL, 0, call->compression_algorithm);
+ } else {
+ *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
+ }
+ grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
+ bctl);
+ continue_receiving_slices(exec_ctx, bctl);
+ /* early out */
+ return;
}
+}
- /* rewrite batch ops into ioreq ops */
- for (in = 0, out = 0; in < nops; in++) {
- op = &ops[in];
+static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ gpr_mu_lock(&bctl->call->mu);
+ if (bctl->call->has_initial_md_been_received) {
+ gpr_mu_unlock(&bctl->call->mu);
+ process_data_after_md(exec_ctx, bctlp, success);
+ } else {
+ call->saved_receiving_stream_ready_ctx.bctlp = bctlp;
+ call->saved_receiving_stream_ready_ctx.success = success;
+ gpr_mu_unlock(&bctl->call->mu);
+ }
+}
+
+static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
+ void *bctlp, bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ gpr_mu_lock(&call->mu);
+
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_initial_filter, call);
+ call->has_initial_md_been_received = true;
+
+ if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
+ 0 &&
+ !call->is_client) {
+ GPR_TIMER_BEGIN("set_deadline_alarm", 0);
+ set_deadline_alarm(exec_ctx, call, md->deadline);
+ GPR_TIMER_END("set_deadline_alarm", 0);
+ }
+
+ if (call->saved_receiving_stream_ready_ctx.bctlp != NULL) {
+ grpc_closure *saved_rsr_closure = grpc_closure_create(
+ receiving_stream_ready, call->saved_receiving_stream_ready_ctx.bctlp);
+ grpc_exec_ctx_enqueue(exec_ctx, saved_rsr_closure,
+ call->saved_receiving_stream_ready_ctx.success, NULL);
+ call->saved_receiving_stream_ready_ctx.bctlp = NULL;
+ }
+
+ gpr_mu_unlock(&call->mu);
+
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
+
+static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+ grpc_call *child_call;
+ grpc_call *next_child_call;
+
+ gpr_mu_lock(&call->mu);
+ if (bctl->send_initial_metadata) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ }
+ if (bctl->send_final_op) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
+ }
+ if (bctl->recv_final_op) {
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_trailing_filter, call);
+
+ if (call->have_alarm) {
+ grpc_timer_cancel(exec_ctx, &call->alarm);
+ }
+ /* propagate cancellation to any interested children */
+ child_call = call->first_child;
+ if (child_call != NULL) {
+ do {
+ next_child_call = child_call->sibling_next;
+ if (child_call->cancellation_is_inherited) {
+ GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
+ grpc_call_cancel(child_call, NULL);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
+ }
+ child_call = next_child_call;
+ } while (child_call != call->first_child);
+ }
+
+ if (call->is_client) {
+ get_final_status(call, set_status_value_directly,
+ call->final_op.client.status);
+ get_final_details(call, call->final_op.client.status_details,
+ call->final_op.client.status_details_capacity);
+ } else {
+ get_final_status(call, set_cancelled_value,
+ call->final_op.server.cancelled);
+ }
+
+ success = 1;
+ }
+ bctl->success = success != 0;
+ gpr_mu_unlock(&call->mu);
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
+
+static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, const grpc_op *ops,
+ size_t nops, void *notify_tag,
+ int is_notify_tag_closure) {
+ grpc_transport_stream_op stream_op;
+ size_t i;
+ const grpc_op *op;
+ batch_control *bctl;
+ int num_completion_callbacks_needed = 1;
+ grpc_call_error error = GRPC_CALL_OK;
+
+ GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
+
+ GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
+
+ memset(&stream_op, 0, sizeof(stream_op));
+
+ /* TODO(ctiller): this feels like it could be made lock-free */
+ gpr_mu_lock(&call->mu);
+ bctl = allocate_batch_control(call);
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+
+ if (nops == 0) {
+ GRPC_CALL_INTERNAL_REF(call, "completion");
+ bctl->success = 1;
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_mu_unlock(&call->mu);
+ post_batch_completion(exec_ctx, bctl);
+ error = GRPC_CALL_OK;
+ goto done;
+ }
+
+ /* rewrite batch ops into a transport op */
+ for (i = 0; i < nops; i++) {
+ op = &ops[i];
+ if (op->reserved != NULL) {
+ error = GRPC_CALL_ERROR;
+ goto done_with_error;
+ }
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_INITIAL_METADATA;
- req->data.send_metadata.count = op->data.send_initial_metadata.count;
- req->data.send_metadata.metadata =
- op->data.send_initial_metadata.metadata;
- req->flags = op->flags;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->sent_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ if (op->data.send_initial_metadata.count > INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ bctl->send_initial_metadata = 1;
+ call->sent_initial_metadata = 1;
+ if (!prepare_application_metadata(
+ call, (int)op->data.send_initial_metadata.count,
+ op->data.send_initial_metadata.metadata, 0, call->is_client)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ /* TODO(ctiller): just make these the same variable? */
+ call->metadata_batch[0][0].deadline = call->send_deadline;
+ stream_op.send_initial_metadata =
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
- return GRPC_CALL_ERROR_INVALID_FLAGS;
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
}
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_MESSAGE;
- req->data.send_message = op->data.send_message;
- req->flags = ops->flags;
+ if (op->data.send_message == NULL) {
+ error = GRPC_CALL_ERROR_INVALID_MESSAGE;
+ goto done_with_error;
+ }
+ if (call->sending_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ bctl->send_message = 1;
+ call->sending_message = 1;
+ grpc_slice_buffer_stream_init(
+ &call->sending_stream,
+ &op->data.send_message->data.raw.slice_buffer, op->flags);
+ stream_op.send_message = &call->sending_stream.base;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done_with_error;
}
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_CLOSE;
- req->flags = op->flags;
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
if (call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done_with_error;
+ }
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_TRAILING_METADATA;
- req->flags = op->flags;
- req->data.send_metadata.count =
- op->data.send_status_from_server.trailing_metadata_count;
- req->data.send_metadata.metadata =
- op->data.send_status_from_server.trailing_metadata;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_STATUS;
- req->data.send_status.code = op->data.send_status_from_server.status;
- req->data.send_status.details =
- op->data.send_status_from_server.status_details != NULL
- ? grpc_mdstr_from_string(
- call->metadata_context,
- op->data.send_status_from_server.status_details)
- : NULL;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_SEND_CLOSE;
+ if (op->data.send_status_from_server.trailing_metadata_count >
+ INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ call->send_extra_metadata_count = 1;
+ call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
+ call->channel, op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status_details != NULL) {
+ call->send_extra_metadata[1].md = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_mdstr_from_string(
+ op->data.send_status_from_server.status_details));
+ call->send_extra_metadata_count++;
+ set_status_details(
+ call, STATUS_FROM_API_OVERRIDE,
+ GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
+ }
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (uint32_t)op->data.send_status_from_server.status);
+ if (!prepare_application_metadata(
+ call,
+ (int)op->data.send_status_from_server.trailing_metadata_count,
+ op->data.send_status_from_server.trailing_metadata, 1, 1)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
- if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->received_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- req->data.recv_metadata = op->data.recv_initial_metadata;
- req->flags = op->flags;
+ call->received_initial_metadata = 1;
+ call->buffered_metadata[0] = op->data.recv_initial_metadata;
+ grpc_closure_init(&call->receiving_initial_metadata_ready,
+ receiving_initial_metadata_ready, bctl);
+ bctl->recv_initial_metadata = 1;
+ stream_op.recv_initial_metadata =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+ stream_op.recv_initial_metadata_ready =
+ &call->receiving_initial_metadata_ready;
+ num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_MESSAGE:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_MESSAGE;
- req->data.recv_message = op->data.recv_message;
- req->flags = op->flags;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->receiving_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->receiving_message = 1;
+ bctl->recv_message = 1;
+ call->receiving_buffer = op->data.recv_message;
+ stream_op.recv_message = &call->receiving_stream;
+ grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
+ bctl);
+ stream_op.recv_message_ready = &call->receiving_stream_ready;
+ num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done_with_error;
+ }
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_STATUS;
- req->flags = op->flags;
- req->data.recv_status.set_value = set_status_value_directly;
- req->data.recv_status.user_data = op->data.recv_status_on_client.status;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_STATUS_DETAILS;
- req->data.recv_status_details.details =
+ call->received_final_op = 1;
+ call->buffered_metadata[1] =
+ op->data.recv_status_on_client.trailing_metadata;
+ call->final_op.client.status = op->data.recv_status_on_client.status;
+ call->final_op.client.status_details =
op->data.recv_status_on_client.status_details;
- req->data.recv_status_details.details_capacity =
+ call->final_op.client.status_details_capacity =
op->data.recv_status_on_client.status_details_capacity;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_TRAILING_METADATA;
- req->data.recv_metadata =
- op->data.recv_status_on_client.trailing_metadata;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_CLOSE;
- finish_func = finish_batch_with_close;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_STATUS;
- req->flags = op->flags;
- req->data.recv_status.set_value = set_cancelled_value;
- req->data.recv_status.user_data =
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done_with_error;
+ }
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->received_final_op = 1;
+ call->final_op.server.cancelled =
op->data.recv_close_on_server.cancelled;
- req = &reqs[out++];
- req->op = GRPC_IOREQ_RECV_CLOSE;
- finish_func = finish_batch_with_close;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
break;
}
}
- grpc_cq_begin_op(call->cq, call);
+ GRPC_CALL_INTERNAL_REF(call, "completion");
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
+
+ stream_op.context = call->context;
+ grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
+ stream_op.on_complete = &bctl->finish_batch;
+ gpr_mu_unlock(&call->mu);
+
+ execute_op(exec_ctx, call, &stream_op);
+
+done:
+ GPR_TIMER_END("grpc_call_start_batch", 0);
+ return error;
+
+done_with_error:
+  /* reverse any mutations that occurred */
+ if (bctl->send_initial_metadata) {
+ call->sent_initial_metadata = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][0]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base);
+ }
+ if (bctl->send_final_op) {
+ call->sent_final_op = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][1]);
+ }
+ if (bctl->recv_initial_metadata) {
+ call->received_initial_metadata = 0;
+ }
+ if (bctl->recv_message) {
+ call->receiving_message = 0;
+ }
+ if (bctl->recv_final_op) {
+ call->received_final_op = 0;
+ }
+ gpr_mu_unlock(&call->mu);
+ goto done;
+}
+
+grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
+ size_t nops, void *tag, void *reserved) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call_error err;
+
+ GRPC_API_TRACE(
+ "grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
+ 5, (call, ops, (unsigned long)nops, tag, reserved));
+
+ if (reserved != NULL) {
+ err = GRPC_CALL_ERROR;
+ } else {
+ err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+ return err;
+}
- return grpc_call_start_ioreq_and_call_back(call, reqs, out, finish_func, tag);
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure) {
+ return call_start_batch(exec_ctx, call, ops, nops, closure, 1);
}
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
@@ -1485,4 +1480,12 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem) {
return call->context[elem].value;
}
-gpr_uint8 grpc_call_is_client(grpc_call *call) { return call->is_client; }
+uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; }
+
+grpc_compression_algorithm grpc_call_compression_for_level(
+ grpc_call *call, grpc_compression_level level) {
+ gpr_mu_lock(&call->mu);
+ const uint32_t accepted_encodings = call->encodings_accepted_by_peer;
+ gpr_mu_unlock(&call->mu);
+ return grpc_compression_algorithm_for_level(level, accepted_encodings);
+}
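
With the ioreq layer gone, grpc_call_start_batch now takes a trailing reserved
pointer (which must be NULL) and validates each op's flags and per-op reserved
field before translating the batch into a single transport stream op. A
hypothetical caller-side helper showing the new signature and the field names
used above; the surrounding setup (channel, client call, completion queue) is
assumed:

    #include <string.h>
    #include <grpc/grpc.h>

    static grpc_call_error start_status_batch(grpc_call *call,
                                              grpc_metadata_array *trailing_md,
                                              grpc_status_code *status,
                                              char **details,
                                              size_t *details_capacity,
                                              void *tag) {
      grpc_op ops[2];
      memset(ops, 0, sizeof(ops)); /* zeroes flags and the per-op reserved fields */

      ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
      ops[0].data.send_initial_metadata.count = 0;

      ops[1].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
      ops[1].data.recv_status_on_client.trailing_metadata = trailing_md;
      ops[1].data.recv_status_on_client.status = status;
      ops[1].data.recv_status_on_client.status_details = details;
      ops[1].data.recv_status_on_client.status_details_capacity = details_capacity;

      /* the trailing reserved argument must be NULL, as checked above */
      return grpc_call_start_batch(call, ops, 2, tag, NULL);
    }
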
diff --git a/src/core/surface/call.h b/src/core/surface/call.h
index fb3662b50d..d2edf03d45 100644
--- a/src/core/surface/call.h
+++ b/src/core/surface/call.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,109 +31,67 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_CALL_H
-#define GRPC_INTERNAL_CORE_SURFACE_CALL_H
+#ifndef GRPC_CORE_SURFACE_CALL_H
+#define GRPC_CORE_SURFACE_CALL_H
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/context.h"
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/surface_trace.h"
+
#include <grpc/grpc.h>
+#include <grpc/impl/codegen/compression_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
-/* Primitive operation types - grpc_op's get rewritten into these */
-typedef enum {
- GRPC_IOREQ_RECV_INITIAL_METADATA,
- GRPC_IOREQ_RECV_MESSAGE,
- GRPC_IOREQ_RECV_TRAILING_METADATA,
- GRPC_IOREQ_RECV_STATUS,
- GRPC_IOREQ_RECV_STATUS_DETAILS,
- GRPC_IOREQ_RECV_CLOSE,
- GRPC_IOREQ_SEND_INITIAL_METADATA,
- GRPC_IOREQ_SEND_MESSAGE,
- GRPC_IOREQ_SEND_TRAILING_METADATA,
- GRPC_IOREQ_SEND_STATUS,
- GRPC_IOREQ_SEND_CLOSE,
- GRPC_IOREQ_OP_COUNT
-} grpc_ioreq_op;
-
-typedef union {
- grpc_metadata_array *recv_metadata;
- grpc_byte_buffer **recv_message;
- struct {
- void (*set_value)(grpc_status_code status, void *user_data);
- void *user_data;
- } recv_status;
- struct {
- char **details;
- size_t *details_capacity;
- } recv_status_details;
- struct {
- size_t count;
- grpc_metadata *metadata;
- } send_metadata;
- grpc_byte_buffer *send_message;
- struct {
- grpc_status_code code;
- grpc_mdstr *details;
- } send_status;
-} grpc_ioreq_data;
-
-typedef struct {
- grpc_ioreq_op op;
- grpc_ioreq_data data;
- gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
-} grpc_ioreq;
-
-typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,
+typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, int success,
void *user_data);
-grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
+grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
+ uint32_t propagation_mask,
+ grpc_completion_queue *cq,
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline);
-void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq);
-grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_completion_queue *cq);
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
-void grpc_call_internal_unref(grpc_call *call, const char *reason,
- int allow_immediate_deletion);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ const char *reason);
#define GRPC_CALL_INTERNAL_REF(call, reason) \
grpc_call_internal_ref(call, reason)
-#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \
- grpc_call_internal_unref(call, reason, allow_immediate_deletion)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call, reason)
#else
void grpc_call_internal_ref(grpc_call *call);
-void grpc_call_internal_unref(grpc_call *call, int allow_immediate_deletion);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
-#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \
- grpc_call_internal_unref(call, allow_immediate_deletion)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call)
#endif
-grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
- grpc_ioreq_completion_func on_complete, void *user_data);
-
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure);
+
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
-extern int grpc_trace_batch;
-
void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
-void grpc_server_log_request_call(char *file, int line,
- gpr_log_severity severity,
- grpc_server *server, grpc_call **call,
- grpc_call_details *details,
- grpc_metadata_array *initial_metadata,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification,
- void *tag);
-
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
@@ -142,15 +100,17 @@ void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
- if (grpc_trace_batch) grpc_call_log_batch(sev, call, ops, nops, tag)
+ if (grpc_api_trace) grpc_call_log_batch(sev, call, ops, nops, tag)
-#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, \
- initial_metadata, cq_bound_to_call, \
- cq_for_notifications, tag) \
- if (grpc_trace_batch) \
- grpc_server_log_request_call(sev, server, call, details, initial_metadata, \
- cq_bound_to_call, cq_for_notifications, tag)
+uint8_t grpc_call_is_client(grpc_call *call);
-gpr_uint8 grpc_call_is_client(grpc_call *call);
+/* Return an appropriate compression algorithm for the requested compression \a
+ * level in the context of \a call. */
+grpc_compression_algorithm grpc_call_compression_for_level(
+ grpc_call *call, grpc_compression_level level);
+
+#ifdef __cplusplus
+}
+#endif
-#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_H */
+#endif /* GRPC_CORE_SURFACE_CALL_H */
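The closure-based variant declared above is intended for in-core callers that already hold an exec_ctx. A hedged sketch, with an illustrative callback name and assertion; the closure convention (exec_ctx, arg, success) matches the rest of this change:

    /* Illustrative in-core usage of grpc_call_start_batch_and_execute. */
    static void on_batch_done(grpc_exec_ctx *exec_ctx, void *arg,
                              bool success) {
      /* inspect results recorded through `arg` */
    }

    static void start_internal_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
                                     grpc_op *ops, size_t nops, void *arg,
                                     grpc_closure *closure) {
      grpc_closure_init(closure, on_batch_done, arg);
      GPR_ASSERT(grpc_call_start_batch_and_execute(exec_ctx, call, ops, nops,
                                                   closure) == GRPC_CALL_OK);
    }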
diff --git a/src/core/surface/call_details.c b/src/core/surface/call_details.c
index 67862d7afe..60f0029819 100644
--- a/src/core/surface/call_details.c
+++ b/src/core/surface/call_details.c
@@ -36,11 +36,15 @@
#include <string.h>
-void grpc_call_details_init(grpc_call_details *cd) {
+#include "src/core/surface/api_trace.h"
+
+void grpc_call_details_init(grpc_call_details* cd) {
+ GRPC_API_TRACE("grpc_call_details_init(cd=%p)", 1, (cd));
memset(cd, 0, sizeof(*cd));
}
-void grpc_call_details_destroy(grpc_call_details *cd) {
+void grpc_call_details_destroy(grpc_call_details* cd) {
+ GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd));
gpr_free(cd->method);
gpr_free(cd->host);
}
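The init/destroy pair above typically brackets a server-side request; the lifecycle, with the grpc_server_request_call plumbing elided, looks roughly like:

    /* Sketch of the grpc_call_details lifecycle on the server side. */
    grpc_call_details details;
    grpc_metadata_array request_metadata;
    grpc_call_details_init(&details);
    grpc_metadata_array_init(&request_metadata);
    /* ... hand &details and &request_metadata to grpc_server_request_call()
       and wait for the matching tag on a completion queue ... */
    grpc_metadata_array_destroy(&request_metadata);
    grpc_call_details_destroy(&details);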
diff --git a/src/core/surface/call_log_batch.c b/src/core/surface/call_log_batch.c
index 55663298c9..46756f418b 100644
--- a/src/core/surface/call_log_batch.c
+++ b/src/core/surface/call_log_batch.c
@@ -37,17 +37,15 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-int grpc_trace_batch = 0;
-
static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
size_t i;
- for(i = 0; i < count; i++) {
+ for (i = 0; i < count; i++) {
gpr_strvec_add(b, gpr_strdup("\nkey="));
gpr_strvec_add(b, gpr_strdup(md[i].key));
gpr_strvec_add(b, gpr_strdup(" value="));
- gpr_strvec_add(b, gpr_hexdump(md[i].value, md[i].value_length,
- GPR_HEXDUMP_PLAINTEXT));
+ gpr_strvec_add(b, gpr_dump(md[i].value, md[i].value_length,
+ GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
}
@@ -112,27 +110,9 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
void *tag) {
char *tmp;
size_t i;
- gpr_log(file, line, severity,
- "grpc_call_start_batch(call=%p, ops=%p, nops=%d, tag=%p)", call, ops, nops, tag);
- for(i = 0; i < nops; i++) {
+ for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]);
gpr_log(file, line, severity, "ops[%d]: %s", i, tmp);
gpr_free(tmp);
}
}
-
-void grpc_server_log_request_call(char *file, int line,
- gpr_log_severity severity,
- grpc_server *server,
- grpc_call **call,
- grpc_call_details *details,
- grpc_metadata_array *initial_metadata,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification,
- void *tag) {
- gpr_log(file, line, severity,
- "grpc_server_request_call(server=%p, call=%p, details=%p, "
- "initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
- "tag=%p)", server, call, details, initial_metadata,
- cq_bound_to_call, cq_for_notification, tag);
-}
diff --git a/src/core/surface/call_test_only.h b/src/core/surface/call_test_only.h
new file mode 100644
index 0000000000..fdc43a383b
--- /dev/null
+++ b/src/core/surface/call_test_only.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SURFACE_CALL_TEST_ONLY_H
+#define GRPC_CORE_SURFACE_CALL_TEST_ONLY_H
+
+#include <grpc/grpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Return the compression algorithm from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call);
+
+/** Return the message flags from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+uint32_t grpc_call_test_only_get_message_flags(grpc_call *call);
+
+/** Returns a bitset for the encodings (compression algorithms) supported by \a
+ * call's peer.
+ *
+ * To be indexed by grpc_compression_algorithm enum values. */
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_SURFACE_CALL_TEST_ONLY_H */
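As the comment says, the accepted-encodings value is a bitset indexed by grpc_compression_algorithm, so a test can probe an individual algorithm by bit position; GRPC_COMPRESS_GZIP below is only an example:

    /* Test-only probe: did the peer advertise gzip? */
    uint32_t encodings =
        grpc_call_test_only_get_encodings_accepted_by_peer(call);
    if (encodings & (1u << GRPC_COMPRESS_GZIP)) {
      /* the peer accepts gzip; the test can exercise that path */
    }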
diff --git a/src/core/surface/channel.c b/src/core/surface/channel.c
index a3c4dcebc1..964ab34431 100644
--- a/src/core/surface/channel.c
+++ b/src/core/surface/channel.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,13 +36,18 @@
#include <stdlib.h>
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/surface/channel_init.h"
+#include "src/core/client_config/resolver_registry.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
-#include "src/core/surface/client.h"
#include "src/core/surface/init.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
+#include "src/core/transport/static_metadata.h"
/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
* Avoids needing to take a metadata context lock for sending status
@@ -59,21 +64,12 @@ typedef struct registered_call {
struct grpc_channel {
int is_client;
- gpr_refcount refs;
- gpr_uint32 max_message_length;
- grpc_mdctx *metadata_context;
- /** mdstr for the grpc-status key */
- grpc_mdstr *grpc_status_string;
- grpc_mdstr *grpc_compression_level_string;
- grpc_mdstr *grpc_message_string;
- grpc_mdstr *path_string;
- grpc_mdstr *authority_string;
- /** mdelem for grpc-status: 0 thru grpc-status: 2 */
- grpc_mdelem *grpc_status_elem[NUM_CACHED_STATUS_ELEMS];
+ uint32_t max_message_length;
+ grpc_mdelem *default_authority;
gpr_mu registered_call_mu;
registered_call *registered_calls;
- grpc_iomgr_closure destroy_closure;
+ char *target;
};
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
@@ -85,40 +81,27 @@ struct grpc_channel {
/* the protobuf library will (by default) start warning at 100megs */
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
-grpc_channel *grpc_channel_create_from_filters(
- const grpc_channel_filter **filters, size_t num_filters,
- const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client) {
- size_t i;
- size_t size =
- sizeof(grpc_channel) + grpc_channel_stack_size(filters, num_filters);
- grpc_channel *channel = gpr_malloc(size);
- GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, bool success);
+
+grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_channel_stack_type channel_stack_type,
+ grpc_transport *optional_transport) {
+ bool is_client = grpc_channel_stack_type_is_client(channel_stack_type);
+
+ grpc_channel *channel = grpc_channel_init_create_stack(
+ exec_ctx, channel_stack_type, sizeof(grpc_channel), args, 1,
+ destroy_channel, NULL, optional_transport);
+
+ memset(channel, 0, sizeof(*channel));
+ channel->target = gpr_strdup(target);
channel->is_client = is_client;
- /* decremented by grpc_channel_destroy, and grpc_client_channel_closed if
- * is_client */
- gpr_ref_init(&channel->refs, 1 + is_client);
- channel->metadata_context = mdctx;
- channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status");
- channel->grpc_compression_level_string =
- grpc_mdstr_from_string(mdctx, "grpc-compression-level");
- channel->grpc_message_string = grpc_mdstr_from_string(mdctx, "grpc-message");
- for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
- char buf[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(i, buf);
- channel->grpc_status_elem[i] = grpc_mdelem_from_metadata_strings(
- mdctx, grpc_mdstr_ref(channel->grpc_status_string),
- grpc_mdstr_from_string(mdctx, buf));
- }
- channel->path_string = grpc_mdstr_from_string(mdctx, ":path");
- channel->authority_string = grpc_mdstr_from_string(mdctx, ":authority");
- grpc_channel_stack_init(filters, num_filters, args, channel->metadata_context,
- CHANNEL_STACK_FROM_CHANNEL(channel));
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
channel->max_message_length = DEFAULT_MAX_MESSAGE_LENGTH;
if (args) {
- for (i = 0; i < args->num_args; i++) {
+ for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_MESSAGE_LENGTH)) {
if (args->args[i].type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer",
@@ -127,53 +110,116 @@ grpc_channel *grpc_channel_create_from_filters(
gpr_log(GPR_ERROR, "%s ignored: it must be >= 0",
GRPC_ARG_MAX_MESSAGE_LENGTH);
} else {
- channel->max_message_length = args->args[i].value.integer;
+ channel->max_message_length = (uint32_t)args->args[i].value.integer;
+ }
+ } else if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_ARG_DEFAULT_AUTHORITY);
+ } else {
+ if (channel->default_authority) {
+ /* setting this takes precedence over anything else */
+ GRPC_MDELEM_UNREF(channel->default_authority);
+ }
+ channel->default_authority = grpc_mdelem_from_strings(
+ ":authority", args->args[i].value.string);
+ }
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
+ } else {
+ if (channel->default_authority) {
+ /* other ways of setting this (notably ssl) take precedence */
+ gpr_log(GPR_ERROR,
+ "%s ignored: default host already set some other way",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
+ } else {
+ channel->default_authority = grpc_mdelem_from_strings(
+ ":authority", args->args[i].value.string);
+ }
}
}
}
}
+ if (channel->is_client && channel->default_authority == NULL &&
+ target != NULL) {
+ char *default_authority = grpc_get_default_authority(target);
+ if (default_authority) {
+ channel->default_authority =
+ grpc_mdelem_from_strings(":authority", default_authority);
+ }
+ gpr_free(default_authority);
+ }
+
return channel;
}
+char *grpc_channel_get_target(grpc_channel *channel) {
+ GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
+ return gpr_strdup(channel->target);
+}
+
static grpc_call *grpc_channel_create_call_internal(
- grpc_channel *channel, grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
grpc_mdelem *send_metadata[2];
+ size_t num_metadata = 0;
GPR_ASSERT(channel->is_client);
- send_metadata[0] = path_mdelem;
- send_metadata[1] = authority_mdelem;
+ send_metadata[num_metadata++] = path_mdelem;
+ if (authority_mdelem != NULL) {
+ send_metadata[num_metadata++] = authority_mdelem;
+ } else if (channel->default_authority != NULL) {
+ send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
+ }
- return grpc_call_create(channel, cq, NULL, send_metadata,
- GPR_ARRAY_SIZE(send_metadata), deadline);
+ return grpc_call_create(channel, parent_call, propagation_mask, cq, NULL,
+ send_metadata, num_metadata, deadline);
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,
+ grpc_call *parent_call,
+ uint32_t propagation_mask,
grpc_completion_queue *cq,
const char *method, const char *host,
- gpr_timespec deadline) {
+ gpr_timespec deadline, void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_channel_create_call("
+ "channel=%p, parent_call=%p, propagation_mask=%x, cq=%p, method=%s, "
+ "host=%s, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 10, (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
+ (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
- channel, cq,
- grpc_mdelem_from_metadata_strings(
- channel->metadata_context, grpc_mdstr_ref(channel->path_string),
- grpc_mdstr_from_string(channel->metadata_context, method)),
- grpc_mdelem_from_metadata_strings(
- channel->metadata_context, grpc_mdstr_ref(channel->authority_string),
- grpc_mdstr_from_string(channel->metadata_context, host)),
+ channel, parent_call, propagation_mask, cq,
+ grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method)),
+ host ? grpc_mdelem_from_metadata_strings(GRPC_MDSTR_AUTHORITY,
+ grpc_mdstr_from_string(host))
+ : NULL,
deadline);
}
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
- const char *host) {
+ const char *host, void *reserved) {
registered_call *rc = gpr_malloc(sizeof(registered_call));
- rc->path = grpc_mdelem_from_metadata_strings(
- channel->metadata_context, grpc_mdstr_ref(channel->path_string),
- grpc_mdstr_from_string(channel->metadata_context, method));
- rc->authority = grpc_mdelem_from_metadata_strings(
- channel->metadata_context, grpc_mdstr_ref(channel->authority_string),
- grpc_mdstr_from_string(channel->metadata_context, host));
+ GRPC_API_TRACE(
+ "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
+ 4, (channel, method, host, reserved));
+ GPR_ASSERT(!reserved);
+ rc->path = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method));
+ rc->authority = host ? grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_AUTHORITY, grpc_mdstr_from_string(host))
+ : NULL;
gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls;
channel->registered_calls = rc;
@@ -182,118 +228,97 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
}
grpc_call *grpc_channel_create_registered_call(
- grpc_channel *channel, grpc_completion_queue *completion_queue,
- void *registered_call_handle, gpr_timespec deadline) {
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *completion_queue, void *registered_call_handle,
+ gpr_timespec deadline, void *reserved) {
registered_call *rc = registered_call_handle;
+ GRPC_API_TRACE(
+ "grpc_channel_create_registered_call("
+ "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
+ "registered_call_handle=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
+ registered_call_handle, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
- channel, completion_queue, grpc_mdelem_ref(rc->path),
- grpc_mdelem_ref(rc->authority), deadline);
+ channel, parent_call, propagation_mask, completion_queue,
+ GRPC_MDELEM_REF(rc->path),
+ rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
}
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
-void grpc_channel_internal_ref(grpc_channel *c, const char *reason) {
- gpr_log(GPR_DEBUG, "CHANNEL: ref %p %d -> %d [%s]", c, c->refs.count,
- c->refs.count + 1, reason);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
#else
-void grpc_channel_internal_ref(grpc_channel *c) {
+#define REF_REASON ""
+#define REF_ARG
#endif
- gpr_ref(&c->refs);
+void grpc_channel_internal_ref(grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
-static void destroy_channel(void *p, int ok) {
- grpc_channel *channel = p;
- size_t i;
- grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
- for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
- grpc_mdelem_unref(channel->grpc_status_elem[i]);
- }
- grpc_mdstr_unref(channel->grpc_status_string);
- grpc_mdstr_unref(channel->grpc_compression_level_string);
- grpc_mdstr_unref(channel->grpc_message_string);
- grpc_mdstr_unref(channel->path_string);
- grpc_mdstr_unref(channel->authority_string);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
+}
+
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ grpc_channel *channel = arg;
+ grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
channel->registered_calls = rc->next;
- grpc_mdelem_unref(rc->path);
- grpc_mdelem_unref(rc->authority);
+ GRPC_MDELEM_UNREF(rc->path);
+ if (rc->authority) {
+ GRPC_MDELEM_UNREF(rc->authority);
+ }
gpr_free(rc);
}
- grpc_mdctx_unref(channel->metadata_context);
+ if (channel->default_authority != NULL) {
+ GRPC_MDELEM_UNREF(channel->default_authority);
+ }
gpr_mu_destroy(&channel->registered_call_mu);
+ gpr_free(channel->target);
gpr_free(channel);
}
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
-void grpc_channel_internal_unref(grpc_channel *channel, const char *reason) {
- gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel,
- channel->refs.count, channel->refs.count - 1, reason);
-#else
-void grpc_channel_internal_unref(grpc_channel *channel) {
-#endif
- if (gpr_unref(&channel->refs)) {
- channel->destroy_closure.cb = destroy_channel;
- channel->destroy_closure.cb_arg = channel;
- grpc_iomgr_add_callback(&channel->destroy_closure);
- }
-}
-
void grpc_channel_destroy(grpc_channel *channel) {
- grpc_channel_op op;
+ grpc_transport_op op;
grpc_channel_element *elem;
-
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
+ memset(&op, 0, sizeof(op));
+ op.disconnect = 1;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
+ elem->filter->start_transport_op(&exec_ctx, elem, &op);
- op.type = GRPC_CHANNEL_GOAWAY;
- op.dir = GRPC_CALL_DOWN;
- op.data.goaway.status = GRPC_STATUS_OK;
- op.data.goaway.message = gpr_slice_from_copied_string("Client disconnect");
- elem->filter->channel_op(elem, NULL, &op);
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel");
- op.type = GRPC_CHANNEL_DISCONNECT;
- op.dir = GRPC_CALL_DOWN;
- elem->filter->channel_op(elem, NULL, &op);
-
- GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
-}
-
-void grpc_client_channel_closed(grpc_channel_element *elem) {
- GRPC_CHANNEL_INTERNAL_UNREF(CHANNEL_FROM_TOP_ELEM(elem), "closed");
+ grpc_exec_ctx_finish(&exec_ctx);
}
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
return CHANNEL_STACK_FROM_CHANNEL(channel);
}
-grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel) {
- return channel->metadata_context;
-}
-
-grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel) {
- return channel->grpc_status_string;
-}
-
-grpc_mdstr *grpc_channel_get_compresssion_level_string(grpc_channel *channel) {
- return channel->grpc_compression_level_string;
-}
-
-
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
- if (i >= 0 && i < NUM_CACHED_STATUS_ELEMS) {
- return grpc_mdelem_ref(channel->grpc_status_elem[i]);
- } else {
- char tmp[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(i, tmp);
- return grpc_mdelem_from_metadata_strings(
- channel->metadata_context, grpc_mdstr_ref(channel->grpc_status_string),
- grpc_mdstr_from_string(channel->metadata_context, tmp));
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ switch (i) {
+ case 0:
+ return GRPC_MDELEM_GRPC_STATUS_0;
+ case 1:
+ return GRPC_MDELEM_GRPC_STATUS_1;
+ case 2:
+ return GRPC_MDELEM_GRPC_STATUS_2;
}
+ gpr_ltoa(i, tmp);
+ return grpc_mdelem_from_metadata_strings(GRPC_MDSTR_GRPC_STATUS,
+ grpc_mdstr_from_string(tmp));
}
-grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel) {
- return channel->grpc_message_string;
-}
-
-gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel) {
+uint32_t grpc_channel_get_max_message_length(grpc_channel *channel) {
return channel->max_message_length;
}
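Because of the argument handling above, callers can set the default :authority and the message-size cap through channel args at creation time. A hedged example with placeholder target and values:

    /* Illustrative channel args for an insecure channel. */
    grpc_arg args[2];
    args[0].type = GRPC_ARG_STRING;
    args[0].key = GRPC_ARG_DEFAULT_AUTHORITY;
    args[0].value.string = "example.test";
    args[1].type = GRPC_ARG_INTEGER;
    args[1].key = GRPC_ARG_MAX_MESSAGE_LENGTH;
    args[1].value.integer = 4 * 1024 * 1024;
    grpc_channel_args channel_args = {GPR_ARRAY_SIZE(args), args};
    grpc_channel *channel =
        grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);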
diff --git a/src/core/surface/channel.h b/src/core/surface/channel.h
index 3c04676b43..c08988d9e7 100644
--- a/src/core/surface/channel.h
+++ b/src/core/surface/channel.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,48 +31,45 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H
-#define GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H
+#ifndef GRPC_CORE_SURFACE_CHANNEL_H
+#define GRPC_CORE_SURFACE_CHANNEL_H
#include "src/core/channel/channel_stack.h"
+#include "src/core/surface/channel_stack_type.h"
+#include "src/core/client_config/subchannel_factory.h"
-grpc_channel *grpc_channel_create_from_filters(
- const grpc_channel_filter **filters, size_t count,
- const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client);
+grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_channel_stack_type channel_stack_type,
+ grpc_transport *optional_transport);
/** Get a (borrowed) pointer to this channels underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
-/** Get a (borrowed) pointer to the channel wide metadata context */
-grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel);
-
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel,
int status_code);
-grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel);
-grpc_mdstr *grpc_channel_get_compresssion_level_string(grpc_channel *channel);
-grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel);
-gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
-
-void grpc_client_channel_closed(grpc_channel_element *elem);
+uint32_t grpc_channel_get_max_message_length(grpc_channel *channel);
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
-void grpc_channel_internal_unref(grpc_channel *channel, const char *reason);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ const char *reason);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel, reason)
-#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
- grpc_channel_internal_unref(channel, reason)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel, reason)
#else
void grpc_channel_internal_ref(grpc_channel *channel);
-void grpc_channel_internal_unref(grpc_channel *channel);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel)
-#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
- grpc_channel_internal_unref(channel)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel)
#endif
-#endif /* GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H */
+#endif /* GRPC_CORE_SURFACE_CHANNEL_H */
diff --git a/src/core/surface/channel_connectivity.c b/src/core/surface/channel_connectivity.c
new file mode 100644
index 0000000000..2dd4fce26b
--- /dev/null
+++ b/src/core/surface/channel_connectivity.c
@@ -0,0 +1,220 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/surface/channel.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/channel/client_channel.h"
+#include "src/core/channel/client_uchannel.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/completion_queue.h"
+
+grpc_connectivity_state grpc_channel_check_connectivity_state(
+ grpc_channel *channel, int try_to_connect) {
+ /* forward through to the underlying client channel */
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_connectivity_state state;
+ GRPC_API_TRACE(
+ "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
+ (channel, try_to_connect));
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ state = grpc_client_channel_check_connectivity_state(
+ &exec_ctx, client_channel_elem, try_to_connect);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return state;
+ }
+ if (client_channel_elem->filter == &grpc_client_uchannel_filter) {
+ state = grpc_client_uchannel_check_connectivity_state(
+ &exec_ctx, client_channel_elem, try_to_connect);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return state;
+ }
+ gpr_log(GPR_ERROR,
+ "grpc_channel_check_connectivity_state called on something that is "
+ "not a (u)client channel, but '%s'",
+ client_channel_elem->filter->name);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return GRPC_CHANNEL_FATAL_FAILURE;
+}
+
+typedef enum {
+ WAITING,
+ CALLING_BACK,
+ CALLING_BACK_AND_FINISHED,
+ CALLED_BACK
+} callback_phase;
+
+typedef struct {
+ gpr_mu mu;
+ callback_phase phase;
+ int success;
+ grpc_closure on_complete;
+ grpc_timer alarm;
+ grpc_connectivity_state state;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion_storage;
+ grpc_channel *channel;
+ void *tag;
+} state_watcher;
+
+static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
+ grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_get_channel_stack(w->channel));
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
+ "watch_channel_connectivity");
+ } else if (client_channel_elem->filter == &grpc_client_uchannel_filter) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
+ "watch_uchannel_connectivity");
+ } else {
+ abort();
+ }
+ gpr_mu_destroy(&w->mu);
+ gpr_free(w);
+}
+
+static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
+ grpc_cq_completion *ignored) {
+ int delete = 0;
+ state_watcher *w = pw;
+ gpr_mu_lock(&w->mu);
+ switch (w->phase) {
+ case WAITING:
+ case CALLED_BACK:
+ GPR_UNREACHABLE_CODE(return );
+ case CALLING_BACK:
+ w->phase = CALLED_BACK;
+ break;
+ case CALLING_BACK_AND_FINISHED:
+ delete = 1;
+ break;
+ }
+ gpr_mu_unlock(&w->mu);
+
+ if (delete) {
+ delete_state_watcher(exec_ctx, w);
+ }
+}
+
+static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
+ int due_to_completion) {
+ int delete = 0;
+
+ if (due_to_completion) {
+ grpc_timer_cancel(exec_ctx, &w->alarm);
+ }
+
+ gpr_mu_lock(&w->mu);
+ if (due_to_completion) {
+ w->success = 1;
+ }
+ switch (w->phase) {
+ case WAITING:
+ w->phase = CALLING_BACK;
+ grpc_cq_end_op(exec_ctx, w->cq, w->tag, w->success, finished_completion,
+ w, &w->completion_storage);
+ break;
+ case CALLING_BACK:
+ w->phase = CALLING_BACK_AND_FINISHED;
+ break;
+ case CALLING_BACK_AND_FINISHED:
+ GPR_UNREACHABLE_CODE(return );
+ case CALLED_BACK:
+ delete = 1;
+ break;
+ }
+ gpr_mu_unlock(&w->mu);
+
+ if (delete) {
+ delete_state_watcher(exec_ctx, w);
+ }
+}
+
+static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
+ partly_done(exec_ctx, pw, 1);
+}
+
+static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
+ partly_done(exec_ctx, pw, 0);
+}
+
+void grpc_channel_watch_connectivity_state(
+ grpc_channel *channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ state_watcher *w = gpr_malloc(sizeof(*w));
+
+ GRPC_API_TRACE(
+ "grpc_channel_watch_connectivity_state("
+ "channel=%p, last_observed_state=%d, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "cq=%p, tag=%p)",
+ 7, (channel, (int)last_observed_state, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, cq, tag));
+
+ grpc_cq_begin_op(cq, tag);
+
+ gpr_mu_init(&w->mu);
+ grpc_closure_init(&w->on_complete, watch_complete, w);
+ w->phase = WAITING;
+ w->state = last_observed_state;
+ w->success = 0;
+ w->cq = cq;
+ w->tag = tag;
+ w->channel = channel;
+
+ grpc_timer_init(&exec_ctx, &w->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ timeout_complete, w, gpr_now(GPR_CLOCK_MONOTONIC));
+
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
+ grpc_client_channel_watch_connectivity_state(&exec_ctx, client_channel_elem,
+ grpc_cq_pollset(cq), &w->state,
+ &w->on_complete);
+ } else if (client_channel_elem->filter == &grpc_client_uchannel_filter) {
+ GRPC_CHANNEL_INTERNAL_REF(channel, "watch_uchannel_connectivity");
+ grpc_client_uchannel_watch_connectivity_state(
+ &exec_ctx, client_channel_elem, grpc_cq_pollset(cq), &w->state,
+ &w->on_complete);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
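From the application side, the watcher pairs a state query with a completion-queue notification. A simplified polling step, with deadline handling and cq ownership elided:

    /* Illustrative connectivity watch: read the current state, then block
       until it changes or a 5 second deadline passes. */
    grpc_connectivity_state state =
        grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
    grpc_channel_watch_connectivity_state(
        channel, state,
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_seconds(5, GPR_TIMESPAN)),
        cq, tag);
    grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);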
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index d069a04a9a..123447c8ed 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,188 +31,193 @@
*
*/
-#include "src/core/iomgr/sockaddr.h"
-
#include <grpc/grpc.h>
#include <stdlib.h>
#include <string.h>
-#include "src/core/channel/census_filter.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/census/grpc_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
-#include "src/core/channel/client_setup.h"
-#include "src/core/channel/connected_channel.h"
+#include "src/core/channel/compress_filter.h"
#include "src/core/channel/http_client_filter.h"
-#include "src/core/iomgr/endpoint.h"
-#include "src/core/iomgr/resolve_address.h"
+#include "src/core/client_config/resolver_registry.h"
#include "src/core/iomgr/tcp_client.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/channel.h"
-#include "src/core/surface/client.h"
-#include "src/core/support/string.h"
#include "src/core/transport/chttp2_transport.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
-typedef struct setup setup;
-
-/* A single setup request (started via initiate) */
typedef struct {
- grpc_client_setup_request *cs_request;
- setup *setup;
- /* Resolved addresses, or null if resolution not yet completed */
- grpc_resolved_addresses *resolved;
- /* which address in resolved should we pick for the next connection attempt */
- size_t resolved_index;
-} request;
-
-/* Global setup logic (may be running many simultaneous setup requests, but
- with only one 'active' */
-struct setup {
- const char *target;
- grpc_transport_setup_callback setup_callback;
- void *setup_user_data;
-};
-
-static int maybe_try_next_resolved(request *r);
-
-static void done(request *r, int was_successful) {
- grpc_client_setup_request_finish(r->cs_request, was_successful);
- if (r->resolved) {
- grpc_resolved_addresses_destroy(r->resolved);
- }
- gpr_free(r);
-}
+ grpc_connector base;
+ gpr_refcount refs;
-/* connection callback: tcp is either valid, or null on error */
-static void on_connect(void *rp, grpc_endpoint *tcp) {
- request *r = rp;
+ grpc_closure *notify;
+ grpc_connect_in_args args;
+ grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
- if (!grpc_client_setup_request_should_continue(r->cs_request, "on_connect")) {
- if (tcp) {
- grpc_endpoint_shutdown(tcp);
- grpc_endpoint_destroy(tcp);
- }
- done(r, 0);
- return;
- }
+ grpc_endpoint *tcp;
- if (!tcp) {
- if (!maybe_try_next_resolved(r)) {
- done(r, 0);
- return;
- } else {
- return;
- }
- } else if (grpc_client_setup_cb_begin(r->cs_request, "on_connect")) {
- grpc_create_chttp2_transport(
- r->setup->setup_callback, r->setup->setup_user_data,
- grpc_client_setup_get_channel_args(r->cs_request), tcp, NULL, 0,
- grpc_client_setup_get_mdctx(r->cs_request), 1);
- grpc_client_setup_cb_end(r->cs_request, "on_connect");
- done(r, 1);
- return;
- } else {
- done(r, 0);
- }
+ grpc_closure connected;
+} connector;
+
+static void connector_ref(grpc_connector *con) {
+ connector *c = (connector *)con;
+ gpr_ref(&c->refs);
}
-/* attempt to connect to the next available resolved address */
-static int maybe_try_next_resolved(request *r) {
- grpc_resolved_address *addr;
- if (!r->resolved) return 0;
- if (r->resolved_index == r->resolved->naddrs) return 0;
- addr = &r->resolved->addrs[r->resolved_index++];
- grpc_tcp_client_connect(
- on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request),
- (struct sockaddr *)&addr->addr, addr->len,
- grpc_client_setup_request_deadline(r->cs_request));
- return 1;
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ if (gpr_unref(&c->refs)) {
+ /* c->initial_string_buffer does not need to be destroyed */
+ gpr_free(c);
+ }
}
-/* callback for when our target address has been resolved */
-static void on_resolved(void *rp, grpc_resolved_addresses *resolved) {
- request *r = rp;
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ connector_unref(exec_ctx, arg);
+}
- /* if we're not still the active request, abort */
- if (!grpc_client_setup_request_should_continue(r->cs_request,
- "on_resolved")) {
- if (resolved) {
- grpc_resolved_addresses_destroy(resolved);
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->tcp;
+ if (tcp != NULL) {
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ connector_ref(arg);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
}
- done(r, 0);
- return;
- }
-
- if (!resolved) {
- done(r, 0);
- return;
+ c->result->transport =
+ grpc_create_chttp2_transport(exec_ctx, c->args.channel_args, tcp, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
+ GPR_ASSERT(c->result->transport);
+ c->result->channel_args = c->args.channel_args;
} else {
- r->resolved = resolved;
- r->resolved_index = 0;
- if (!maybe_try_next_resolved(r)) {
- done(r, 0);
- }
+ memset(c->result, 0, sizeof(*c->result));
}
+ notify = c->notify;
+ c->notify = NULL;
+ notify->cb(exec_ctx, notify->cb_arg, 1);
}
-static void initiate_setup(void *sp, grpc_client_setup_request *cs_request) {
- request *r = gpr_malloc(sizeof(request));
- r->setup = sp;
- r->cs_request = cs_request;
- r->resolved = NULL;
- r->resolved_index = 0;
- /* TODO(klempner): Make grpc_resolve_address respect deadline */
- grpc_resolve_address(r->setup->target, "http", on_resolved, r);
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}
+
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
+ const grpc_connect_in_args *args,
+ grpc_connect_out_args *result,
+ grpc_closure *notify) {
+ connector *c = (connector *)con;
+ GPR_ASSERT(c->notify == NULL);
+ GPR_ASSERT(notify->cb);
+ c->notify = notify;
+ c->args = *args;
+ c->result = result;
+ c->tcp = NULL;
+ grpc_closure_init(&c->connected, connected, c);
+ grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
+ args->interested_parties, args->addr, args->addr_len,
+ args->deadline);
}
-static void done_setup(void *sp) {
- setup *s = sp;
- gpr_free((void *)s->target);
- gpr_free(s);
+static const grpc_connector_vtable connector_vtable = {
+ connector_ref, connector_unref, connector_shutdown, connector_connect};
+
+typedef struct {
+ grpc_subchannel_factory base;
+ gpr_refcount refs;
+ grpc_channel_args *merge_args;
+ grpc_channel *master;
+} subchannel_factory;
+
+static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ gpr_ref(&f->refs);
}
-static grpc_transport_setup_result complete_setup(void *channel_stack,
- grpc_transport *transport,
- grpc_mdctx *mdctx) {
- static grpc_channel_filter const *extra_filters[] = {
- &grpc_http_client_filter};
- return grpc_client_channel_transport_setup_complete(
- channel_stack, transport, extra_filters, GPR_ARRAY_SIZE(extra_filters),
- mdctx);
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ if (gpr_unref(&f->refs)) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
+ grpc_channel_args_destroy(f->merge_args);
+ gpr_free(f);
+ }
}
+static grpc_subchannel *subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ connector *c = gpr_malloc(sizeof(*c));
+ grpc_channel_args *final_args =
+ grpc_channel_args_merge(args->args, f->merge_args);
+ grpc_subchannel *s;
+ memset(c, 0, sizeof(*c));
+ c->base.vtable = &connector_vtable;
+ gpr_ref_init(&c->refs, 1);
+ args->args = final_args;
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_connector_unref(exec_ctx, &c->base);
+ grpc_channel_args_destroy(final_args);
+ return s;
+}
+
+static const grpc_subchannel_factory_vtable subchannel_factory_vtable = {
+ subchannel_factory_ref, subchannel_factory_unref,
+ subchannel_factory_create_subchannel};
+
/* Create a client channel:
Asynchronously: - resolve target
- connect to it (trying alternatives as presented)
- perform handshakes */
-grpc_channel *grpc_channel_create(const char *target,
- const grpc_channel_args *args) {
- setup *s = gpr_malloc(sizeof(setup));
- grpc_mdctx *mdctx = grpc_mdctx_create();
+grpc_channel *grpc_insecure_channel_create(const char *target,
+ const grpc_channel_args *args,
+ void *reserved) {
grpc_channel *channel = NULL;
-#define MAX_FILTERS 3
- const grpc_channel_filter *filters[MAX_FILTERS];
- int n = 0;
- filters[n++] = &grpc_client_surface_filter;
- /* TODO(census)
- if (grpc_channel_args_is_census_enabled(args)) {
- filters[n++] = &grpc_client_census_filter;
- } */
- filters[n++] = &grpc_client_channel_filter;
- GPR_ASSERT(n <= MAX_FILTERS);
- channel = grpc_channel_create_from_filters(filters, n, args, mdctx, 1);
-
- s->target = gpr_strdup(target);
- s->setup_callback = complete_setup;
- s->setup_user_data = grpc_channel_get_channel_stack(channel);
-
- grpc_client_setup_create_and_attach(grpc_channel_get_channel_stack(channel),
- args, mdctx, initiate_setup, done_setup,
- s);
+ grpc_resolver *resolver;
+ subchannel_factory *f;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE(
+ "grpc_insecure_channel_create(target=%p, args=%p, reserved=%p)", 3,
+ (target, args, reserved));
+ GPR_ASSERT(!reserved);
+
+ channel =
+ grpc_channel_create(&exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+
+ f = gpr_malloc(sizeof(*f));
+ f->base.vtable = &subchannel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
+ f->merge_args = grpc_channel_args_copy(args);
+ f->master = channel;
+ GRPC_CHANNEL_INTERNAL_REF(f->master, "subchannel_factory");
+ resolver = grpc_resolver_create(target, &f->base);
+ if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, f->master, "subchannel_factory");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return NULL;
+ }
+
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+
+ grpc_exec_ctx_finish(&exec_ctx);
return channel;
}
diff --git a/src/core/surface/channel_init.c b/src/core/surface/channel_init.c
new file mode 100644
index 0000000000..538be84696
--- /dev/null
+++ b/src/core/surface/channel_init.c
@@ -0,0 +1,148 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/surface/channel_init.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/useful.h>
+
+typedef struct stage_slot {
+ grpc_channel_init_stage fn;
+ void *arg;
+ int priority;
+ size_t insertion_order;
+} stage_slot;
+
+typedef struct stage_slots {
+ stage_slot *slots;
+ size_t num_slots;
+ size_t cap_slots;
+} stage_slots;
+
+static stage_slots g_slots[GRPC_NUM_CHANNEL_STACK_TYPES];
+static bool g_finalized;
+
+void grpc_channel_init_init(void) {
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ g_slots[i].slots = NULL;
+ g_slots[i].num_slots = 0;
+ g_slots[i].cap_slots = 0;
+ }
+ g_finalized = false;
+}
+
+void grpc_channel_init_register_stage(grpc_channel_stack_type type,
+ int priority,
+ grpc_channel_init_stage stage,
+ void *stage_arg) {
+ GPR_ASSERT(!g_finalized);
+ if (g_slots[type].cap_slots == g_slots[type].num_slots) {
+ g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
+ g_slots[type].slots =
+ gpr_realloc(g_slots[type].slots,
+ g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
+ }
+ stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
+ s->insertion_order = g_slots[type].num_slots;
+ s->priority = priority;
+ s->fn = stage;
+ s->arg = stage_arg;
+}
+
+static int compare_slots(const void *a, const void *b) {
+ const stage_slot *sa = a;
+ const stage_slot *sb = b;
+
+ int c = GPR_ICMP(sa->priority, sb->priority);
+ if (c != 0) return c;
+ return GPR_ICMP(sa->insertion_order, sb->insertion_order);
+}
+
+void grpc_channel_init_finalize(void) {
+ GPR_ASSERT(!g_finalized);
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ qsort(g_slots[i].slots, g_slots[i].num_slots, sizeof(*g_slots[i].slots),
+ compare_slots);
+ }
+ g_finalized = true;
+}
+
+void grpc_channel_init_shutdown(void) {
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ gpr_free(g_slots[i].slots);
+ g_slots[i].slots = (void *)(uintptr_t)0xdeadbeef;
+ }
+}
+
+static const char *name_for_type(grpc_channel_stack_type type) {
+ switch (type) {
+ case GRPC_CLIENT_CHANNEL:
+ return "CLIENT_CHANNEL";
+ case GRPC_CLIENT_SUBCHANNEL:
+ return "CLIENT_SUBCHANNEL";
+ case GRPC_SERVER_CHANNEL:
+ return "SERVER_CHANNEL";
+ case GRPC_CLIENT_UCHANNEL:
+ return "CLIENT_UCHANNEL";
+ case GRPC_CLIENT_LAME_CHANNEL:
+ return "CLIENT_LAME_CHANNEL";
+ case GRPC_CLIENT_DIRECT_CHANNEL:
+ return "CLIENT_DIRECT_CHANNEL";
+ case GRPC_NUM_CHANNEL_STACK_TYPES:
+ break;
+ }
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
+void *grpc_channel_init_create_stack(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_type type, size_t prefix_bytes,
+ const grpc_channel_args *args, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, grpc_transport *transport) {
+ GPR_ASSERT(g_finalized);
+
+ grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder_set_name(builder, name_for_type(type));
+ grpc_channel_stack_builder_set_channel_arguments(builder, args);
+ grpc_channel_stack_builder_set_transport(builder, transport);
+
+ for (size_t i = 0; i < g_slots[type].num_slots; i++) {
+ const stage_slot *slot = &g_slots[type].slots[i];
+ if (!slot->fn(builder, slot->arg)) {
+ grpc_channel_stack_builder_destroy(builder);
+ return NULL;
+ }
+ }
+
+ return grpc_channel_stack_builder_finish(exec_ctx, builder, prefix_bytes,
+ initial_refs, destroy, destroy_arg);
+}
diff --git a/src/core/surface/channel_init.h b/src/core/surface/channel_init.h
new file mode 100644
index 0000000000..06faef6ddb
--- /dev/null
+++ b/src/core/surface/channel_init.h
@@ -0,0 +1,86 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SURFACE_CHANNEL_INIT_H
+#define GRPC_CORE_SURFACE_CHANNEL_INIT_H
+
+#include "src/core/channel/channel_stack_builder.h"
+#include "src/core/surface/channel_stack_type.h"
+#include "src/core/transport/transport.h"
+
+/// This module provides a way for plugins (and the grpc core library itself)
+/// to register mutators for channel stacks.
+/// It also provides a universal entry path to run those mutators to build
+/// a channel stack for various subsystems.
+
+/// One stage of mutation: call functions against \a builder to influence the
+/// finally constructed channel stack
+typedef bool (*grpc_channel_init_stage)(grpc_channel_stack_builder *builder,
+ void *arg);
+
+/// Global initialization of the system
+void grpc_channel_init_init(void);
+
+/// Register one stage of mutators.
+/// Stages are run in priority order (lowest to highest), and then in
+/// registration order (in the case of a tie).
+/// Stages are registered against one of the pre-determined channel stack
+/// types.
+void grpc_channel_init_register_stage(grpc_channel_stack_type type,
+ int priority,
+ grpc_channel_init_stage stage_fn,
+ void *stage_arg);
+
+/// Finalize registration. No more calls to grpc_channel_init_register_stage are
+/// allowed.
+void grpc_channel_init_finalize(void);
+/// Shutdown the channel init system
+void grpc_channel_init_shutdown(void);
+
+/// Construct a channel stack of some sort: see channel_stack.h for details
+/// \a type is the type of channel stack to create
+/// \a prefix_bytes is the number of bytes before the channel stack to allocate
+/// \a args are configuration arguments for the channel stack
+/// \a initial_refs is the initial refcount to give the channel stack
+/// \a destroy and \a destroy_arg specify how to destroy the channel stack
+///    if \a destroy_arg is NULL, the pointer returned by this function is
+///    substituted for it
+/// \a optional_transport is either NULL or a constructed transport object
+/// Returns a pointer to the base of the memory allocated (the actual channel
+/// stack object will be prefix_bytes past that pointer)
+void *grpc_channel_init_create_stack(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_type type, size_t prefix_bytes,
+ const grpc_channel_args *args, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, grpc_transport *optional_transport);
+
+#endif /* GRPC_CORE_SURFACE_CHANNEL_INIT_H */
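For orientation, a minimal sketch of how a plugin is expected to use the registration API declared above. The filter `my_filter` and the priority value 1000 are illustrative assumptions, not part of this change; the builder call mirrors the append_filter helper added to init.c further down.

#include "src/core/surface/channel_init.h"

/* Hypothetical filter exported by the plugin (not part of this commit). */
extern const grpc_channel_filter my_filter;

/* Stage callback: append the filter to the stack under construction.
   Returning false aborts the build, and grpc_channel_init_create_stack
   then returns NULL. */
static bool add_my_filter(grpc_channel_stack_builder *builder, void *arg) {
  return grpc_channel_stack_builder_append_filter(builder, arg, NULL, NULL);
}

void my_plugin_register_stages(void) {
  /* Lower priorities run first; ties run in registration order. */
  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, 1000, add_my_filter,
                                   (void *)&my_filter);
}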
diff --git a/src/core/surface/channel_ping.c b/src/core/surface/channel_ping.c
new file mode 100644
index 0000000000..983f1c8a66
--- /dev/null
+++ b/src/core/surface/channel_ping.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/surface/channel.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/completion_queue.h"
+
+typedef struct {
+ grpc_closure closure;
+ void *tag;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion_storage;
+} ping_result;
+
+static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_cq_completion *storage) {
+ gpr_free(arg);
+}
+
+static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ ping_result *pr = arg;
+ grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, success, ping_destroy, pr,
+ &pr->completion_storage);
+}
+
+void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
+ void *tag, void *reserved) {
+ grpc_transport_op op;
+ ping_result *pr = gpr_malloc(sizeof(*pr));
+ grpc_channel_element *top_elem =
+ grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(reserved == NULL);
+ memset(&op, 0, sizeof(op));
+ pr->tag = tag;
+ pr->cq = cq;
+ grpc_closure_init(&pr->closure, ping_done, pr);
+ op.send_ping = &pr->closure;
+ op.bind_pollset = grpc_cq_pollset(cq);
+ grpc_cq_begin_op(cq, tag);
+ top_elem->filter->start_transport_op(&exec_ctx, top_elem, &op);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
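A usage sketch of the new ping entry point, assuming a channel and completion queue created elsewhere and the matching public prototype for grpc_channel_ping; the tag value is arbitrary.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

static int ping_and_wait(grpc_channel *channel, grpc_completion_queue *cq) {
  void *tag = (void *)0x1; /* arbitrary tag used to match the completion */
  grpc_event ev;
  grpc_channel_ping(channel, cq, tag, NULL);
  ev = grpc_completion_queue_pluck(cq, tag,
                                   gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  /* ev.success is non-zero once the transport has acknowledged the ping. */
  return ev.success;
}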
diff --git a/src/core/surface/channel_stack_type.c b/src/core/surface/channel_stack_type.c
new file mode 100644
index 0000000000..6fd33d411d
--- /dev/null
+++ b/src/core/surface/channel_stack_type.c
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#include "src/core/surface/channel_stack_type.h"
+#include <grpc/support/log.h>
+
+bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type) {
+ switch (type) {
+ case GRPC_CLIENT_CHANNEL:
+ return true;
+ case GRPC_CLIENT_UCHANNEL:
+ return true;
+ case GRPC_CLIENT_SUBCHANNEL:
+ return true;
+ case GRPC_CLIENT_LAME_CHANNEL:
+ return true;
+ case GRPC_CLIENT_DIRECT_CHANNEL:
+ return true;
+ case GRPC_SERVER_CHANNEL:
+ return false;
+ case GRPC_NUM_CHANNEL_STACK_TYPES:
+ break;
+ }
+ GPR_UNREACHABLE_CODE(return true;);
+}
diff --git a/src/core/surface/channel_stack_type.h b/src/core/surface/channel_stack_type.h
new file mode 100644
index 0000000000..846391a68a
--- /dev/null
+++ b/src/core/surface/channel_stack_type.h
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_SURFACE_CHANNEL_STACK_TYPE_H
+#define GRPC_CORE_SURFACE_CHANNEL_STACK_TYPE_H
+
+#include <stdbool.h>
+
+typedef enum {
+ // normal top-half client channel with load-balancing, connection management
+ GRPC_CLIENT_CHANNEL,
+ // abbreviated top-half client channel bound to one subchannel - for internal
+ // load balancing implementation
+ GRPC_CLIENT_UCHANNEL,
+ // bottom-half of a client channel: everything that happens post-load
+ // balancing (bound to a specific transport)
+ GRPC_CLIENT_SUBCHANNEL,
+ // a permanently broken client channel
+ GRPC_CLIENT_LAME_CHANNEL,
+ // a directly connected client channel (without load-balancing, directly talks
+ // to a transport)
+ GRPC_CLIENT_DIRECT_CHANNEL,
+ // server side channel
+ GRPC_SERVER_CHANNEL,
+ // must be last
+ GRPC_NUM_CHANNEL_STACK_TYPES
+} grpc_channel_stack_type;
+
+bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type);
+
+#endif /* GRPC_CORE_SURFACE_CHANNEL_STACK_TYPE_H */
diff --git a/src/core/surface/client.c b/src/core/surface/client.c
deleted file mode 100644
index 8ac4dd1e0e..0000000000
--- a/src/core/surface/client.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/surface/client.h"
-
-#include "src/core/surface/call.h"
-#include "src/core/surface/channel.h"
-#include "src/core/support/string.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-typedef struct { void *unused; } call_data;
-
-typedef struct { void *unused; } channel_data;
-
-static void client_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_call_next_op(elem, op);
-}
-
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- switch (op->type) {
- case GRPC_ACCEPT_CALL:
- gpr_log(GPR_ERROR, "Client cannot accept new calls");
- break;
- case GRPC_TRANSPORT_CLOSED:
- grpc_client_channel_closed(elem);
- break;
- case GRPC_TRANSPORT_GOAWAY:
- gpr_slice_unref(op->data.goaway.message);
- break;
- default:
- GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
- grpc_channel_next_op(elem, op);
- }
-}
-
-static void init_call_elem(grpc_call_element *elem,
- const void *transport_server_data,
- grpc_transport_op *initial_op) {}
-
-static void destroy_call_elem(grpc_call_element *elem) {}
-
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- GPR_ASSERT(is_first);
- GPR_ASSERT(!is_last);
-}
-
-static void destroy_channel_elem(grpc_channel_element *elem) {}
-
-const grpc_channel_filter grpc_client_surface_filter = {
- client_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
- destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, "client",
-};
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index 030a8b4e6f..b22818ea87 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,70 +36,143 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
#include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/timer.h"
+#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-
-#define NUM_TAG_BUCKETS 31
-/* A single event: extends grpc_event to form a linked list with a destruction
- function (on_finish) that is hidden from outside this module */
-typedef struct event {
- grpc_event base;
- struct event *queue_next;
- struct event *queue_prev;
- struct event *bucket_next;
- struct event *bucket_prev;
-} event;
+typedef struct {
+ grpc_pollset_worker **worker;
+ void *tag;
+} plucker;
/* Completion queue structure */
struct grpc_completion_queue {
- /* When refs drops to zero, we are in shutdown mode, and will be destroyable
- once all queued events are drained */
- gpr_refcount refs;
- /* Once owning_refs drops to zero, we will destroy the cq */
+ /** owned by pollset */
+ gpr_mu *mu;
+ /** completed events */
+ grpc_cq_completion completed_head;
+ grpc_cq_completion *completed_tail;
+ /** Number of pending events (+1 if we're not shutdown) */
+ gpr_refcount pending_events;
+ /** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
- /* the set of low level i/o things that concern this cq */
- grpc_pollset pollset;
- /* 0 initially, 1 once we've begun shutting down */
+ /** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
- /* Head of a linked list of queued events (prev points to the last element) */
- event *queue;
- /* Fixed size chained hash table of events for pluck() */
- event *buckets[NUM_TAG_BUCKETS];
int is_server_cq;
+ int num_pluckers;
+ plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
+ grpc_closure pollset_shutdown_done;
+
+#ifndef NDEBUG
+ void **outstanding_tags;
+ size_t outstanding_tag_count;
+ size_t outstanding_tag_capacity;
+#endif
+
+ grpc_completion_queue *next_free;
};
-grpc_completion_queue *grpc_completion_queue_create(void) {
- grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
- memset(cc, 0, sizeof(*cc));
+#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
+
+static gpr_mu g_freelist_mu;
+static grpc_completion_queue *g_freelist;
+
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
+ bool success);
+
+void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
+
+void grpc_cq_global_shutdown(void) {
+ gpr_mu_destroy(&g_freelist_mu);
+ while (g_freelist) {
+ grpc_completion_queue *next = g_freelist->next_free;
+ grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
+#ifndef NDEBUG
+ gpr_free(g_freelist->outstanding_tags);
+#endif
+ gpr_free(g_freelist);
+ g_freelist = next;
+ }
+}
+
+struct grpc_cq_alarm {
+ grpc_timer alarm;
+ grpc_cq_completion completion;
+ /** completion queue where events about this alarm will be posted */
+ grpc_completion_queue *cq;
+ /** user supplied tag */
+ void *tag;
+};
+
+grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
+ grpc_completion_queue *cc;
+ GPR_ASSERT(!reserved);
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_create", 0);
+
+ GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
+
+ gpr_mu_lock(&g_freelist_mu);
+ if (g_freelist == NULL) {
+ gpr_mu_unlock(&g_freelist_mu);
+
+ cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
+ grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
+#ifndef NDEBUG
+ cc->outstanding_tags = NULL;
+ cc->outstanding_tag_capacity = 0;
+#endif
+ } else {
+ cc = g_freelist;
+ g_freelist = g_freelist->next_free;
+ gpr_mu_unlock(&g_freelist_mu);
+ /* pollset already initialized */
+ }
+
/* Initial ref is dropped by grpc_completion_queue_shutdown */
- gpr_ref_init(&cc->refs, 1);
+ gpr_ref_init(&cc->pending_events, 1);
/* One for destroy(), one for pollset_shutdown */
gpr_ref_init(&cc->owning_refs, 2);
- grpc_pollset_init(&cc->pollset);
+ cc->completed_tail = &cc->completed_head;
+ cc->completed_head.next = (uintptr_t)cc->completed_tail;
+ cc->shutdown = 0;
+ cc->shutdown_called = 0;
+ cc->is_server_cq = 0;
+ cc->num_pluckers = 0;
+#ifndef NDEBUG
+ cc->outstanding_tag_count = 0;
+#endif
+ grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
+
+ GPR_TIMER_END("grpc_completion_queue_create", 0);
+
return cc;
}
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s",
- cc, (int)cc->owning_refs.count, (int)cc->owning_refs.count + 1,
- reason);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s", cc,
+ (int)cc->owning_refs.count, (int)cc->owning_refs.count + 1, reason);
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc) {
#endif
gpr_ref(&cc->owning_refs);
}
-static void on_pollset_destroy_done(void *arg) {
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
grpc_completion_queue *cc = arg;
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
@@ -107,226 +180,331 @@ static void on_pollset_destroy_done(void *arg) {
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason,
const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s",
- cc, (int)cc->owning_refs.count, (int)cc->owning_refs.count - 1,
- reason);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s", cc,
+ (int)cc->owning_refs.count, (int)cc->owning_refs.count - 1, reason);
#else
void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
- GPR_ASSERT(cc->queue == NULL);
- grpc_pollset_destroy(&cc->pollset);
- gpr_free(cc);
+ GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
+ grpc_pollset_reset(POLLSET_FROM_CQ(cc));
+ gpr_mu_lock(&g_freelist_mu);
+ cc->next_free = g_freelist;
+ g_freelist = cc;
+ gpr_mu_unlock(&g_freelist_mu);
}
}
-/* Create and append an event to the queue. Returns the event so that its data
- members can be filled in.
- Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
-static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
- void *tag, grpc_call *call) {
- event *ev = gpr_malloc(sizeof(event));
- gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
- ev->base.type = type;
- ev->base.tag = tag;
- if (cc->queue == NULL) {
- cc->queue = ev->queue_next = ev->queue_prev = ev;
- } else {
- ev->queue_next = cc->queue;
- ev->queue_prev = cc->queue->queue_prev;
- ev->queue_next->queue_prev = ev->queue_prev->queue_next = ev;
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
+#ifndef NDEBUG
+ gpr_mu_lock(cc->mu);
+ GPR_ASSERT(!cc->shutdown_called);
+ if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
+ cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
+ cc->outstanding_tags =
+ gpr_realloc(cc->outstanding_tags, sizeof(*cc->outstanding_tags) *
+ cc->outstanding_tag_capacity);
}
- if (cc->buckets[bucket] == NULL) {
- cc->buckets[bucket] = ev->bucket_next = ev->bucket_prev = ev;
- } else {
- ev->bucket_next = cc->buckets[bucket];
- ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
- ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
- }
- grpc_pollset_kick(&cc->pollset);
- return ev;
-}
-
-void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call) {
- gpr_ref(&cc->refs);
- if (call) GRPC_CALL_INTERNAL_REF(call, "cq");
+ cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
+ gpr_mu_unlock(cc->mu);
+#endif
+ gpr_ref(&cc->pending_events);
}
/* Signal the end of an operation - if this is the last waiting-to-be-queued
event, then enter shutdown mode */
-void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
- int success) {
- event *ev;
- int shutdown = 0;
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
- ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call);
- ev->base.success = success;
- if (gpr_unref(&cc->refs)) {
+/* Queue a GRPC_OP_COMPLETED operation */
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
+ void *done_arg, grpc_cq_completion *storage) {
+ int shutdown;
+ int i;
+ grpc_pollset_worker *pluck_worker;
+#ifndef NDEBUG
+ int found = 0;
+#endif
+
+ GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
+
+ storage->tag = tag;
+ storage->done = done;
+ storage->done_arg = done_arg;
+ storage->next =
+ ((uintptr_t)&cc->completed_head) | ((uintptr_t)(success != 0));
+
+ gpr_mu_lock(cc->mu);
+#ifndef NDEBUG
+ for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
+ if (cc->outstanding_tags[i] == tag) {
+ cc->outstanding_tag_count--;
+ GPR_SWAP(void *, cc->outstanding_tags[i],
+ cc->outstanding_tags[cc->outstanding_tag_count]);
+ found = 1;
+ break;
+ }
+ }
+ GPR_ASSERT(found);
+#endif
+ shutdown = gpr_unref(&cc->pending_events);
+ if (!shutdown) {
+ cc->completed_tail->next =
+ ((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
+ cc->completed_tail = storage;
+ pluck_worker = NULL;
+ for (i = 0; i < cc->num_pluckers; i++) {
+ if (cc->pluckers[i].tag == tag) {
+ pluck_worker = *cc->pluckers[i].worker;
+ break;
+ }
+ }
+ grpc_pollset_kick(POLLSET_FROM_CQ(cc), pluck_worker);
+ gpr_mu_unlock(cc->mu);
+ } else {
+ cc->completed_tail->next =
+ ((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
+ cc->completed_tail = storage;
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
- shutdown = 1;
+ grpc_pollset_shutdown(exec_ctx, POLLSET_FROM_CQ(cc),
+ &cc->pollset_shutdown_done);
+ gpr_mu_unlock(cc->mu);
}
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- if (call) GRPC_CALL_INTERNAL_UNREF(call, "cq", 0);
- if (shutdown) {
- grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
- }
-}
-/* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
-static event *create_shutdown_event(void) {
- event *ev = gpr_malloc(sizeof(event));
- ev->base.type = GRPC_QUEUE_SHUTDOWN;
- ev->base.tag = NULL;
- return ev;
+ GPR_TIMER_END("grpc_cq_end_op", 0);
}
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
- gpr_timespec deadline) {
- event *ev = NULL;
+ gpr_timespec deadline, void *reserved) {
grpc_event ret;
+ grpc_pollset_worker *worker = NULL;
+ int first_loop = 1;
+ gpr_timespec now;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
+
+ GRPC_API_TRACE(
+ "grpc_completion_queue_next("
+ "cc=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 5, (cc, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+ gpr_mu_lock(cc->mu);
for (;;) {
- if (cc->queue != NULL) {
- gpr_uintptr bucket;
- ev = cc->queue;
- bucket = ((gpr_uintptr)ev->base.tag) % NUM_TAG_BUCKETS;
- cc->queue = ev->queue_next;
- ev->queue_next->queue_prev = ev->queue_prev;
- ev->queue_prev->queue_next = ev->queue_next;
- ev->bucket_next->bucket_prev = ev->bucket_prev;
- ev->bucket_prev->bucket_next = ev->bucket_next;
- if (ev == cc->buckets[bucket]) {
- cc->buckets[bucket] = ev->bucket_next;
- if (ev == cc->buckets[bucket]) {
- cc->buckets[bucket] = NULL;
- }
- }
- if (cc->queue == ev) {
- cc->queue = NULL;
+ if (cc->completed_tail != &cc->completed_head) {
+ grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
+ cc->completed_head.next = c->next & ~(uintptr_t)1;
+ if (c == cc->completed_tail) {
+ cc->completed_tail = &cc->completed_head;
}
+ gpr_mu_unlock(cc->mu);
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
break;
}
if (cc->shutdown) {
- ev = create_shutdown_event();
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_SHUTDOWN;
break;
}
- if (!grpc_pollset_work(&cc->pollset, deadline)) {
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
- GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
- GRPC_CQ_INTERNAL_UNREF(cc, "next");
- return ret;
+ break;
+ }
+ first_loop = 0;
+ /* Check alarms - these are a global resource so we just ping
+ each time through on every pollset.
+ May update deadline to ensure timely wakeups.
+ TODO(ctiller): can this work be localized? */
+ gpr_timespec iteration_deadline = deadline;
+ if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+ GPR_TIMER_MARK("alarm_triggered", 0);
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(cc->mu);
+ continue;
+ } else {
+ grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+ iteration_deadline);
}
}
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- ret = ev->base;
- gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_next", 0);
+
return ret;
}
-static event *pluck_event(grpc_completion_queue *cc, void *tag) {
- gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
- event *ev = cc->buckets[bucket];
- if (ev == NULL) return NULL;
- do {
- if (ev->base.tag == tag) {
- ev->queue_next->queue_prev = ev->queue_prev;
- ev->queue_prev->queue_next = ev->queue_next;
- ev->bucket_next->bucket_prev = ev->bucket_prev;
- ev->bucket_prev->bucket_next = ev->bucket_next;
- if (ev == cc->buckets[bucket]) {
- cc->buckets[bucket] = ev->bucket_next;
- if (ev == cc->buckets[bucket]) {
- cc->buckets[bucket] = NULL;
- }
- }
- if (cc->queue == ev) {
- cc->queue = ev->queue_next;
- if (cc->queue == ev) {
- cc->queue = NULL;
- }
- }
- return ev;
+static int add_plucker(grpc_completion_queue *cc, void *tag,
+ grpc_pollset_worker **worker) {
+ if (cc->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
+ return 0;
+ }
+ cc->pluckers[cc->num_pluckers].tag = tag;
+ cc->pluckers[cc->num_pluckers].worker = worker;
+ cc->num_pluckers++;
+ return 1;
+}
+
+static void del_plucker(grpc_completion_queue *cc, void *tag,
+ grpc_pollset_worker **worker) {
+ int i;
+ for (i = 0; i < cc->num_pluckers; i++) {
+ if (cc->pluckers[i].tag == tag && cc->pluckers[i].worker == worker) {
+ cc->num_pluckers--;
+ GPR_SWAP(plucker, cc->pluckers[i], cc->pluckers[cc->num_pluckers]);
+ return;
}
- ev = ev->bucket_next;
- } while (ev != cc->buckets[bucket]);
- return NULL;
+ }
+ GPR_UNREACHABLE_CODE(return );
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
- gpr_timespec deadline) {
- event *ev = NULL;
+ gpr_timespec deadline, void *reserved) {
grpc_event ret;
+ grpc_cq_completion *c;
+ grpc_cq_completion *prev;
+ grpc_pollset_worker *worker = NULL;
+ gpr_timespec now;
+ int first_loop = 1;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
+
+ GRPC_API_TRACE(
+ "grpc_completion_queue_pluck("
+ "cc=%p, tag=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 6, (cc, tag, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+ gpr_mu_lock(cc->mu);
for (;;) {
- if ((ev = pluck_event(cc, tag))) {
- break;
+ prev = &cc->completed_head;
+ while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ &cc->completed_head) {
+ if (c->tag == tag) {
+ prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
+ if (c == cc->completed_tail) {
+ cc->completed_tail = prev;
+ }
+ gpr_mu_unlock(cc->mu);
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ goto done;
+ }
+ prev = c;
}
if (cc->shutdown) {
- ev = create_shutdown_event();
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_SHUTDOWN;
break;
}
- if (!grpc_pollset_work(&cc->pollset, deadline)) {
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+ if (!add_plucker(cc, tag, &worker)) {
+ gpr_log(GPR_DEBUG,
+ "Too many outstanding grpc_completion_queue_pluck calls: maximum "
+ "is %d",
+ GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
+ gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
+ /* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
- GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
- GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
- return ret;
+ break;
+ }
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ del_plucker(cc, tag, &worker);
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_TIMEOUT;
+ break;
+ }
+ first_loop = 0;
+ /* Check alarms - these are a global resource so we just ping
+ each time through on every pollset.
+ May update deadline to ensure timely wakeups.
+ TODO(ctiller): can this work be localized? */
+ gpr_timespec iteration_deadline = deadline;
+ if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+ GPR_TIMER_MARK("alarm_triggered", 0);
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(cc->mu);
+ } else {
+ grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+ iteration_deadline);
}
+ del_plucker(cc, tag, &worker);
}
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- ret = ev->base;
- gpr_free(ev);
+done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_pluck", 0);
+
return ret;
}
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
+ GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
+ gpr_mu_lock(cc->mu);
if (cc->shutdown_called) {
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+ gpr_mu_unlock(cc->mu);
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cc->shutdown_called = 1;
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
-
- if (gpr_unref(&cc->refs)) {
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+ if (gpr_unref(&cc->pending_events)) {
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
+ grpc_pollset_shutdown(&exec_ctx, POLLSET_FROM_CQ(cc),
+ &cc->pollset_shutdown_done);
}
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
+ GRPC_API_TRACE("grpc_completion_queue_destroy(cc=%p)", 1, (cc));
+ GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
grpc_completion_queue_shutdown(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
+ GPR_TIMER_END("grpc_completion_queue_destroy", 0);
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
- return &cc->pollset;
-}
-
-void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_kick(&cc->pollset);
- grpc_pollset_work(&cc->pollset,
- gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+ return POLLSET_FROM_CQ(cc);
}
void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }
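The rewrite above replaces the malloc-per-event queue with an intrusive list in which each completion's `next` word also carries the success flag in its low bit (legal because completions are at least pointer-aligned). A free-standing sketch of that tagging idiom, using illustrative names rather than grpc symbols:

#include <stdint.h>

typedef struct node { uintptr_t next; } node;

/* Store the successor pointer and a one-bit flag in the same word. */
static void link_with_flag(node *tail, node *n, int success) {
  tail->next = (uintptr_t)n | (uintptr_t)(success != 0);
}

/* Strip bit 0 to recover the successor pointer. */
static node *successor(const node *n) {
  return (node *)(n->next & ~(uintptr_t)1);
}

/* Read bit 0 back as the flag. */
static int flag_of(const node *n) { return (int)(n->next & 1u); }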
diff --git a/src/core/surface/completion_queue.h b/src/core/surface/completion_queue.h
index 1b9010f462..07f6d0c8f6 100644
--- a/src/core/surface/completion_queue.h
+++ b/src/core/surface/completion_queue.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,26 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H
-#define GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H
+#ifndef GRPC_CORE_SURFACE_COMPLETION_QUEUE_H
+#define GRPC_CORE_SURFACE_COMPLETION_QUEUE_H
-/* Internal API for completion channels */
+/* Internal API for completion queues */
#include "src/core/iomgr/pollset.h"
#include <grpc/grpc.h>
+typedef struct grpc_cq_completion {
+ /** user supplied tag */
+ void *tag;
+ /** done callback - called when this queue element is no longer
+ needed by the completion queue */
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ struct grpc_cq_completion *c);
+ void *done_arg;
+ /** next pointer; low bit is used to indicate success or not */
+ uintptr_t next;
+} grpc_cq_completion;
+
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line);
@@ -56,18 +68,24 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc);
#endif
/* Flag that an operation is beginning: the completion channel will not finish
- shutdown until a corrensponding grpc_cq_end_* call is made */
-void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call);
+   shutdown until a corresponding grpc_cq_end_* call is made.
+ \a tag is currently used only in debug builds. */
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag);
-/* Queue a GRPC_OP_COMPLETED operation */
-void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
- int success);
+/* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to
+ grpc_cq_begin_op */
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
+ void *done_arg, grpc_cq_completion *storage);
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
-void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc);
-
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
-#endif /* GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H */
+void grpc_cq_global_init(void);
+void grpc_cq_global_shutdown(void);
+
+#endif /* GRPC_CORE_SURFACE_COMPLETION_QUEUE_H */
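Every producer now follows the begin/end pairing documented above; channel_ping.c in this commit is a complete worked example. A condensed sketch, where the callback, its argument, and the embedded storage are all supplied by a hypothetical caller:

/* Reserve the slot before kicking off asynchronous work. */
static void start_my_op(grpc_completion_queue *cq, void *tag) {
  grpc_cq_begin_op(cq, tag);
}

/* Hand the result back with the same tag; `storage` must stay valid until
   done_cb runs, and done_cb typically frees whatever owns it (compare
   ping_destroy in channel_ping.c). */
static void finish_my_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
                         void *tag, int success,
                         void (*done_cb)(grpc_exec_ctx *, void *,
                                         grpc_cq_completion *),
                         void *done_arg, grpc_cq_completion *storage) {
  grpc_cq_end_op(exec_ctx, cq, tag, success, done_cb, done_arg, storage);
}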
diff --git a/src/core/surface/event_string.h b/src/core/surface/event_string.h
index e8a8f93518..d0598cecca 100644
--- a/src/core/surface/event_string.h
+++ b/src/core/surface/event_string.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,12 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_EVENT_STRING_H
-#define GRPC_INTERNAL_CORE_SURFACE_EVENT_STRING_H
+#ifndef GRPC_CORE_SURFACE_EVENT_STRING_H
+#define GRPC_CORE_SURFACE_EVENT_STRING_H
#include <grpc/grpc.h>
/* Returns a string describing an event. Must be later freed with gpr_free() */
char *grpc_event_string(grpc_event *ev);
-#endif /* GRPC_INTERNAL_CORE_SURFACE_EVENT_STRING_H */
+#endif /* GRPC_CORE_SURFACE_EVENT_STRING_H */
diff --git a/src/core/surface/init.c b/src/core/surface/init.c
index ca61a38a35..b50770959f 100644
--- a/src/core/surface/init.c
+++ b/src/core/surface/init.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,16 +31,52 @@
*
*/
-#include <grpc/census.h>
+#include <grpc/support/port_platform.h>
+
+#include <limits.h>
+#include <memory.h>
+
#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/time.h>
+/* TODO(ctiller): find another way? - better not to include census here */
+#include "src/core/census/grpc_plugin.h"
#include "src/core/channel/channel_stack.h"
+#include "src/core/channel/compress_filter.h"
+#include "src/core/channel/connected_channel.h"
+#include "src/core/channel/client_channel.h"
+#include "src/core/channel/client_uchannel.h"
+#include "src/core/channel/http_client_filter.h"
+#include "src/core/channel/http_server_filter.h"
+#include "src/core/client_config/lb_policy_registry.h"
+#include "src/core/client_config/lb_policies/pick_first.h"
+#include "src/core/client_config/lb_policies/round_robin.h"
+#include "src/core/client_config/resolver_registry.h"
+#include "src/core/client_config/resolvers/dns_resolver.h"
+#include "src/core/client_config/resolvers/sockaddr_resolver.h"
+#include "src/core/client_config/subchannel.h"
+#include "src/core/client_config/subchannel_index.h"
#include "src/core/debug/trace.h"
+#include "src/core/iomgr/executor.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/profiling/timers.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
+#include "src/core/surface/channel_init.h"
+#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
+#include "src/core/surface/lame_client.h"
+#include "src/core/surface/server.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/transport/chttp2_transport.h"
+#include "src/core/transport/connectivity_state.h"
+#include "src/core/transport/transport_impl.h"
+
+#ifndef GRPC_DEFAULT_NAME_PREFIX
+#define GRPC_DEFAULT_NAME_PREFIX "dns:///"
+#endif
+
+#define MAX_PLUGINS 128
static gpr_once g_basic_init = GPR_ONCE_INIT;
static gpr_mu g_init_mu;
@@ -48,37 +84,150 @@ static int g_initializations;
static void do_basic_init(void) {
gpr_mu_init(&g_init_mu);
+ /* TODO(ctiller): ideally remove this strict linkage */
+ grpc_register_plugin(census_grpc_plugin_init, census_grpc_plugin_destroy);
g_initializations = 0;
}
+static bool append_filter(grpc_channel_stack_builder *builder, void *arg) {
+ return grpc_channel_stack_builder_append_filter(builder, arg, NULL, NULL);
+}
+
+static bool prepend_filter(grpc_channel_stack_builder *builder, void *arg) {
+ return grpc_channel_stack_builder_prepend_filter(builder, arg, NULL, NULL);
+}
+
+static bool maybe_add_http_filter(grpc_channel_stack_builder *builder,
+ void *arg) {
+ grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ if (t && strstr(t->vtable->name, "http")) {
+ return grpc_channel_stack_builder_prepend_filter(builder, arg, NULL, NULL);
+ }
+ return true;
+}
+
+static void register_builtin_channel_init() {
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_UCHANNEL, INT_MAX,
+ prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_server_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, append_filter,
+ (void *)&grpc_client_channel_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_UCHANNEL, INT_MAX, append_filter,
+ (void *)&grpc_client_uchannel_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_LAME_CHANNEL, INT_MAX,
+ append_filter, (void *)&grpc_lame_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_server_top_filter);
+}
+
+typedef struct grpc_plugin {
+ void (*init)();
+ void (*destroy)();
+} grpc_plugin;
+
+static grpc_plugin g_all_of_the_plugins[MAX_PLUGINS];
+static int g_number_of_plugins = 0;
+
+void grpc_register_plugin(void (*init)(void), void (*destroy)(void)) {
+ GRPC_API_TRACE("grpc_register_plugin(init=%p, destroy=%p)", 2,
+ ((void *)(intptr_t)init, (void *)(intptr_t)destroy));
+ GPR_ASSERT(g_number_of_plugins != MAX_PLUGINS);
+ g_all_of_the_plugins[g_number_of_plugins].init = init;
+ g_all_of_the_plugins[g_number_of_plugins].destroy = destroy;
+ g_number_of_plugins++;
+}
+
void grpc_init(void) {
+ int i;
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
+ gpr_time_init();
+ grpc_mdctx_global_init();
+ grpc_channel_init_init();
+ grpc_lb_policy_registry_init(grpc_pick_first_lb_factory_create());
+ grpc_register_lb_policy(grpc_pick_first_lb_factory_create());
+ grpc_register_lb_policy(grpc_round_robin_lb_factory_create());
+ grpc_resolver_registry_init(GRPC_DEFAULT_NAME_PREFIX);
+ grpc_register_resolver_type(grpc_dns_resolver_factory_create());
+ grpc_register_resolver_type(grpc_ipv4_resolver_factory_create());
+ grpc_register_resolver_type(grpc_ipv6_resolver_factory_create());
+#ifdef GPR_POSIX_SOCKET
+ grpc_register_resolver_type(grpc_unix_resolver_factory_create());
+#endif
+ grpc_register_tracer("api", &grpc_api_trace);
grpc_register_tracer("channel", &grpc_trace_channel);
- grpc_register_tracer("surface", &grpc_surface_trace);
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
- grpc_register_tracer("batch", &grpc_trace_batch);
+ grpc_register_tracer("connectivity_state", &grpc_connectivity_state_trace);
+ grpc_register_tracer("channel_stack_builder",
+ &grpc_trace_channel_stack_builder);
grpc_security_pre_init();
grpc_iomgr_init();
+ grpc_executor_init();
grpc_tracer_init("GRPC_TRACE");
- if (census_initialize(CENSUS_NONE)) {
- gpr_log(GPR_ERROR, "Could not initialize census.");
+ gpr_timers_global_init();
+ grpc_cq_global_init();
+ grpc_subchannel_index_init();
+ for (i = 0; i < g_number_of_plugins; i++) {
+ if (g_all_of_the_plugins[i].init != NULL) {
+ g_all_of_the_plugins[i].init();
+ }
}
- grpc_timers_global_init();
+ /* register channel finalization AFTER all plugins, to ensure that it's run
+ * at the appropriate time */
+ grpc_register_security_filters();
+ register_builtin_channel_init();
+ /* no more changes to channel init pipelines */
+ grpc_channel_init_finalize();
}
gpr_mu_unlock(&g_init_mu);
+ GRPC_API_TRACE("grpc_init(void)", 0, ());
}
void grpc_shutdown(void) {
+ int i;
+ GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
+ grpc_executor_shutdown();
+ grpc_cq_global_shutdown();
grpc_iomgr_shutdown();
- census_shutdown();
- grpc_timers_global_destroy();
+ grpc_subchannel_index_shutdown();
+ gpr_timers_global_destroy();
grpc_tracer_shutdown();
+ grpc_resolver_registry_shutdown();
+ grpc_lb_policy_registry_shutdown();
+ for (i = 0; i < g_number_of_plugins; i++) {
+ if (g_all_of_the_plugins[i].destroy != NULL) {
+ g_all_of_the_plugins[i].destroy();
+ }
+ }
+ grpc_channel_init_shutdown();
+ grpc_mdctx_global_shutdown();
}
gpr_mu_unlock(&g_init_mu);
}
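A sketch of the new plugin hook from the application side, assuming the matching public prototype for grpc_register_plugin; the two callbacks are illustrative. Registration must happen before the first grpc_init() so the init callback runs inside it, and the destroy callbacks run inside the final grpc_shutdown().

#include <grpc/grpc.h>

static void my_plugin_init(void) { /* e.g. register tracers or channel stages */ }
static void my_plugin_destroy(void) { /* undo whatever my_plugin_init did */ }

int main(void) {
  grpc_register_plugin(my_plugin_init, my_plugin_destroy);
  grpc_init();
  /* ... use the library ... */
  grpc_shutdown();
  return 0;
}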
diff --git a/src/core/surface/init.h b/src/core/surface/init.h
index 416874020d..5e358c7022 100644
--- a/src/core/surface/init.h
+++ b/src/core/surface/init.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,11 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_INIT_H
-#define GRPC_INTERNAL_CORE_SURFACE_INIT_H
+#ifndef GRPC_CORE_SURFACE_INIT_H
+#define GRPC_CORE_SURFACE_INIT_H
+void grpc_register_security_filters(void);
void grpc_security_pre_init(void);
int grpc_is_initialized(void);
-#endif /* GRPC_INTERNAL_CORE_SURFACE_INIT_H */
+#endif /* GRPC_CORE_SURFACE_INIT_H */
diff --git a/src/core/surface/init_secure.c b/src/core/surface/init_secure.c
index fa20e91583..311dda9864 100644
--- a/src/core/surface/init_secure.c
+++ b/src/core/surface/init_secure.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,11 +32,58 @@
*/
#include "src/core/surface/init.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include "src/core/surface/channel_init.h"
#include "src/core/debug/trace.h"
+#include "src/core/security/auth_filters.h"
+#include "src/core/security/credentials.h"
#include "src/core/security/secure_endpoint.h"
+#include "src/core/security/security_connector.h"
#include "src/core/tsi/transport_security_interface.h"
void grpc_security_pre_init(void) {
grpc_register_tracer("secure_endpoint", &grpc_trace_secure_endpoint);
grpc_register_tracer("transport_security", &tsi_tracing_enabled);
}
+
+static bool maybe_prepend_client_auth_filter(
+ grpc_channel_stack_builder *builder, void *arg) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(GRPC_SECURITY_CONNECTOR_ARG, args->args[i].key)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, &grpc_client_auth_filter, NULL, NULL);
+ }
+ }
+ }
+ return true;
+}
+
+static bool maybe_prepend_server_auth_filter(
+ grpc_channel_stack_builder *builder, void *arg) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(GRPC_SERVER_CREDENTIALS_ARG, args->args[i].key)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, &grpc_server_auth_filter, NULL, NULL);
+ }
+ }
+ }
+ return true;
+}
+
+void grpc_register_security_filters(void) {
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ maybe_prepend_client_auth_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_prepend_client_auth_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_prepend_server_auth_filter, NULL);
+}
diff --git a/src/core/surface/init_unsecure.c b/src/core/surface/init_unsecure.c
index ddb70cef8e..278fcc83ac 100644
--- a/src/core/surface/init_unsecure.c
+++ b/src/core/surface/init_unsecure.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,5 +33,6 @@
#include "src/core/surface/init.h"
-void grpc_security_pre_init(void) {
-}
+void grpc_security_pre_init(void) {}
+
+void grpc_register_security_filters(void) {}
diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c
index 85e1ab5554..58f89946d2 100644
--- a/src/core/surface/lame_client.c
+++ b/src/core/surface/lame_client.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,15 @@
*
*/
+#include "src/core/surface/lame_client.h"
+
#include <grpc/grpc.h>
#include <string.h>
#include "src/core/channel/channel_stack.h"
#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/call.h"
#include <grpc/support/alloc.h>
@@ -47,84 +50,99 @@ typedef struct {
grpc_linked_mdelem details;
} call_data;
-typedef struct { grpc_mdctx *mdctx; } channel_data;
+typedef struct {
+ grpc_status_code error_code;
+ const char *error_message;
+} channel_data;
-static void lame_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(chand->error_code, tmp);
+ calld->status.md = grpc_mdelem_from_strings("grpc-status", tmp);
+ calld->details.md =
+ grpc_mdelem_from_strings("grpc-message", chand->error_message);
+ calld->status.prev = calld->details.next = NULL;
+ calld->status.next = &calld->details;
+ calld->details.prev = &calld->status;
+ mdb->list.head = &calld->status;
+ mdb->list.tail = &calld->details;
+ mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+}
+
+static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- if (op->send_ops) {
- grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
- }
- if (op->recv_ops) {
- char tmp[GPR_LTOA_MIN_BUFSIZE];
- grpc_metadata_batch mdb;
- gpr_ltoa(GRPC_STATUS_UNKNOWN, tmp);
- calld->status.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-status", tmp);
- calld->details.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-message",
- "Rpc sent on a lame channel.");
- calld->status.prev = calld->details.next = NULL;
- calld->status.next = &calld->details;
- calld->details.prev = &calld->status;
- mdb.list.head = &calld->status;
- mdb.list.tail = &calld->details;
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future;
- grpc_sopb_add_metadata(op->recv_ops, mdb);
- *op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
- }
- if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
+ if (op->recv_initial_metadata != NULL) {
+ fill_metadata(elem, op->recv_initial_metadata);
+ } else if (op->recv_trailing_metadata != NULL) {
+ fill_metadata(elem, op->recv_trailing_metadata);
}
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
}
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- switch (op->type) {
- case GRPC_CHANNEL_GOAWAY:
- gpr_slice_unref(op->data.goaway.message);
- break;
- case GRPC_CHANNEL_DISCONNECT:
- grpc_client_channel_closed(elem);
- break;
- default:
- break;
- }
+static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ return NULL;
}
-static void init_call_elem(grpc_call_element *elem,
- const void *transport_server_data,
- grpc_transport_op *initial_op) {
- if (initial_op) {
- grpc_transport_op_finish_with_failure(initial_op);
+static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_transport_op *op) {
+ if (op->on_connectivity_state_change) {
+ GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE);
+ *op->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
+ op->on_connectivity_state_change->cb(
+ exec_ctx, op->on_connectivity_state_change->cb_arg, 1);
+ }
+ if (op->on_consumed != NULL) {
+ op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 1);
}
}
-static void destroy_call_elem(grpc_call_element *elem) {}
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {}
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- channel_data *chand = elem->channel_data;
- GPR_ASSERT(is_first);
- GPR_ASSERT(is_last);
- chand->mdctx = mdctx;
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
+
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(args->is_last);
}
-static void destroy_channel_elem(grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {}
-static const grpc_channel_filter lame_filter = {
- lame_start_transport_op, channel_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, "lame-client",
+const grpc_channel_filter grpc_lame_filter = {
+ lame_start_transport_stream_op, lame_start_transport_op, sizeof(call_data),
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ lame_get_peer, "lame-client",
};
-grpc_channel *grpc_lame_client_channel_create(void) {
- static const grpc_channel_filter *filters[] = {&lame_filter};
- return grpc_channel_create_from_filters(filters, 1, NULL, grpc_mdctx_create(),
- 1);
+#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
+
+grpc_channel *grpc_lame_client_channel_create(const char *target,
+ grpc_status_code error_code,
+ const char *error_message) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_channel_element *elem;
+ channel_data *chand;
+ grpc_channel *channel = grpc_channel_create(&exec_ctx, target, NULL,
+ GRPC_CLIENT_LAME_CHANNEL, NULL);
+ elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ GRPC_API_TRACE(
+ "grpc_lame_client_channel_create(target=%s, error_code=%d, "
+ "error_message=%s)",
+ 3, (target, (int)error_code, error_message));
+ GPR_ASSERT(elem->filter == &grpc_lame_filter);
+ chand = (channel_data *)elem->channel_data;
+ chand->error_code = error_code;
+ chand->error_message = error_message;
+ grpc_exec_ctx_finish(&exec_ctx);
+ return channel;
}
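Usage sketch for the reworked constructor, assuming the matching public declaration. The target string and message are illustrative; note the message is stored by pointer rather than copied, so it should outlive the channel (a string literal is fine).

#include <grpc/grpc.h>

grpc_channel *make_broken_channel(void) {
  grpc_channel *chan = grpc_lame_client_channel_create(
      "lame:example", GRPC_STATUS_UNAVAILABLE, "channel is permanently broken");
  /* Every RPC on `chan` fails immediately: the filter fills the received
     metadata with grpc-status=14 and the message above. */
  return chan; /* caller releases it with grpc_channel_destroy */
}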
diff --git a/src/core/surface/client.h b/src/core/surface/lame_client.h
index 9db2ccf3d2..3f3abd2ffe 100644
--- a/src/core/surface/client.h
+++ b/src/core/surface/lame_client.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_CLIENT_H
-#define GRPC_INTERNAL_CORE_SURFACE_CLIENT_H
+#ifndef GRPC_CORE_SURFACE_LAME_CLIENT_H
+#define GRPC_CORE_SURFACE_LAME_CLIENT_H
#include "src/core/channel/channel_stack.h"
-extern const grpc_channel_filter grpc_client_surface_filter;
+extern const grpc_channel_filter grpc_lame_filter;
-#endif /* GRPC_INTERNAL_CORE_SURFACE_CLIENT_H */
+#endif /* GRPC_CORE_SURFACE_LAME_CLIENT_H */
diff --git a/src/core/surface/metadata_array.c b/src/core/surface/metadata_array.c
index 4010977497..4c7bf17835 100644
--- a/src/core/surface/metadata_array.c
+++ b/src/core/surface/metadata_array.c
@@ -36,10 +36,14 @@
#include <string.h>
-void grpc_metadata_array_init(grpc_metadata_array *array) {
+#include "src/core/surface/api_trace.h"
+
+void grpc_metadata_array_init(grpc_metadata_array* array) {
+ GRPC_API_TRACE("grpc_metadata_array_init(array=%p)", 1, (array));
memset(array, 0, sizeof(*array));
}
-void grpc_metadata_array_destroy(grpc_metadata_array *array) {
+void grpc_metadata_array_destroy(grpc_metadata_array* array) {
+ GRPC_API_TRACE("grpc_metadata_array_destroy(array=%p)", 1, (array));
gpr_free(array->metadata);
}
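
The metadata_array entry points only gain API tracing here; their contract is unchanged. For reference, a minimal sketch of the init/destroy pairing being traced (the receiving batch op named in the comment is the usual consumer and is not shown):

    #include <grpc/grpc.h>

    static void metadata_array_lifecycle(void) {
      grpc_metadata_array trailing_md;
      grpc_metadata_array_init(&trailing_md);    /* zeroes count, capacity, metadata */
      /* ...pass &trailing_md as the trailing-metadata destination of a
         GRPC_OP_RECV_STATUS_ON_CLIENT batch op... */
      grpc_metadata_array_destroy(&trailing_md); /* frees only the backing array */
    }
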
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index fae3e4e90a..cc752227ee 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,232 +31,289 @@
*
*/
-#include "src/core/iomgr/sockaddr.h"
-
#include <grpc/grpc.h>
#include <stdlib.h>
#include <string.h>
-#include "src/core/channel/census_filter.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
+
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
-#include "src/core/channel/client_setup.h"
-#include "src/core/channel/connected_channel.h"
-#include "src/core/channel/http_client_filter.h"
-#include "src/core/iomgr/resolve_address.h"
+#include "src/core/client_config/resolver_registry.h"
#include "src/core/iomgr/tcp_client.h"
#include "src/core/security/auth_filters.h"
#include "src/core/security/credentials.h"
-#include "src/core/security/secure_transport_setup.h"
-#include "src/core/support/string.h"
+#include "src/core/security/security_context.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/channel.h"
-#include "src/core/surface/client.h"
#include "src/core/transport/chttp2_transport.h"
-#include <grpc/grpc_security.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
#include "src/core/tsi/transport_security_interface.h"
-typedef struct setup setup;
-
-/* A single setup request (started via initiate) */
typedef struct {
- grpc_client_setup_request *cs_request;
- setup *setup;
- /* Resolved addresses, or null if resolution not yet completed. */
- grpc_resolved_addresses *resolved;
- /* which address in resolved should we pick for the next connection attempt */
- size_t resolved_index;
-} request;
+ grpc_connector base;
+ gpr_refcount refs;
-struct setup {
grpc_channel_security_connector *security_connector;
- const char *target;
- grpc_transport_setup_callback setup_callback;
- void *setup_user_data;
-};
-static int maybe_try_next_resolved(request *r);
+ grpc_closure *notify;
+ grpc_connect_in_args args;
+ grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
+
+ gpr_mu mu;
+ grpc_endpoint *connecting_endpoint;
+ grpc_endpoint *newly_connecting_endpoint;
+
+ grpc_closure connected_closure;
+} connector;
+
+static void connector_ref(grpc_connector *con) {
+ connector *c = (connector *)con;
+ gpr_ref(&c->refs);
+}
-static void done(request *r, int was_successful) {
- grpc_client_setup_request_finish(r->cs_request, was_successful);
- if (r->resolved) {
- grpc_resolved_addresses_destroy(r->resolved);
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ if (gpr_unref(&c->refs)) {
+ /* c->initial_string_buffer does not need to be destroyed */
+ gpr_free(c);
}
- gpr_free(r);
}
-static void on_secure_transport_setup_done(void *rp,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint) {
- request *r = rp;
- if (status != GRPC_SECURITY_OK) {
- gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
- done(r, 0);
- } else if (grpc_client_setup_cb_begin(r->cs_request,
- "on_secure_transport_setup_done")) {
- grpc_create_chttp2_transport(
- r->setup->setup_callback, r->setup->setup_user_data,
- grpc_client_setup_get_channel_args(r->cs_request), secure_endpoint,
- NULL, 0, grpc_client_setup_get_mdctx(r->cs_request), 1);
- grpc_client_setup_cb_end(r->cs_request, "on_secure_transport_setup_done");
- done(r, 1);
+static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_security_status status,
+ grpc_endpoint *secure_endpoint,
+ grpc_auth_context *auth_context) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_channel_args *args_copy = NULL;
+ gpr_mu_lock(&c->mu);
+ if (c->connecting_endpoint == NULL) {
+ memset(c->result, 0, sizeof(*c->result));
+ gpr_mu_unlock(&c->mu);
+ } else if (status != GRPC_SECURITY_OK) {
+ gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status);
+ memset(c->result, 0, sizeof(*c->result));
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
} else {
- done(r, 0);
+ grpc_arg auth_context_arg;
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
+ c->result->transport = grpc_create_chttp2_transport(
+ exec_ctx, c->args.channel_args, secure_endpoint, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
+ auth_context_arg = grpc_auth_context_to_arg(auth_context);
+ args_copy = grpc_channel_args_copy_and_add(c->args.channel_args,
+ &auth_context_arg, 1);
+ c->result->channel_args = args_copy;
}
+ notify = c->notify;
+ c->notify = NULL;
+ /* look at c->args which are connector args. */
+ notify->cb(exec_ctx, notify->cb_arg, 1);
+ if (args_copy != NULL) grpc_channel_args_destroy(args_copy);
}
-/* connection callback: tcp is either valid, or null on error */
-static void on_connect(void *rp, grpc_endpoint *tcp) {
- request *r = rp;
-
- if (!grpc_client_setup_request_should_continue(r->cs_request,
- "on_connect.secure")) {
- if (tcp) {
- grpc_endpoint_shutdown(tcp);
- grpc_endpoint_destroy(tcp);
- }
- done(r, 0);
- return;
- }
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ connector *c = arg;
+ grpc_channel_security_connector_do_handshake(exec_ctx, c->security_connector,
+ c->connecting_endpoint,
+ on_secure_handshake_done, c);
+}
- if (!tcp) {
- if (!maybe_try_next_resolved(r)) {
- done(r, 0);
- return;
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->newly_connecting_endpoint;
+ if (tcp != NULL) {
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->connecting_endpoint == NULL);
+ c->connecting_endpoint = tcp;
+ gpr_mu_unlock(&c->mu);
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
} else {
- return;
+ grpc_channel_security_connector_do_handshake(
+ exec_ctx, c->security_connector, tcp, on_secure_handshake_done, c);
}
} else {
- grpc_setup_secure_transport(&r->setup->security_connector->base, tcp,
- on_secure_transport_setup_done, r);
+ memset(c->result, 0, sizeof(*c->result));
+ notify = c->notify;
+ c->notify = NULL;
+ notify->cb(exec_ctx, notify->cb_arg, 1);
+ }
+}
+
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ grpc_endpoint *ep;
+ gpr_mu_lock(&c->mu);
+ ep = c->connecting_endpoint;
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
+ if (ep) {
+ grpc_endpoint_shutdown(exec_ctx, ep);
}
}
-/* attempt to connect to the next available resolved address */
-static int maybe_try_next_resolved(request *r) {
- grpc_resolved_address *addr;
- if (!r->resolved) return 0;
- if (r->resolved_index == r->resolved->naddrs) return 0;
- addr = &r->resolved->addrs[r->resolved_index++];
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
+ const grpc_connect_in_args *args,
+ grpc_connect_out_args *result,
+ grpc_closure *notify) {
+ connector *c = (connector *)con;
+ GPR_ASSERT(c->notify == NULL);
+ GPR_ASSERT(notify->cb);
+ c->notify = notify;
+ c->args = *args;
+ c->result = result;
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->connecting_endpoint == NULL);
+ gpr_mu_unlock(&c->mu);
+ grpc_closure_init(&c->connected_closure, connected, c);
grpc_tcp_client_connect(
- on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request),
- (struct sockaddr *)&addr->addr, addr->len,
- grpc_client_setup_request_deadline(r->cs_request));
- return 1;
+ exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
+ args->interested_parties, args->addr, args->addr_len, args->deadline);
}
-/* callback for when our target address has been resolved */
-static void on_resolved(void *rp, grpc_resolved_addresses *resolved) {
- request *r = rp;
+static const grpc_connector_vtable connector_vtable = {
+ connector_ref, connector_unref, connector_shutdown, connector_connect};
- /* if we're not still the active request, abort */
- if (!grpc_client_setup_request_should_continue(r->cs_request,
- "on_resolved.secure")) {
- if (resolved) {
- grpc_resolved_addresses_destroy(resolved);
- }
- done(r, 0);
- return;
- }
+typedef struct {
+ grpc_subchannel_factory base;
+ gpr_refcount refs;
+ grpc_channel_args *merge_args;
+ grpc_channel_security_connector *security_connector;
+ grpc_channel *master;
+} subchannel_factory;
- if (!resolved) {
- done(r, 0);
- return;
- } else {
- r->resolved = resolved;
- r->resolved_index = 0;
- if (!maybe_try_next_resolved(r)) {
- done(r, 0);
- }
- }
+static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ gpr_ref(&f->refs);
}
-static void initiate_setup(void *sp, grpc_client_setup_request *cs_request) {
- request *r = gpr_malloc(sizeof(request));
- r->setup = sp;
- r->cs_request = cs_request;
- r->resolved = NULL;
- r->resolved_index = 0;
- /* TODO(klempner): Make grpc_resolve_address respect deadline */
- grpc_resolve_address(r->setup->target, "https", on_resolved, r);
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ if (gpr_unref(&f->refs)) {
+ GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
+ "subchannel_factory");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
+ grpc_channel_args_destroy(f->merge_args);
+ gpr_free(f);
+ }
}
-static void done_setup(void *sp) {
- setup *s = sp;
- gpr_free((void *)s->target);
- grpc_security_connector_unref(&s->security_connector->base);
- gpr_free(s);
+static grpc_subchannel *subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ connector *c = gpr_malloc(sizeof(*c));
+ grpc_channel_args *final_args =
+ grpc_channel_args_merge(args->args, f->merge_args);
+ grpc_subchannel *s;
+ memset(c, 0, sizeof(*c));
+ c->base.vtable = &connector_vtable;
+ c->security_connector = f->security_connector;
+ gpr_mu_init(&c->mu);
+ gpr_ref_init(&c->refs, 1);
+ args->args = final_args;
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_connector_unref(exec_ctx, &c->base);
+ grpc_channel_args_destroy(final_args);
+ return s;
}
-static grpc_transport_setup_result complete_setup(void *channel_stack,
- grpc_transport *transport,
- grpc_mdctx *mdctx) {
- static grpc_channel_filter const *extra_filters[] = {
- &grpc_client_auth_filter, &grpc_http_client_filter};
- return grpc_client_channel_transport_setup_complete(
- channel_stack, transport, extra_filters, GPR_ARRAY_SIZE(extra_filters),
- mdctx);
-}
+static const grpc_subchannel_factory_vtable subchannel_factory_vtable = {
+ subchannel_factory_ref, subchannel_factory_unref,
+ subchannel_factory_create_subchannel};
/* Create a secure client channel:
Asynchronously: - resolve target
- connect to it (trying alternatives as presented)
- perform handshakes */
-grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
+grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
const char *target,
- const grpc_channel_args *args) {
- setup *s;
+ const grpc_channel_args *args,
+ void *reserved) {
grpc_channel *channel;
grpc_arg connector_arg;
grpc_channel_args *args_copy;
grpc_channel_args *new_args_from_connector;
- grpc_channel_security_connector *connector;
- grpc_mdctx *mdctx;
-#define MAX_FILTERS 3
- const grpc_channel_filter *filters[MAX_FILTERS];
- int n = 0;
+ grpc_channel_security_connector *security_connector;
+ grpc_resolver *resolver;
+ subchannel_factory *f;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE(
+ "grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
+ "reserved=%p)",
+ 4, (creds, target, args, reserved));
+ GPR_ASSERT(reserved == NULL);
if (grpc_find_security_connector_in_args(args) != NULL) {
gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
- return grpc_lame_client_channel_create();
+ grpc_exec_ctx_finish(&exec_ctx);
+ return grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INVALID_ARGUMENT,
+ "Security connector exists in channel args.");
}
- if (grpc_credentials_create_security_connector(
- creds, target, args, NULL, &connector, &new_args_from_connector) !=
+ if (grpc_channel_credentials_create_security_connector(
+ creds, target, args, &security_connector, &new_args_from_connector) !=
GRPC_SECURITY_OK) {
- return grpc_lame_client_channel_create();
+ grpc_exec_ctx_finish(&exec_ctx);
+ return grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INVALID_ARGUMENT,
+ "Failed to create security connector.");
}
- mdctx = grpc_mdctx_create();
- s = gpr_malloc(sizeof(setup));
- connector_arg = grpc_security_connector_to_arg(&connector->base);
+ connector_arg = grpc_security_connector_to_arg(&security_connector->base);
args_copy = grpc_channel_args_copy_and_add(
new_args_from_connector != NULL ? new_args_from_connector : args,
- &connector_arg);
- filters[n++] = &grpc_client_surface_filter;
- /* TODO(census)
- if (grpc_channel_args_is_census_enabled(args)) {
- filters[n++] = &grpc_client_census_filter;
- } */
- filters[n++] = &grpc_client_channel_filter;
- GPR_ASSERT(n <= MAX_FILTERS);
- channel = grpc_channel_create_from_filters(filters, n, args_copy, mdctx, 1);
+ &connector_arg, 1);
+
+ channel = grpc_channel_create(&exec_ctx, target, args_copy,
+ GRPC_CLIENT_CHANNEL, NULL);
+
+ f = gpr_malloc(sizeof(*f));
+ f->base.vtable = &subchannel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
+ GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "subchannel_factory");
+ f->security_connector = security_connector;
+ f->merge_args = grpc_channel_args_copy(args_copy);
+ f->master = channel;
+ GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
+ resolver = grpc_resolver_create(target, &f->base);
+ if (resolver) {
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ }
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ GRPC_SECURITY_CONNECTOR_UNREF(&security_connector->base, "channel_create");
grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
- s->target = gpr_strdup(target);
- s->setup_callback = complete_setup;
- s->setup_user_data = grpc_channel_get_channel_stack(channel);
- s->security_connector = connector;
- grpc_client_setup_create_and_attach(grpc_channel_get_channel_stack(channel),
- args, mdctx, initiate_setup, done_setup,
- s);
+ if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "subchannel_factory");
+ channel = NULL;
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+
return channel;
}
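
The rewritten secure channel constructor is representative of the wider conversion in this merge: the old setup/callback plumbing is replaced by grpc_closure objects scheduled on an explicit grpc_exec_ctx that the caller flushes before returning. A self-contained sketch of that pattern, using only the closure and exec_ctx primitives already exercised above (the callback body and its argument are illustrative):

    #include <stdbool.h>
    #include <grpc/support/log.h>
    #include "src/core/iomgr/closure.h"
    #include "src/core/iomgr/exec_ctx.h"

    static void on_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
      gpr_log(GPR_INFO, "closure for %s ran, success=%d", (const char *)arg,
              success);
    }

    static void run_closure_example(void) {
      grpc_closure done;
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_closure_init(&done, on_done, (void *)"example");
      /* nothing runs yet: the closure sits on the exec_ctx's list */
      grpc_exec_ctx_enqueue(&exec_ctx, &done, true, NULL);
      /* flushing the exec_ctx drains the list and invokes on_done */
      grpc_exec_ctx_finish(&exec_ctx);
    }

connected(), on_secure_handshake_done() and the connector's notify closure above all follow this shape, which is why grpc_secure_channel_create can finish its exec_ctx and return a channel without blocking on resolution or handshakes.
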
diff --git a/src/core/surface/server.c b/src/core/surface/server.c
index 13ec5bee94..da93474b26 100644
--- a/src/core/surface/server.c
+++ b/src/core/surface/server.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,32 +33,36 @@
#include "src/core/surface/server.h"
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
-#include "src/core/channel/census_filter.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/iomgr.h"
+#include "src/core/support/stack_lockfree.h"
#include "src/core/support/string.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/transport/metadata.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/useful.h>
-
-typedef enum { PENDING_START, ALL_CALLS, CALL_LIST_COUNT } call_list;
+#include "src/core/transport/static_metadata.h"
typedef struct listener {
void *arg;
- void (*start)(grpc_server *server, void *arg, grpc_pollset **pollsets,
- size_t pollset_count);
- void (*destroy)(grpc_server *server, void *arg);
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *closure);
struct listener *next;
+ grpc_closure destroy_done;
} listener;
typedef struct call_data call_data;
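
Listener callbacks now take the exec_ctx and report asynchronous teardown through a closure supplied by the server, which is how shutdown can count destroyed listeners without blocking. A sketch of a conforming destroy callback, assuming the same headers server.c includes above (the listener struct and its resources are invented for illustration):

    typedef struct example_listener {
      int fd; /* whatever resources the listener owns */
    } example_listener;

    static void example_listener_destroy(grpc_exec_ctx *exec_ctx,
                                         grpc_server *server, void *arg,
                                         grpc_closure *closure) {
      example_listener *l = arg;
      (void)server;
      /* ...close l->fd, drop pollset registrations... */
      gpr_free(l);
      /* completion signal: the server's listener_destroy_done closure bumps
         listeners_destroyed and re-checks maybe_finish_shutdown */
      grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
    }
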
@@ -72,40 +76,28 @@ typedef struct {
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
-typedef struct {
+typedef struct requested_call {
requested_call_type type;
void *tag;
+ grpc_server *server;
grpc_completion_queue *cq_bound_to_call;
grpc_completion_queue *cq_for_notification;
grpc_call **call;
+ grpc_cq_completion completion;
+ grpc_metadata_array *initial_metadata;
union {
struct {
grpc_call_details *details;
- grpc_metadata_array *initial_metadata;
} batch;
struct {
registered_method *registered_method;
gpr_timespec *deadline;
- grpc_metadata_array *initial_metadata;
grpc_byte_buffer **optional_payload;
} registered;
} data;
+ grpc_closure publish;
} requested_call;
-typedef struct {
- requested_call *calls;
- size_t count;
- size_t capacity;
-} requested_call_array;
-
-struct registered_method {
- char *method;
- char *host;
- call_data *pending;
- requested_call_array requested;
- registered_method *next;
-};
-
typedef struct channel_registered_method {
registered_method *server_registered_method;
grpc_mdstr *method;
@@ -114,27 +106,81 @@ typedef struct channel_registered_method {
struct channel_data {
grpc_server *server;
- size_t num_calls;
+ grpc_connectivity_state connectivity_state;
grpc_channel *channel;
- grpc_mdstr *path_key;
- grpc_mdstr *authority_key;
/* linked list of all channels on a server */
channel_data *next;
channel_data *prev;
channel_registered_method *registered_methods;
- gpr_uint32 registered_method_slots;
- gpr_uint32 registered_method_max_probes;
- grpc_iomgr_closure finish_destroy_channel_closure;
+ uint32_t registered_method_slots;
+ uint32_t registered_method_max_probes;
+ grpc_closure finish_destroy_channel_closure;
+ grpc_closure channel_connectivity_changed;
};
typedef struct shutdown_tag {
void *tag;
grpc_completion_queue *cq;
+ grpc_cq_completion completion;
} shutdown_tag;
+typedef enum {
+ /* waiting for metadata */
+ NOT_STARTED,
+ /* inital metadata read, not flow controlled in yet */
+ PENDING,
+  /* initial metadata read, not flow controlled in yet */
+ ACTIVATED,
+ /* cancelled before being queued */
+ ZOMBIED
+} call_state;
+
+typedef struct request_matcher request_matcher;
+
+struct call_data {
+ grpc_call *call;
+
+ /** protects state */
+ gpr_mu mu_state;
+ /** the current state of a call - see call_state */
+ call_state state;
+
+ grpc_mdstr *path;
+ grpc_mdstr *host;
+ gpr_timespec deadline;
+
+ grpc_completion_queue *cq_new;
+
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_array initial_metadata;
+
+ grpc_closure got_initial_metadata;
+ grpc_closure server_on_recv_initial_metadata;
+ grpc_closure kill_zombie_closure;
+ grpc_closure *on_done_recv_initial_metadata;
+
+ call_data *pending_next;
+};
+
+struct request_matcher {
+ call_data *pending_head;
+ call_data *pending_tail;
+ gpr_stack_lockfree *requests;
+};
+
+struct registered_method {
+ char *method;
+ char *host;
+ request_matcher request_matcher;
+ registered_method *next;
+};
+
+typedef struct {
+ grpc_channel **channels;
+ size_t num_channels;
+} channel_broadcaster;
+
struct grpc_server {
- size_t channel_filter_count;
- const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
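
The structures above, together with the request_freelist and requested_calls fields added to grpc_server in the next hunk, replace the realloc'd requested_call_array: request slots are plain integer indices recycled through a lock-free stack, so matching an incoming RPC to a waiting request no longer needs mu_call on the fast path. A standalone sketch of that freelist pattern (the size and function name are illustrative):

    #include "src/core/support/stack_lockfree.h"

    #define SLOT_COUNT 128

    static void freelist_demo(void) {
      int i, slot;
      gpr_stack_lockfree *freelist = gpr_stack_lockfree_create(SLOT_COUNT);
      /* seed with every index up front, as grpc_server_create now does */
      for (i = 0; i < SLOT_COUNT; i++) gpr_stack_lockfree_push(freelist, i);
      slot = gpr_stack_lockfree_pop(freelist); /* -1 means no free slot */
      if (slot != -1) {
        /* ...fill requested_calls[slot] and publish the index to a
           request_matcher's requests stack... */
        gpr_stack_lockfree_push(freelist, slot); /* recycle when completed */
      }
      gpr_stack_lockfree_destroy(freelist);
    }
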
@@ -149,163 +195,185 @@ struct grpc_server {
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
- gpr_mu mu_call; /* mutex for call-specific state */
+ gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
- requested_call_array requested_calls;
-
- gpr_uint8 shutdown;
- gpr_uint8 shutdown_published;
+ request_matcher unregistered_request_matcher;
+ /** free list of available requested_calls indices */
+ gpr_stack_lockfree *request_freelist;
+ /** requested call backing data */
+ requested_call *requested_calls;
+ size_t max_requested_calls;
+
+ gpr_atm shutdown_flag;
+ uint8_t shutdown_published;
size_t num_shutdown_tags;
shutdown_tag *shutdown_tags;
- call_data *lists[CALL_LIST_COUNT];
channel_data root_channel_data;
listener *listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
+
+ /** when did we print the last shutdown progress message */
+ gpr_timespec last_shutdown_message_time;
};
-typedef enum {
- /* waiting for metadata */
- NOT_STARTED,
- /* inital metadata read, not flow controlled in yet */
- PENDING,
- /* flow controlled in, on completion queue */
- ACTIVATED,
- /* cancelled before being queued */
- ZOMBIED
-} call_state;
+#define SERVER_FROM_CALL_ELEM(elem) \
+ (((channel_data *)(elem)->channel_data)->server)
-struct call_data {
- grpc_call *call;
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc);
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc);
+/* Before calling maybe_finish_shutdown, we must hold mu_global and not
+ hold mu_call */
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server);
- call_state state;
- grpc_mdstr *path;
- grpc_mdstr *host;
- gpr_timespec deadline;
- int got_initial_metadata;
+/*
+ * channel broadcaster
+ */
- grpc_completion_queue *cq_new;
+/* assumes server locked */
+static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
+ channel_data *c;
+ size_t count = 0;
+ for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
+ count++;
+ }
+ cb->num_channels = count;
+ cb->channels = gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ count = 0;
+ for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
+ cb->channels[count++] = c->channel;
+ GRPC_CHANNEL_INTERNAL_REF(c->channel, "broadcast");
+ }
+}
- grpc_stream_op_buffer *recv_ops;
- grpc_stream_state *recv_state;
- grpc_iomgr_closure *on_done_recv;
+struct shutdown_cleanup_args {
+ grpc_closure closure;
+ gpr_slice slice;
+};
- grpc_iomgr_closure server_on_recv;
- grpc_iomgr_closure kill_zombie_closure;
+static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_status_ignored) {
+ struct shutdown_cleanup_args *a = arg;
+ gpr_slice_unref(a->slice);
+ gpr_free(a);
+}
- call_data **root[CALL_LIST_COUNT];
- call_link links[CALL_LIST_COUNT];
-};
+static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ int send_goaway, int send_disconnect) {
+ grpc_transport_op op;
+ struct shutdown_cleanup_args *sc;
+ grpc_channel_element *elem;
+
+ memset(&op, 0, sizeof(op));
+ op.send_goaway = send_goaway;
+ sc = gpr_malloc(sizeof(*sc));
+ sc->slice = gpr_slice_from_copied_string("Server shutdown");
+ op.goaway_message = &sc->slice;
+ op.goaway_status = GRPC_STATUS_OK;
+ op.disconnect = send_disconnect;
+ grpc_closure_init(&sc->closure, shutdown_cleanup, sc);
+ op.on_consumed = &sc->closure;
+
+ elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
+}
-#define SERVER_FROM_CALL_ELEM(elem) \
- (((channel_data *)(elem)->channel_data)->server)
+static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx,
+ channel_broadcaster *cb,
+ int send_goaway,
+ int force_disconnect) {
+ size_t i;
-static void begin_call(grpc_server *server, call_data *calld,
- requested_call *rc);
-static void fail_call(grpc_server *server, requested_call *rc);
-static void shutdown_channel(channel_data *chand, int send_goaway,
- int send_disconnect);
-/* Before calling maybe_finish_shutdown, we must hold mu_global and not
- hold mu_call */
-static void maybe_finish_shutdown(grpc_server *server);
-
-static int call_list_join(call_data **root, call_data *call, call_list list) {
- GPR_ASSERT(!call->root[list]);
- call->root[list] = root;
- if (!*root) {
- *root = call;
- call->links[list].next = call->links[list].prev = call;
- } else {
- call->links[list].next = *root;
- call->links[list].prev = (*root)->links[list].prev;
- call->links[list].next->links[list].prev =
- call->links[list].prev->links[list].next = call;
+ for (i = 0; i < cb->num_channels; i++) {
+ send_shutdown(exec_ctx, cb->channels[i], send_goaway, force_disconnect);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast");
}
- return 1;
+ gpr_free(cb->channels);
}
-static call_data *call_list_remove_head(call_data **root, call_list list) {
- call_data *out = *root;
- if (out) {
- out->root[list] = NULL;
- if (out->links[list].next == out) {
- *root = NULL;
- } else {
- *root = out->links[list].next;
- out->links[list].next->links[list].prev = out->links[list].prev;
- out->links[list].prev->links[list].next = out->links[list].next;
- }
- }
- return out;
+/*
+ * request_matcher
+ */
+
+static void request_matcher_init(request_matcher *rm, size_t entries) {
+ memset(rm, 0, sizeof(*rm));
+ rm->requests = gpr_stack_lockfree_create(entries);
}
-static int call_list_remove(call_data *call, call_list list) {
- call_data **root = call->root[list];
- if (root == NULL) return 0;
- call->root[list] = NULL;
- if (*root == call) {
- *root = call->links[list].next;
- if (*root == call) {
- *root = NULL;
- return 1;
- }
- }
- GPR_ASSERT(*root != call);
- call->links[list].next->links[list].prev = call->links[list].prev;
- call->links[list].prev->links[list].next = call->links[list].next;
- return 1;
+static void request_matcher_destroy(request_matcher *rm) {
+ GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests) == -1);
+ gpr_stack_lockfree_destroy(rm->requests);
}
-static void requested_call_array_destroy(requested_call_array *array) {
- gpr_free(array->calls);
+static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, bool success) {
+ grpc_call_destroy(grpc_call_from_top_element(elem));
}
-static requested_call *requested_call_array_add(requested_call_array *array) {
- requested_call *rc;
- if (array->count == array->capacity) {
- array->capacity = GPR_MAX(array->capacity + 8, array->capacity * 2);
- array->calls =
- gpr_realloc(array->calls, sizeof(requested_call) * array->capacity);
+static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
+ request_matcher *rm) {
+ while (rm->pending_head) {
+ call_data *calld = rm->pending_head;
+ rm->pending_head = calld->pending_next;
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
}
- rc = &array->calls[array->count++];
- memset(rc, 0, sizeof(*rc));
- return rc;
}
+static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
+ request_matcher *rm) {
+ int request_id;
+ while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
+ fail_call(exec_ctx, server, &server->requested_calls[request_id]);
+ }
+}
+
+/*
+ * server proper
+ */
+
static void server_ref(grpc_server *server) {
gpr_ref(&server->internal_refcount);
}
-static void server_delete(grpc_server *server) {
+static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
registered_method *rm;
size_t i;
grpc_channel_args_destroy(server->channel_args);
gpr_mu_destroy(&server->mu_global);
gpr_mu_destroy(&server->mu_call);
- gpr_free(server->channel_filters);
- requested_call_array_destroy(&server->requested_calls);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
+ request_matcher_destroy(&rm->request_matcher);
gpr_free(rm->method);
gpr_free(rm->host);
- requested_call_array_destroy(&rm->requested);
gpr_free(rm);
}
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
}
+ request_matcher_destroy(&server->unregistered_request_matcher);
+ gpr_stack_lockfree_destroy(server->request_freelist);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
+ gpr_free(server->requested_calls);
gpr_free(server);
}
-static void server_unref(grpc_server *server) {
+static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) {
if (gpr_unref(&server->internal_refcount)) {
- server_delete(server);
+ server_delete(exec_ctx, server);
}
}
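
channel_broadcaster exists so shutdown can snapshot the live channel list under mu_global and then deliver GOAWAY/disconnect transport ops without holding any server lock; send_shutdown pairs the goaway slice with the on_consumed closure that frees it. A sketch of the intended call order, written as a hypothetical wrapper around the file-local helpers above (grpc_server_shutdown_and_notify below does the same inline):

    static void shutdown_all_channels(grpc_exec_ctx *exec_ctx,
                                      grpc_server *server) {
      channel_broadcaster broadcaster;
      gpr_mu_lock(&server->mu_global);
      channel_broadcaster_init(server, &broadcaster); /* one ref per channel */
      gpr_mu_unlock(&server->mu_global);
      /* lock-free fan-out: GOAWAY each channel, drop the refs, free the
         snapshot array */
      channel_broadcaster_shutdown(exec_ctx, &broadcaster, 1 /* send_goaway */,
                                   0 /* force_disconnect */);
    }
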
@@ -319,64 +387,89 @@ static void orphan_channel(channel_data *chand) {
chand->next = chand->prev = chand;
}
-static void finish_destroy_channel(void *cd, int success) {
+static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
+ bool success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
- GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
- server_unref(server);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
+ server_unref(exec_ctx, server);
}
-static void destroy_channel(channel_data *chand) {
+static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand) {
if (is_channel_orphaned(chand)) return;
GPR_ASSERT(chand->server != NULL);
orphan_channel(chand);
server_ref(chand->server);
- maybe_finish_shutdown(chand->server);
+ maybe_finish_shutdown(exec_ctx, chand->server);
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
- grpc_iomgr_add_callback(&chand->finish_destroy_channel_closure);
+
+ grpc_transport_op op;
+ memset(&op, 0, sizeof(op));
+ op.set_accept_stream = true;
+ op.on_consumed = &chand->finish_destroy_channel_closure;
+ grpc_channel_next_op(exec_ctx,
+ grpc_channel_stack_element(
+ grpc_channel_get_channel_stack(chand->channel), 0),
+ &op);
}
-static void finish_start_new_rpc_and_unlock(grpc_server *server,
- grpc_call_element *elem,
- call_data **pending_root,
- requested_call_array *array) {
- requested_call rc;
+static void finish_start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_call_element *elem, request_matcher *rm) {
call_data *calld = elem->call_data;
- if (array->count == 0) {
+ int request_id;
+
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
+ return;
+ }
+
+ request_id = gpr_stack_lockfree_pop(rm->requests);
+ if (request_id == -1) {
+ gpr_mu_lock(&server->mu_call);
+ gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
- call_list_join(pending_root, calld, PENDING_START);
+ gpr_mu_unlock(&calld->mu_state);
+ if (rm->pending_head == NULL) {
+ rm->pending_tail = rm->pending_head = calld;
+ } else {
+ rm->pending_tail->pending_next = calld;
+ rm->pending_tail = calld;
+ }
+ calld->pending_next = NULL;
gpr_mu_unlock(&server->mu_call);
} else {
- rc = array->calls[--array->count];
+ gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
- gpr_mu_unlock(&server->mu_call);
- begin_call(server, calld, &rc);
+ gpr_mu_unlock(&calld->mu_state);
+ begin_call(exec_ctx, server, calld, &server->requested_calls[request_id]);
}
}
-static void start_new_rpc(grpc_call_element *elem) {
+static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_server *server = chand->server;
- gpr_uint32 i;
- gpr_uint32 hash;
+ uint32_t i;
+ uint32_t hash;
channel_registered_method *rm;
- gpr_mu_lock(&server->mu_call);
if (chand->registered_methods && calld->path && calld->host) {
/* TODO(ctiller): unify these two searches */
/* check for an exact match with host */
hash = GRPC_MDSTR_KV_HASH(calld->host->hash, calld->path->hash);
- for (i = 0; i < chand->registered_method_max_probes; i++) {
+ for (i = 0; i <= chand->registered_method_max_probes; i++) {
rm = &chand->registered_methods[(hash + i) %
chand->registered_method_slots];
if (!rm) break;
if (rm->host != calld->host) continue;
if (rm->method != calld->path) continue;
- finish_start_new_rpc_and_unlock(server, elem,
- &rm->server_registered_method->pending,
- &rm->server_registered_method->requested);
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &rm->server_registered_method->request_matcher);
return;
}
/* check for a wildcard method definition (no host set) */
@@ -387,18 +480,13 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != NULL) continue;
if (rm->method != calld->path) continue;
- finish_start_new_rpc_and_unlock(server, elem,
- &rm->server_registered_method->pending,
- &rm->server_registered_method->requested);
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &rm->server_registered_method->request_matcher);
return;
}
}
- finish_start_new_rpc_and_unlock(server, elem, &server->lists[PENDING_START],
- &server->requested_calls);
-}
-
-static void kill_zombie(void *elem, int success) {
- grpc_call_destroy(grpc_call_from_top_element(elem));
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &server->unregistered_request_matcher);
}
static int num_listeners(grpc_server *server) {
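
start_new_rpc resolves registered methods with linear probing over a table of interned metadata strings, so slot comparison is plain pointer equality; note the probe bound also changes from < to <= registered_method_max_probes, so a method stored exactly max_probes slots past its hash is still found. A self-contained illustration of that lookup shape in plain C (not the gRPC types):

    #include <stddef.h>

    typedef struct {
      const void *method; /* interned: pointer equality is value equality */
      const void *host;   /* NULL for wildcard-host entries */
    } method_slot;

    static const method_slot *probe_lookup(const method_slot *table,
                                           size_t nslots, unsigned max_probes,
                                           unsigned hash, const void *method,
                                           const void *host) {
      unsigned i;
      for (i = 0; i <= max_probes; i++) {  /* inclusive bound, as fixed above */
        const method_slot *s = &table[(hash + i) % nslots];
        if (s->method == NULL) break;      /* empty slot: no match further on */
        if (s->host == host && s->method == method) return s;
      }
      return NULL;
    }
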
@@ -410,294 +498,246 @@ static int num_listeners(grpc_server *server) {
return n;
}
-static void maybe_finish_shutdown(grpc_server *server) {
- size_t i;
- if (!server->shutdown || server->shutdown_published) {
- return;
+static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_cq_completion *completion) {
+ server_unref(exec_ctx, server);
+}
+
+static int num_channels(grpc_server *server) {
+ channel_data *chand;
+ int n = 0;
+ for (chand = server->root_channel_data.next;
+ chand != &server->root_channel_data; chand = chand->next) {
+ n++;
}
+ return n;
+}
- gpr_mu_lock(&server->mu_call);
- if (server->lists[ALL_CALLS] != NULL) {
- gpr_log(GPR_DEBUG,
- "Waiting for all calls to finish before destroying server");
- gpr_mu_unlock(&server->mu_call);
- return;
+static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
+ registered_method *rm;
+ request_matcher_kill_requests(exec_ctx, server,
+ &server->unregistered_request_matcher);
+ request_matcher_zombify_all_pending_calls(
+ exec_ctx, &server->unregistered_request_matcher);
+ for (rm = server->registered_methods; rm; rm = rm->next) {
+ request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
}
- gpr_mu_unlock(&server->mu_call);
+}
- if (server->root_channel_data.next != &server->root_channel_data) {
- gpr_log(GPR_DEBUG,
- "Waiting for all channels to close before destroying server");
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
+ size_t i;
+ if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
return;
}
- if (server->listeners_destroyed < num_listeners(server)) {
- gpr_log(GPR_DEBUG, "Waiting for all listeners to be destroyed (@ %d/%d)",
- server->listeners_destroyed, num_listeners(server));
+
+ kill_pending_work_locked(exec_ctx, server);
+
+ if (server->root_channel_data.next != &server->root_channel_data ||
+ server->listeners_destroyed < num_listeners(server)) {
+ if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
+ server->last_shutdown_message_time),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+ server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
+ gpr_log(GPR_DEBUG,
+ "Waiting for %d channels and %d/%d listeners to be destroyed"
+ " before shutting down server",
+ num_channels(server),
+ num_listeners(server) - server->listeners_destroyed,
+ num_listeners(server));
+ }
return;
}
server->shutdown_published = 1;
for (i = 0; i < server->num_shutdown_tags; i++) {
- grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
- NULL, 1);
+ server_ref(server);
+ grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq,
+ server->shutdown_tags[i].tag, 1, done_shutdown_event, server,
+ &server->shutdown_tags[i].completion);
}
}
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
- channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- if (md->key == chand->path_key) {
- calld->path = grpc_mdstr_ref(md->value);
+ if (md->key == GRPC_MDSTR_PATH) {
+ calld->path = GRPC_MDSTR_REF(md->value);
return NULL;
- } else if (md->key == chand->authority_key) {
- calld->host = grpc_mdstr_ref(md->value);
+ } else if (md->key == GRPC_MDSTR_AUTHORITY) {
+ calld->host = GRPC_MDSTR_REF(md->value);
return NULL;
}
return md;
}
-static void decrement_call_count(channel_data *chand) {
- chand->num_calls--;
- if (0 == chand->num_calls && chand->server->shutdown) {
- shutdown_channel(chand, 0, 1);
- }
- maybe_finish_shutdown(chand->server);
-}
-
-static void server_on_recv(void *ptr, int success) {
+static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ bool success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- int remove_res;
-
- if (success && !calld->got_initial_metadata) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
- if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) {
- calld->deadline = op->data.metadata.deadline;
- }
- calld->got_initial_metadata = 1;
- start_new_rpc(elem);
- break;
- }
- }
+ gpr_timespec op_deadline;
- switch (*calld->recv_state) {
- case GRPC_STREAM_OPEN:
- break;
- case GRPC_STREAM_SEND_CLOSED:
- break;
- case GRPC_STREAM_RECV_CLOSED:
- gpr_mu_lock(&chand->server->mu_call);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
- }
- gpr_mu_unlock(&chand->server->mu_call);
- break;
- case GRPC_STREAM_CLOSED:
- gpr_mu_lock(&chand->server->mu_call);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
- } else if (calld->state == PENDING) {
- call_list_remove(calld, PENDING_START);
- calld->state = ZOMBIED;
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
- }
- remove_res = call_list_remove(calld, ALL_CALLS);
- gpr_mu_unlock(&chand->server->mu_call);
- gpr_mu_lock(&chand->server->mu_global);
- if (remove_res) {
- decrement_call_count(chand);
- }
- gpr_mu_unlock(&chand->server->mu_global);
- break;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, elem);
+ op_deadline = calld->recv_initial_metadata->deadline;
+ if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
+ calld->deadline = op_deadline;
+ }
+ if (calld->host && calld->path) {
+ /* do nothing */
+ } else {
+ success = 0;
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv_initial_metadata->cb(
+ exec_ctx, calld->on_done_recv_initial_metadata->cb_arg, success);
}
-static void server_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
+static void server_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
- if (op->recv_ops) {
- /* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->recv_state = op->recv_state;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->server_on_recv;
+ if (op->recv_initial_metadata != NULL) {
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->server_on_recv_initial_metadata;
}
}
-static void server_start_transport_op(grpc_call_element *elem,
- grpc_transport_op *op) {
+static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
-static void channel_op(grpc_channel_element *elem,
- grpc_channel_element *from_elem, grpc_channel_op *op) {
- channel_data *chand = elem->channel_data;
- grpc_server *server = chand->server;
-
- switch (op->type) {
- case GRPC_ACCEPT_CALL:
- /* create a call */
- grpc_call_create(chand->channel, NULL,
- op->data.accept_call.transport_server_data, NULL, 0,
- gpr_inf_future);
- break;
- case GRPC_TRANSPORT_CLOSED:
- /* if the transport is closed for a server channel, we destroy the
- channel */
- gpr_mu_lock(&server->mu_global);
- server_ref(server);
- destroy_channel(chand);
- gpr_mu_unlock(&server->mu_global);
- server_unref(server);
- break;
- case GRPC_TRANSPORT_GOAWAY:
- gpr_slice_unref(op->data.goaway.message);
- break;
- default:
- GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
- grpc_channel_next_op(elem, op);
- break;
+static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ bool success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ if (success) {
+ start_new_rpc(exec_ctx, elem);
+ } else {
+ gpr_mu_lock(&calld->mu_state);
+ if (calld->state == NOT_STARTED) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
+ } else if (calld->state == PENDING) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ /* zombied call will be destroyed when it's removed from the pending
+ queue... later */
+ } else {
+ gpr_mu_unlock(&calld->mu_state);
+ }
}
}
-typedef struct {
- channel_data *chand;
- int send_goaway;
- int send_disconnect;
- grpc_iomgr_closure finish_shutdown_channel_closure;
-} shutdown_channel_args;
-
-static void finish_shutdown_channel(void *p, int success) {
- shutdown_channel_args *sca = p;
- grpc_channel_op op;
-
- if (sca->send_goaway) {
- op.type = GRPC_CHANNEL_GOAWAY;
- op.dir = GRPC_CALL_DOWN;
- op.data.goaway.status = GRPC_STATUS_OK;
- op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown");
- channel_op(grpc_channel_stack_element(
- grpc_channel_get_channel_stack(sca->chand->channel), 0),
- NULL, &op);
- }
- if (sca->send_disconnect) {
- op.type = GRPC_CHANNEL_DISCONNECT;
- op.dir = GRPC_CALL_DOWN;
- channel_op(grpc_channel_stack_element(
- grpc_channel_get_channel_stack(sca->chand->channel), 0),
- NULL, &op);
- }
- GRPC_CHANNEL_INTERNAL_UNREF(sca->chand->channel, "shutdown");
-
- gpr_free(sca);
+static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
+ grpc_transport *transport,
+ const void *transport_server_data) {
+ channel_data *chand = cd;
+ /* create a call */
+ grpc_call *call =
+ grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data,
+ NULL, 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call_element *elem =
+ grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ call_data *calld = elem->call_data;
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_RECV_INITIAL_METADATA;
+ op.data.recv_initial_metadata = &calld->initial_metadata;
+ grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
+ grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
+ &calld->got_initial_metadata);
}
-static void shutdown_channel(channel_data *chand, int send_goaway,
- int send_disconnect) {
- shutdown_channel_args *sca;
- GRPC_CHANNEL_INTERNAL_REF(chand->channel, "shutdown");
- sca = gpr_malloc(sizeof(shutdown_channel_args));
- sca->chand = chand;
- sca->send_goaway = send_goaway;
- sca->send_disconnect = send_disconnect;
- sca->finish_shutdown_channel_closure.cb = finish_shutdown_channel;
- sca->finish_shutdown_channel_closure.cb_arg = sca;
- grpc_iomgr_add_callback(&sca->finish_shutdown_channel_closure);
+static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
+ bool iomgr_status_ignored) {
+ channel_data *chand = cd;
+ grpc_server *server = chand->server;
+ if (chand->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_transport_op op;
+ memset(&op, 0, sizeof(op));
+ op.on_connectivity_state_change = &chand->channel_connectivity_changed,
+ op.connectivity_state = &chand->connectivity_state;
+ grpc_channel_next_op(exec_ctx,
+ grpc_channel_stack_element(
+ grpc_channel_get_channel_stack(chand->channel), 0),
+ &op);
+ } else {
+ gpr_mu_lock(&server->mu_global);
+ destroy_channel(exec_ctx, chand);
+ gpr_mu_unlock(&server->mu_global);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity");
+ }
}
-static void init_call_elem(grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_op *initial_op) {
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
- calld->deadline = gpr_inf_future;
+ calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
calld->call = grpc_call_from_top_element(elem);
+ gpr_mu_init(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->server_on_recv, server_on_recv, elem);
-
- gpr_mu_lock(&chand->server->mu_call);
- call_list_join(&chand->server->lists[ALL_CALLS], calld, ALL_CALLS);
- gpr_mu_unlock(&chand->server->mu_call);
-
- gpr_mu_lock(&chand->server->mu_global);
- chand->num_calls++;
- gpr_mu_unlock(&chand->server->mu_global);
+ grpc_closure_init(&calld->server_on_recv_initial_metadata,
+ server_on_recv_initial_metadata, elem);
server_ref(chand->server);
-
- if (initial_op) server_mutate_op(elem, initial_op);
}
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- int removed[CALL_LIST_COUNT];
- size_t i;
- gpr_mu_lock(&chand->server->mu_call);
- for (i = 0; i < CALL_LIST_COUNT; i++) {
- removed[i] = call_list_remove(elem->call_data, i);
- }
- gpr_mu_unlock(&chand->server->mu_call);
- if (removed[ALL_CALLS]) {
- gpr_mu_lock(&chand->server->mu_global);
- decrement_call_count(chand);
- gpr_mu_unlock(&chand->server->mu_global);
- }
+ GPR_ASSERT(calld->state != PENDING);
if (calld->host) {
- grpc_mdstr_unref(calld->host);
+ GRPC_MDSTR_UNREF(calld->host);
}
if (calld->path) {
- grpc_mdstr_unref(calld->path);
+ GRPC_MDSTR_UNREF(calld->path);
}
+ grpc_metadata_array_destroy(&calld->initial_metadata);
- server_unref(chand->server);
+ gpr_mu_destroy(&calld->mu_state);
+
+ server_unref(exec_ctx, chand->server);
}
-static void init_channel_elem(grpc_channel_element *elem,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
- GPR_ASSERT(is_first);
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(!args->is_last);
chand->server = NULL;
- chand->num_calls = 0;
chand->channel = NULL;
- chand->path_key = grpc_mdstr_from_string(metadata_context, ":path");
- chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority");
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
+ chand->connectivity_state = GRPC_CHANNEL_IDLE;
+ grpc_closure_init(&chand->channel_connectivity_changed,
+ channel_connectivity_changed, chand);
}
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
size_t i;
channel_data *chand = elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
if (chand->registered_methods[i].method) {
- grpc_mdstr_unref(chand->registered_methods[i].method);
+ GRPC_MDSTR_UNREF(chand->registered_methods[i].method);
}
if (chand->registered_methods[i].host) {
- grpc_mdstr_unref(chand->registered_methods[i].host);
+ GRPC_MDSTR_UNREF(chand->registered_methods[i].host);
}
}
gpr_free(chand->registered_methods);
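
The hunk below replaces the old server_surface_filter with grpc_server_top_filter, laid out against the same widened grpc_channel_filter vtable that grpc_lame_filter uses above: stream-op and transport-op entry points, element sizes paired with init/destroy hooks, a set_pollset hook, a get_peer hook, and the filter name. A skeletal pass-through filter in that field order, assuming only the generic helpers this diff already references (the no-op callbacks and filter name are invented):

    static void noop_init_call_elem(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *elem,
                                    grpc_call_element_args *args) {}
    static void noop_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                       grpc_call_element *elem) {}
    static void noop_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                       grpc_channel_element *elem,
                                       grpc_channel_element_args *args) {}
    static void noop_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                          grpc_channel_element *elem) {}

    static const grpc_channel_filter example_passthrough_filter = {
        grpc_call_next_op,                  /* start_transport_stream_op */
        grpc_channel_next_op,               /* start_transport_op */
        0,                                  /* sizeof_call_data */
        noop_init_call_elem,
        grpc_call_stack_ignore_set_pollset, /* set_pollset */
        noop_destroy_call_elem,
        0,                                  /* sizeof_channel_data */
        noop_init_channel_elem,
        noop_destroy_channel_elem,
        grpc_call_next_get_peer,            /* get_peer */
        "example-passthrough",
    };
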
@@ -707,29 +747,27 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
chand->next->prev = chand->prev;
chand->prev->next = chand->next;
chand->next = chand->prev = chand;
- maybe_finish_shutdown(chand->server);
+ maybe_finish_shutdown(exec_ctx, chand->server);
gpr_mu_unlock(&chand->server->mu_global);
- grpc_mdstr_unref(chand->path_key);
- grpc_mdstr_unref(chand->authority_key);
- server_unref(chand->server);
+ server_unref(exec_ctx, chand->server);
}
}
-static const grpc_channel_filter server_surface_filter = {
- server_start_transport_op,
- channel_op,
- sizeof(call_data),
- init_call_elem,
- destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- "server",
+const grpc_channel_filter grpc_server_top_filter = {
+ server_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "server",
};
void grpc_server_register_completion_queue(grpc_server *server,
- grpc_completion_queue *cq) {
+ grpc_completion_queue *cq,
+ void *reserved) {
size_t i, n;
+ GRPC_API_TRACE(
+ "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
+ (server, cq, reserved));
+ GPR_ASSERT(!reserved);
for (i = 0; i < server->cq_count; i++) {
if (server->cqs[i] == cq) return;
}
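
grpc_server_register_completion_queue above now takes a reserved pointer and is API-traced, like the other public entry points in this file. For orientation, a minimal sketch of the call order those entry points assume on the application side (address and method name are placeholders; completion-queue shutdown and teardown are omitted):

    #include <grpc/grpc.h>

    static grpc_server *start_example_server(void) {
      grpc_server *server = grpc_server_create(NULL, NULL);
      grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
      /* queues and methods must be registered before grpc_server_start */
      grpc_server_register_completion_queue(server, cq, NULL);
      grpc_server_register_method(server, "/example.Service/Method", NULL);
      grpc_server_add_insecure_http2_port(server, "127.0.0.1:50051");
      grpc_server_start(server);
      return server;
    }
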
@@ -741,13 +779,10 @@ void grpc_server_register_completion_queue(grpc_server *server,
server->cqs[n] = cq;
}
-grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
- size_t filter_count,
- const grpc_channel_args *args) {
+grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
size_t i;
- /* TODO(census): restore this once we finalize census filter etc.
- int census_enabled = grpc_channel_args_is_census_enabled(args); */
- int census_enabled = 0;
+
+ GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
grpc_server *server = gpr_malloc(sizeof(grpc_server));
@@ -763,23 +798,17 @@ grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
- /* Server filter stack is:
-
- server_surface_filter - for making surface API calls
- grpc_server_census_filter (optional) - for stats collection and tracing
- {passed in filter stack}
- grpc_connected_channel_filter - for interfacing with transports */
- server->channel_filter_count = filter_count + 1 + census_enabled;
- server->channel_filters =
- gpr_malloc(server->channel_filter_count * sizeof(grpc_channel_filter *));
- server->channel_filters[0] = &server_surface_filter;
- /* TODO(census): restore this once we rework census filter
- if (census_enabled) {
- server->channel_filters[1] = &grpc_server_census_filter;
- } */
- for (i = 0; i < filter_count; i++) {
- server->channel_filters[i + 1 + census_enabled] = filters[i];
+ /* TODO(ctiller): expose a channel_arg for this */
+ server->max_requested_calls = 32768;
+ server->request_freelist =
+ gpr_stack_lockfree_create(server->max_requested_calls);
+ for (i = 0; i < (size_t)server->max_requested_calls; i++) {
+ gpr_stack_lockfree_push(server->request_freelist, (int)i);
}
+ request_matcher_init(&server->unregistered_request_matcher,
+ server->max_requested_calls);
+ server->requested_calls = gpr_malloc(server->max_requested_calls *
+ sizeof(*server->requested_calls));
server->channel_args = grpc_channel_args_copy(args);
@@ -796,6 +825,8 @@ static int streq(const char *a, const char *b) {
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host) {
registered_method *m;
+ GRPC_API_TRACE("grpc_server_register_method(server=%p, method=%s, host=%s)",
+ 3, (server, method, host));
if (!method) {
gpr_log(GPR_ERROR,
"grpc_server_register_method method string cannot be NULL");
@@ -810,6 +841,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
}
m = gpr_malloc(sizeof(registered_method));
memset(m, 0, sizeof(*m));
+ request_matcher_init(&m->request_matcher, server->max_requested_calls);
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -820,6 +852,9 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
void grpc_server_start(grpc_server *server) {
listener *l;
size_t i;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
@@ -827,17 +862,15 @@ void grpc_server_start(grpc_server *server) {
}
for (l = server->listeners; l; l = l->next) {
- l->start(server, l->arg, server->pollsets, server->cq_count);
+ l->start(&exec_ctx, server, l->arg, server->pollsets, server->cq_count);
}
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
-grpc_transport_setup_result grpc_server_setup_transport(
- grpc_server *s, grpc_transport *transport,
- grpc_channel_filter const **extra_filters, size_t num_extra_filters,
- grpc_mdctx *mdctx, const grpc_channel_args *args) {
- size_t num_filters = s->channel_filter_count + num_extra_filters + 1;
- grpc_channel_filter const **filters =
- gpr_malloc(sizeof(grpc_channel_filter *) * num_filters);
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
+ grpc_transport *transport,
+ const grpc_channel_args *args) {
size_t i;
size_t num_registered_methods;
size_t alloc;
@@ -847,29 +880,22 @@ grpc_transport_setup_result grpc_server_setup_transport(
channel_data *chand;
grpc_mdstr *host;
grpc_mdstr *method;
- gpr_uint32 hash;
- gpr_uint32 slots;
- gpr_uint32 probes;
- gpr_uint32 max_probes = 0;
- grpc_transport_setup_result result;
-
- for (i = 0; i < s->channel_filter_count; i++) {
- filters[i] = s->channel_filters[i];
- }
- for (; i < s->channel_filter_count + num_extra_filters; i++) {
- filters[i] = extra_filters[i - s->channel_filter_count];
- }
- filters[i] = &grpc_connected_channel_filter;
+ uint32_t hash;
+ size_t slots;
+ uint32_t probes;
+ uint32_t max_probes = 0;
+ grpc_transport_op op;
for (i = 0; i < s->cq_count; i++) {
- grpc_transport_add_to_pollset(transport, grpc_cq_pollset(s->cqs[i]));
+ memset(&op, 0, sizeof(op));
+ op.bind_pollset = grpc_cq_pollset(s->cqs[i]);
+ grpc_transport_perform_op(exec_ctx, transport, &op);
}
channel =
- grpc_channel_create_from_filters(filters, num_filters, args, mdctx, 0);
+ grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport);
chand = (channel_data *)grpc_channel_stack_element(
- grpc_channel_get_channel_stack(channel), 0)
- ->channel_data;
+ grpc_channel_get_channel_stack(channel), 0)->channel_data;
chand->server = s;
server_ref(s);
chand->channel = channel;
@@ -886,11 +912,11 @@ grpc_transport_setup_result grpc_server_setup_transport(
chand->registered_methods = gpr_malloc(alloc);
memset(chand->registered_methods, 0, alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
- host = rm->host ? grpc_mdstr_from_string(mdctx, rm->host) : NULL;
- method = grpc_mdstr_from_string(mdctx, rm->method);
+ host = rm->host ? grpc_mdstr_from_string(rm->host) : NULL;
+ method = grpc_mdstr_from_string(rm->method);
hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
for (probes = 0; chand->registered_methods[(hash + probes) % slots]
- .server_registered_method != NULL;
+ .server_registered_method != NULL;
probes++)
;
if (probes > max_probes) max_probes = probes;
@@ -899,147 +925,121 @@ grpc_transport_setup_result grpc_server_setup_transport(
crm->host = host;
crm->method = method;
}
- chand->registered_method_slots = slots;
+ GPR_ASSERT(slots <= UINT32_MAX);
+ chand->registered_method_slots = (uint32_t)slots;
chand->registered_method_max_probes = max_probes;
}
- result = grpc_connected_channel_bind_transport(
- grpc_channel_get_channel_stack(channel), transport);
-
gpr_mu_lock(&s->mu_global);
chand->next = &s->root_channel_data;
chand->prev = chand->next->prev;
chand->next->prev = chand->prev->next = chand;
gpr_mu_unlock(&s->mu_global);
- gpr_free(filters);
+ GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity");
+ memset(&op, 0, sizeof(op));
+ op.set_accept_stream = true;
+ op.set_accept_stream_fn = accept_stream;
+ op.set_accept_stream_user_data = chand;
+ op.on_connectivity_state_change = &chand->channel_connectivity_changed;
+ op.connectivity_state = &chand->connectivity_state;
+ op.disconnect = gpr_atm_acq_load(&s->shutdown_flag) != 0;
+ grpc_transport_perform_op(exec_ctx, transport, &op);
+}
+
+void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage) {
+ (void)done_arg;
+ gpr_free(storage);
+}
- return result;
+static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
+ bool success) {
+ grpc_server *server = s;
+ gpr_mu_lock(&server->mu_global);
+ server->listeners_destroyed++;
+ maybe_finish_shutdown(exec_ctx, server);
+ gpr_mu_unlock(&server->mu_global);
}
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
- requested_call_array requested_calls;
- channel_data *c;
- size_t i;
- registered_method *rm;
shutdown_tag *sdt;
+ channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
+ (server, cq, tag));
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global);
- grpc_cq_begin_op(cq, NULL);
+ grpc_cq_begin_op(cq, tag);
+ if (server->shutdown_published) {
+ grpc_cq_end_op(&exec_ctx, cq, tag, 1, done_published_shutdown, NULL,
+ gpr_malloc(sizeof(grpc_cq_completion)));
+ gpr_mu_unlock(&server->mu_global);
+ goto done;
+ }
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
- if (server->shutdown) {
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_unlock(&server->mu_global);
- return;
+ goto done;
}
- for (c = server->root_channel_data.next; c != &server->root_channel_data;
- c = c->next) {
- shutdown_channel(c, 1, c->num_calls == 0);
- }
+ server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
+
+ channel_broadcaster_init(server, &broadcaster);
+
+ gpr_atm_rel_store(&server->shutdown_flag, 1);
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
- requested_calls = server->requested_calls;
- memset(&server->requested_calls, 0, sizeof(server->requested_calls));
- for (rm = server->registered_methods; rm; rm = rm->next) {
- if (requested_calls.count + rm->requested.count >
- requested_calls.capacity) {
- requested_calls.capacity =
- GPR_MAX(requested_calls.count + rm->requested.count,
- 2 * requested_calls.capacity);
- requested_calls.calls =
- gpr_realloc(requested_calls.calls, sizeof(*requested_calls.calls) *
- requested_calls.capacity);
- }
- memcpy(requested_calls.calls + requested_calls.count, rm->requested.calls,
- sizeof(*requested_calls.calls) * rm->requested.count);
- requested_calls.count += rm->requested.count;
- gpr_free(rm->requested.calls);
- memset(&rm->requested, 0, sizeof(rm->requested));
- }
+ kill_pending_work_locked(&exec_ctx, server);
gpr_mu_unlock(&server->mu_call);
- server->shutdown = 1;
- maybe_finish_shutdown(server);
+ maybe_finish_shutdown(&exec_ctx, server);
gpr_mu_unlock(&server->mu_global);
- /* terminate all the requested calls */
- for (i = 0; i < requested_calls.count; i++) {
- fail_call(server, &requested_calls.calls[i]);
- }
- gpr_free(requested_calls.calls);
-
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
- l->destroy(server, l->arg);
+ grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
+ l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
}
-}
-void grpc_server_listener_destroy_done(void *s) {
- grpc_server *server = s;
- gpr_mu_lock(&server->mu_global);
- server->listeners_destroyed++;
- maybe_finish_shutdown(server);
- gpr_mu_unlock(&server->mu_global);
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 1, 0);
+
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_server_cancel_all_calls(grpc_server *server) {
- call_data *calld;
- grpc_call **calls;
- size_t call_count;
- size_t call_capacity;
- int is_first = 1;
- size_t i;
-
- gpr_mu_lock(&server->mu_call);
+ channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(server->shutdown);
+ GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
- if (!server->lists[ALL_CALLS]) {
- gpr_mu_unlock(&server->mu_call);
- return;
- }
-
- call_capacity = 8;
- call_count = 0;
- calls = gpr_malloc(sizeof(grpc_call *) * call_capacity);
-
- for (calld = server->lists[ALL_CALLS];
- calld != server->lists[ALL_CALLS] || is_first;
- calld = calld->links[ALL_CALLS].next) {
- if (call_count == call_capacity) {
- call_capacity *= 2;
- calls = gpr_realloc(calls, sizeof(grpc_call *) * call_capacity);
- }
- calls[call_count++] = calld->call;
- GRPC_CALL_INTERNAL_REF(calld->call, "cancel_all");
- is_first = 0;
- }
-
- gpr_mu_unlock(&server->mu_call);
-
- for (i = 0; i < call_count; i++) {
- grpc_call_cancel_with_status(calls[i], GRPC_STATUS_UNAVAILABLE,
- "Unavailable");
- GRPC_CALL_INTERNAL_UNREF(calls[i], "cancel_all", 1);
- }
+ gpr_mu_lock(&server->mu_global);
+ channel_broadcaster_init(server, &broadcaster);
+ gpr_mu_unlock(&server->mu_global);
- gpr_free(calls);
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 0, 1);
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_server_destroy(grpc_server *server) {
listener *l;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
gpr_mu_lock(&server->mu_global);
- GPR_ASSERT(server->shutdown || !server->listeners);
+ GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
while (server->listeners) {
@@ -1050,14 +1050,16 @@ void grpc_server_destroy(grpc_server *server) {
gpr_mu_unlock(&server->mu_global);
- server_unref(server);
+ server_unref(&exec_ctx, server);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_add_listener(grpc_server *server, void *arg,
- void (*start)(grpc_server *server, void *arg,
- grpc_pollset **pollsets,
- size_t pollset_count),
- void (*destroy)(grpc_server *server, void *arg)) {
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done)) {
listener *l = gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
@@ -1066,39 +1068,61 @@ void grpc_server_add_listener(grpc_server *server, void *arg,
server->listeners = l;
}
-static grpc_call_error queue_call_request(grpc_server *server,
+static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
requested_call *rc) {
call_data *calld = NULL;
- requested_call_array *requested_calls = NULL;
- gpr_mu_lock(&server->mu_call);
- if (server->shutdown) {
- gpr_mu_unlock(&server->mu_call);
- fail_call(server, rc);
+ request_matcher *rm = NULL;
+ int request_id;
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
+ fail_call(exec_ctx, server, rc);
+ return GRPC_CALL_OK;
+ }
+ request_id = gpr_stack_lockfree_pop(server->request_freelist);
+ if (request_id == -1) {
+ /* out of request ids: just fail this one */
+ fail_call(exec_ctx, server, rc);
return GRPC_CALL_OK;
}
switch (rc->type) {
case BATCH_CALL:
- calld =
- call_list_remove_head(&server->lists[PENDING_START], PENDING_START);
- requested_calls = &server->requested_calls;
+ rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- calld = call_list_remove_head(
- &rc->data.registered.registered_method->pending, PENDING_START);
- requested_calls = &rc->data.registered.registered_method->requested;
+ rm = &rc->data.registered.registered_method->request_matcher;
break;
}
- if (calld) {
- GPR_ASSERT(calld->state == PENDING);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&server->mu_call);
- begin_call(server, calld, rc);
- return GRPC_CALL_OK;
- } else {
- *requested_call_array_add(requested_calls) = *rc;
+ server->requested_calls[request_id] = *rc;
+ gpr_free(rc);
+ if (gpr_stack_lockfree_push(rm->requests, request_id)) {
+ /* this was the first queued request: we need to lock and start
+ matching calls */
+ gpr_mu_lock(&server->mu_call);
+ while ((calld = rm->pending_head) != NULL) {
+ request_id = gpr_stack_lockfree_pop(rm->requests);
+ if (request_id == -1) break;
+ rm->pending_head = calld->pending_next;
+ gpr_mu_unlock(&server->mu_call);
+ gpr_mu_lock(&calld->mu_state);
+ if (calld->state == ZOMBIED) {
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true,
+ NULL);
+ } else {
+ GPR_ASSERT(calld->state == PENDING);
+ calld->state = ACTIVATED;
+ gpr_mu_unlock(&calld->mu_state);
+ begin_call(exec_ctx, server, calld,
+ &server->requested_calls[request_id]);
+ }
+ gpr_mu_lock(&server->mu_call);
+ }
gpr_mu_unlock(&server->mu_call);
- return GRPC_CALL_OK;
}
+ return GRPC_CALL_OK;
}
grpc_call_error grpc_server_request_call(
@@ -1106,52 +1130,76 @@ grpc_call_error grpc_server_request_call(
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
- requested_call rc;
- GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
- initial_metadata, cq_bound_to_call,
- cq_for_notification, tag);
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ requested_call *rc = gpr_malloc(sizeof(*rc));
+ GRPC_API_TRACE(
+ "grpc_server_request_call("
+ "server=%p, call=%p, details=%p, initial_metadata=%p, "
+ "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
+ 7, (server, call, details, initial_metadata, cq_bound_to_call,
+ cq_for_notification, tag));
if (!grpc_cq_is_server_cq(cq_for_notification)) {
- return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ gpr_free(rc);
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
}
- grpc_cq_begin_op(cq_for_notification, NULL);
- rc.type = BATCH_CALL;
- rc.tag = tag;
- rc.cq_bound_to_call = cq_bound_to_call;
- rc.cq_for_notification = cq_for_notification;
- rc.call = call;
- rc.data.batch.details = details;
- rc.data.batch.initial_metadata = initial_metadata;
- return queue_call_request(server, &rc);
+ grpc_cq_begin_op(cq_for_notification, tag);
+ details->reserved = NULL;
+ rc->type = BATCH_CALL;
+ rc->server = server;
+ rc->tag = tag;
+ rc->cq_bound_to_call = cq_bound_to_call;
+ rc->cq_for_notification = cq_for_notification;
+ rc->call = call;
+ rc->data.batch.details = details;
+ rc->initial_metadata = initial_metadata;
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
}
grpc_call_error grpc_server_request_registered_call(
- grpc_server *server, void *rm, grpc_call **call, gpr_timespec *deadline,
+ grpc_server *server, void *rmp, grpc_call **call, gpr_timespec *deadline,
grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
- requested_call rc;
- registered_method *registered_method = rm;
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ requested_call *rc = gpr_malloc(sizeof(*rc));
+ registered_method *rm = rmp;
+ GRPC_API_TRACE(
+ "grpc_server_request_registered_call("
+ "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
+ "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
+ "tag=%p)",
+ 9, (server, rmp, call, deadline, initial_metadata, optional_payload,
+ cq_bound_to_call, cq_for_notification, tag));
if (!grpc_cq_is_server_cq(cq_for_notification)) {
- return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ gpr_free(rc);
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
}
- grpc_cq_begin_op(cq_for_notification, NULL);
- rc.type = REGISTERED_CALL;
- rc.tag = tag;
- rc.cq_bound_to_call = cq_bound_to_call;
- rc.cq_for_notification = cq_for_notification;
- rc.call = call;
- rc.data.registered.registered_method = registered_method;
- rc.data.registered.deadline = deadline;
- rc.data.registered.initial_metadata = initial_metadata;
- rc.data.registered.optional_payload = optional_payload;
- return queue_call_request(server, &rc);
+ grpc_cq_begin_op(cq_for_notification, tag);
+ rc->type = REGISTERED_CALL;
+ rc->server = server;
+ rc->tag = tag;
+ rc->cq_bound_to_call = cq_bound_to_call;
+ rc->cq_for_notification = cq_for_notification;
+ rc->call = call;
+ rc->data.registered.registered_method = rm;
+ rc->data.registered.deadline = deadline;
+ rc->initial_metadata = initial_metadata;
+ rc->data.registered.optional_payload = optional_payload;
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
}
-static void publish_registered_or_batch(grpc_call *call, int success,
- void *tag);
-static void publish_was_not_set(grpc_call *call, int success, void *tag) {
- abort();
-}
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
+ void *user_data, bool success);
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
gpr_slice slice = value->slice;
@@ -1164,11 +1212,12 @@ static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
memcpy(*dest, grpc_mdstr_as_c_string(value), len + 1);
}
-static void begin_call(grpc_server *server, call_data *calld,
- requested_call *rc) {
- grpc_ioreq_completion_func publish = publish_was_not_set;
- grpc_ioreq req[2];
- grpc_ioreq *r = req;
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc) {
+ grpc_op ops[1];
+ grpc_op *op = ops;
+
+ memset(ops, 0, sizeof(ops));
/* called once initial metadata has been read by the call, but BEFORE
the ioreq to fetch it out of the call has been executed.
@@ -1176,62 +1225,77 @@ static void begin_call(grpc_server *server, call_data *calld,
fill in the metadata array passed by the client, we need to perform
an ioreq op, that should complete immediately. */
- grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
+ grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
+ grpc_closure_init(&rc->publish, publish_registered_or_batch, rc);
*rc->call = calld->call;
calld->cq_new = rc->cq_for_notification;
+ GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
switch (rc->type) {
case BATCH_CALL:
+ GPR_ASSERT(calld->host != NULL);
+ GPR_ASSERT(calld->path != NULL);
cpstr(&rc->data.batch.details->host,
&rc->data.batch.details->host_capacity, calld->host);
cpstr(&rc->data.batch.details->method,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
- r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- r->data.recv_metadata = rc->data.batch.initial_metadata;
- r->flags = 0;
- r++;
- publish = publish_registered_or_batch;
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
- r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- r->data.recv_metadata = rc->data.registered.initial_metadata;
- r->flags = 0;
- r++;
if (rc->data.registered.optional_payload) {
- r->op = GRPC_IOREQ_RECV_MESSAGE;
- r->data.recv_message = rc->data.registered.optional_payload;
- r->flags = 0;
- r++;
+ op->op = GRPC_OP_RECV_MESSAGE;
+ op->data.recv_message = rc->data.registered.optional_payload;
+ op++;
}
- publish = publish_registered_or_batch;
break;
+ default:
+ GPR_UNREACHABLE_CODE(return );
}
GRPC_CALL_INTERNAL_REF(calld->call, "server");
- grpc_call_start_ioreq_and_call_back(calld->call, req, r - req, publish,
- rc->tag);
+ grpc_call_start_batch_and_execute(exec_ctx, calld->call, ops,
+ (size_t)(op - ops), &rc->publish);
}
-static void fail_call(grpc_server *server, requested_call *rc) {
- *rc->call = NULL;
- switch (rc->type) {
- case BATCH_CALL:
- rc->data.batch.initial_metadata->count = 0;
- break;
- case REGISTERED_CALL:
- rc->data.registered.initial_metadata->count = 0;
- break;
+static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
+ grpc_cq_completion *c) {
+ requested_call *rc = req;
+ grpc_server *server = rc->server;
+
+ if (rc >= server->requested_calls &&
+ rc < server->requested_calls + server->max_requested_calls) {
+ GPR_ASSERT(rc - server->requested_calls <= INT_MAX);
+ gpr_stack_lockfree_push(server->request_freelist,
+ (int)(rc - server->requested_calls));
+ } else {
+ gpr_free(req);
}
- grpc_cq_end_op(rc->cq_for_notification, rc->tag, NULL, 0);
+
+ server_unref(exec_ctx, server);
+}
+
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc) {
+ *rc->call = NULL;
+ rc->initial_metadata->count = 0;
+
+ server_ref(server);
+ grpc_cq_end_op(exec_ctx, rc->cq_for_notification, rc->tag, 0,
+ done_request_event, rc, &rc->completion);
}
-static void publish_registered_or_batch(grpc_call *call, int success,
- void *tag) {
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
+ bool success) {
+ requested_call *rc = prc;
+ grpc_call *call = *rc->call;
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
call_data *calld = elem->call_data;
- grpc_cq_end_op(calld->cq_new, tag, call, success);
+ channel_data *chand = elem->channel_data;
+ server_ref(chand->server);
+ grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, success, done_request_event,
+ rc, &rc->completion);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "server");
}
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
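The server.c hunks above replace the per-method growable requested_call arrays with one preallocated requested_calls table whose free slots are handed out through a gpr_stack_lockfree freelist: queue_call_request pops an index (failing the call when the freelist is exhausted), stores the request in that slot, and pushes the index onto the matcher's lock-free request stack; done_request_event later returns the index to the freelist. The standalone sketch below models only that claim/release pattern, using a plain array-backed stack in place of gpr_stack_lockfree; the names (request_table, requested_call_slot) are invented for illustration and are not part of the patch.

#include <stdio.h>

/* Simplified model of the freelist-indexed request table: a fixed array
   of request slots plus a stack of free indices. The real code uses
   gpr_stack_lockfree so that claiming a slot needs no mutex. */
#define MAX_REQUESTED_CALLS 8

typedef struct {
  void *tag; /* completion tag of the pending grpc_server_request_call */
} requested_call_slot;

typedef struct {
  requested_call_slot slots[MAX_REQUESTED_CALLS];
  int freelist[MAX_REQUESTED_CALLS];
  int free_count;
} request_table;

static void request_table_init(request_table *t) {
  int i;
  t->free_count = 0;
  for (i = 0; i < MAX_REQUESTED_CALLS; i++) {
    t->freelist[t->free_count++] = i; /* every slot starts out free */
  }
}

/* claim a slot: mirrors gpr_stack_lockfree_pop returning -1 when exhausted */
static int request_table_claim(request_table *t, void *tag) {
  int id;
  if (t->free_count == 0) return -1; /* out of request ids: caller fails the call */
  id = t->freelist[--t->free_count];
  t->slots[id].tag = tag;
  return id;
}

/* release a slot once its completion has been published to the cq */
static void request_table_release(request_table *t, int id) {
  t->freelist[t->free_count++] = id;
}

int main(void) {
  request_table t;
  int id;
  request_table_init(&t);
  id = request_table_claim(&t, (void *)0x1);
  printf("claimed request id %d\n", id);
  request_table_release(&t, id);
  return 0;
}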
diff --git a/src/core/surface/server.h b/src/core/surface/server.h
index 91a1a2a7f6..cd62eadd7f 100644
--- a/src/core/surface/server.h
+++ b/src/core/surface/server.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,37 +31,32 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_SERVER_H
-#define GRPC_INTERNAL_CORE_SURFACE_SERVER_H
+#ifndef GRPC_CORE_SURFACE_SERVER_H
+#define GRPC_CORE_SURFACE_SERVER_H
-#include "src/core/channel/channel_stack.h"
#include <grpc/grpc.h>
+#include "src/core/channel/channel_stack.h"
#include "src/core/transport/transport.h"
-/* Create a server */
-grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
- size_t filter_count,
- const grpc_channel_args *args);
+extern const grpc_channel_filter grpc_server_top_filter;
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
-void grpc_server_add_listener(grpc_server *server, void *listener,
- void (*start)(grpc_server *server, void *arg,
- grpc_pollset **pollsets,
- size_t npollsets),
- void (*destroy)(grpc_server *server, void *arg));
-
-void grpc_server_listener_destroy_done(void *server);
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t npollsets),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done));
/* Setup a transport - creates a channel stack, binds the transport to the
server */
-grpc_transport_setup_result grpc_server_setup_transport(
- grpc_server *server, grpc_transport *transport,
- grpc_channel_filter const **extra_filters, size_t num_extra_filters,
- grpc_mdctx *mdctx, const grpc_channel_args *args);
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_transport *transport,
+ const grpc_channel_args *args);
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server);
int grpc_server_has_open_connections(grpc_server *server);
-#endif /* GRPC_INTERNAL_CORE_SURFACE_SERVER_H */
+#endif /* GRPC_CORE_SURFACE_SERVER_H */
diff --git a/src/core/surface/server_chttp2.c b/src/core/surface/server_chttp2.c
index 7e49a531df..ff2840f655 100644
--- a/src/core/surface/server_chttp2.c
+++ b/src/core/surface/server_chttp2.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,23 +36,22 @@
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_server.h"
+#include "src/core/surface/api_trace.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
-static grpc_transport_setup_result setup_transport(void *server,
- grpc_transport *transport,
- grpc_mdctx *mdctx) {
- static grpc_channel_filter const *extra_filters[] = {
- &grpc_http_server_filter};
- return grpc_server_setup_transport(server, transport, extra_filters,
- GPR_ARRAY_SIZE(extra_filters), mdctx,
- grpc_server_get_channel_args(server));
+static void setup_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_transport *transport) {
+ grpc_server_setup_transport(exec_ctx, server, transport,
+ grpc_server_get_channel_args(server));
}
-static void new_transport(void *server, grpc_endpoint *tcp) {
+static void new_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_endpoint *tcp,
+ grpc_tcp_server_acceptor *acceptor) {
/*
* Beware that the call to grpc_create_chttp2_transport() has to happen before
* grpc_tcp_server_destroy(). This is fine here, but similar code
@@ -60,48 +59,54 @@ static void new_transport(void *server, grpc_endpoint *tcp) {
* (as in server_secure_chttp2.c) needs to add synchronization to avoid this
* case.
*/
- grpc_create_chttp2_transport(setup_transport, server,
- grpc_server_get_channel_args(server), tcp, NULL,
- 0, grpc_mdctx_create(), 0);
+ grpc_transport *transport = grpc_create_chttp2_transport(
+ exec_ctx, grpc_server_get_channel_args(server), tcp, 0);
+ setup_transport(exec_ctx, server, transport);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
}
/* Server callback: start listening on our ports */
-static void start(grpc_server *server, void *tcpp, grpc_pollset **pollsets,
- size_t pollset_count) {
+static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_pollset **pollsets, size_t pollset_count) {
grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_start(tcp, pollsets, pollset_count, new_transport, server);
+ grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, new_transport,
+ server);
}
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
-static void destroy(grpc_server *server, void *tcpp) {
+static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_closure *destroy_done) {
grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_destroy(tcp, grpc_server_listener_destroy_done, server);
+ grpc_tcp_server_unref(exec_ctx, tcp);
+ grpc_exec_ctx_enqueue(exec_ctx, destroy_done, true, NULL);
}
-int grpc_server_add_http2_port(grpc_server *server, const char *addr) {
+int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
grpc_resolved_addresses *resolved = NULL;
grpc_tcp_server *tcp = NULL;
size_t i;
unsigned count = 0;
int port_num = -1;
int port_temp;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2,
+ (server, addr));
resolved = grpc_blocking_resolve_address(addr, "http");
if (!resolved) {
goto error;
}
- tcp = grpc_tcp_server_create();
- if (!tcp) {
- goto error;
- }
+ tcp = grpc_tcp_server_create(NULL);
+ GPR_ASSERT(tcp);
for (i = 0; i < resolved->naddrs; i++) {
port_temp = grpc_tcp_server_add_port(
tcp, (struct sockaddr *)&resolved->addrs[i].addr,
resolved->addrs[i].len);
- if (port_temp >= 0) {
+ if (port_temp > 0) {
if (port_num == -1) {
port_num = port_temp;
} else {
@@ -122,9 +127,8 @@ int grpc_server_add_http2_port(grpc_server *server, const char *addr) {
grpc_resolved_addresses_destroy(resolved);
/* Register with the server only upon success */
- grpc_server_add_listener(server, tcp, start, destroy);
-
- return port_num;
+ grpc_server_add_listener(&exec_ctx, server, tcp, start, destroy);
+ goto done;
/* Error path: cleanup and return */
error:
@@ -132,7 +136,11 @@ error:
grpc_resolved_addresses_destroy(resolved);
}
if (tcp) {
- grpc_tcp_server_destroy(tcp, NULL, NULL);
+ grpc_tcp_server_unref(&exec_ctx, tcp);
}
- return 0;
+ port_num = 0;
+
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return port_num;
}
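server_chttp2.c shows the new listener contract: start and destroy both receive a grpc_exec_ctx, and destroy must report completion through the supplied grpc_closure (here via grpc_exec_ctx_enqueue) so that maybe_finish_shutdown can account for it. A minimal sketch of wiring a listener to that contract, assuming only the declarations from server.h and exec_ctx.h that appear in this diff; my_listener_state, my_start, my_destroy and register_my_listener are invented names:

#include <stdbool.h>
#include <stddef.h>

#include "src/core/iomgr/exec_ctx.h"
#include "src/core/surface/server.h"

typedef struct {
  int started;
} my_listener_state;

static void my_start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
                     grpc_pollset **pollsets, size_t pollset_count) {
  my_listener_state *state = arg;
  state->started = 1; /* a real listener would begin accepting connections here */
}

static void my_destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
                       grpc_closure *on_done) {
  /* release listener resources, then tell the server this listener is gone
     so that shutdown can make progress */
  grpc_exec_ctx_enqueue(exec_ctx, on_done, true, NULL);
}

static void register_my_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
                                 my_listener_state *state) {
  grpc_server_add_listener(exec_ctx, server, state, my_start, my_destroy);
}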
diff --git a/src/core/surface/surface_trace.h b/src/core/surface/surface_trace.h
index 01302bb5d4..ed820ebd05 100644
--- a/src/core/surface/surface_trace.h
+++ b/src/core/surface/surface_trace.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,19 +31,18 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_SURFACE_TRACE_H
-#define GRPC_INTERNAL_CORE_SURFACE_SURFACE_TRACE_H
+#ifndef GRPC_CORE_SURFACE_SURFACE_TRACE_H
+#define GRPC_CORE_SURFACE_SURFACE_TRACE_H
#include "src/core/debug/trace.h"
+#include "src/core/surface/api_trace.h"
#include <grpc/support/log.h>
-extern int grpc_surface_trace;
-
#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event) \
- if (grpc_surface_trace) { \
+ if (grpc_api_trace) { \
char *_ev = grpc_event_string(event); \
gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \
gpr_free(_ev); \
}
-#endif /* GRPC_INTERNAL_CORE_SURFACE_SURFACE_TRACE_H */
+#endif /* GRPC_CORE_SURFACE_SURFACE_TRACE_H */
diff --git a/src/core/surface/validate_metadata.c b/src/core/surface/validate_metadata.c
new file mode 100644
index 0000000000..bf4126867f
--- /dev/null
+++ b/src/core/surface/validate_metadata.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+
+static int conforms_to(const char *s, size_t len, const uint8_t *legal_bits) {
+ const char *p = s;
+ const char *e = s + len;
+ for (; p != e; p++) {
+ int idx = *p;
+ int byte = idx / 8;
+ int bit = idx % 8;
+ if ((legal_bits[byte] & (1 << bit)) == 0) return 0;
+ }
+ return 1;
+}
+
+int grpc_header_key_is_legal(const char *key, size_t length) {
+ static const uint8_t legal_header_bits[256 / 8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00,
+ 0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ if (length == 0) {
+ return 0;
+ }
+ return conforms_to(key, length, legal_header_bits);
+}
+
+int grpc_header_nonbin_value_is_legal(const char *value, size_t length) {
+ static const uint8_t legal_header_bits[256 / 8] = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ return conforms_to(value, length, legal_header_bits);
+}
+
+int grpc_is_binary_header(const char *key, size_t length) {
+ if (length < 5) return 0;
+ return 0 == memcmp(key + length - 4, "-bin", 4);
+}
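conforms_to() above treats its legal_bits argument as a 256-entry bitset: byte index c / 8 and bit mask 1 << (c % 8) select whether character c is allowed, so each grpc_header_*_is_legal check is a table lookup per byte. The sketch below, which is not part of the patch, shows how such a table can be built for an arbitrary character set and queried the same way:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a 256-bit allow-table from a list of legal characters, using the
   same byte/bit layout as conforms_to(): byte = c / 8, bit = c % 8. */
static void build_table(const char *legal, uint8_t table[256 / 8]) {
  memset(table, 0, 256 / 8);
  for (; *legal; legal++) {
    uint8_t c = (uint8_t)*legal;
    table[c / 8] |= (uint8_t)(1 << (c % 8));
  }
}

static int conforms(const char *s, size_t len, const uint8_t table[256 / 8]) {
  size_t i;
  for (i = 0; i < len; i++) {
    uint8_t c = (uint8_t)s[i];
    if ((table[c / 8] & (1 << (c % 8))) == 0) return 0;
  }
  return 1;
}

int main(void) {
  uint8_t digits[256 / 8];
  build_table("0123456789", digits);
  printf("%d %d\n", conforms("12345", 5, digits), conforms("12a45", 5, digits));
  return 0; /* prints: 1 0 */
}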
diff --git a/src/core/surface/version.c b/src/core/surface/version.c
new file mode 100644
index 0000000000..7723f39401
--- /dev/null
+++ b/src/core/surface/version.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* This file is autogenerated from:
+ templates/src/core/surface/version.c.template */
+
+#include <grpc/grpc.h>
+
+const char *grpc_version_string(void) { return "0.14.0-dev"; }
diff --git a/src/core/transport/byte_stream.c b/src/core/transport/byte_stream.c
new file mode 100644
index 0000000000..8e6fb2cbef
--- /dev/null
+++ b/src/core/transport/byte_stream.c
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/transport/byte_stream.h"
+
+#include <stdlib.h>
+
+#include <grpc/support/log.h>
+
+int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream, gpr_slice *slice,
+ size_t max_size_hint, grpc_closure *on_complete) {
+ return byte_stream->next(exec_ctx, byte_stream, slice, max_size_hint,
+ on_complete);
+}
+
+void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream) {
+ byte_stream->destroy(exec_ctx, byte_stream);
+}
+
+/* slice_buffer_stream */
+
+static int slice_buffer_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete) {
+ grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+ GPR_ASSERT(stream->cursor < stream->backing_buffer->count);
+ *slice = gpr_slice_ref(stream->backing_buffer->slices[stream->cursor]);
+ stream->cursor++;
+ return 1;
+}
+
+static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream) {}
+
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
+ gpr_slice_buffer *slice_buffer,
+ uint32_t flags) {
+ GPR_ASSERT(slice_buffer->length <= UINT32_MAX);
+ stream->base.length = (uint32_t)slice_buffer->length;
+ stream->base.flags = flags;
+ stream->base.next = slice_buffer_stream_next;
+ stream->base.destroy = slice_buffer_stream_destroy;
+ stream->backing_buffer = slice_buffer;
+ stream->cursor = 0;
+}
diff --git a/src/core/transport/byte_stream.h b/src/core/transport/byte_stream.h
new file mode 100644
index 0000000000..b8d0ade2b5
--- /dev/null
+++ b/src/core/transport/byte_stream.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_TRANSPORT_BYTE_STREAM_H
+#define GRPC_CORE_TRANSPORT_BYTE_STREAM_H
+
+#include "src/core/iomgr/exec_ctx.h"
+#include <grpc/support/slice_buffer.h>
+
+/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
+ * compression for the message */
+#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
+/** Mask of all valid internal flags. */
+#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
+
+struct grpc_byte_stream;
+typedef struct grpc_byte_stream grpc_byte_stream;
+
+struct grpc_byte_stream {
+ uint32_t length;
+ uint32_t flags;
+ int (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream);
+};
+
+/* returns 1 if the bytes are available immediately (in which case
+ * on_complete will not be called), 0 if the bytes will be available
+ * asynchronously.
+ *
+ * on entry, max_size_hint can be set as a hint as to the maximum number
+ * of bytes that would be acceptable to read.
+ *
+ * fills *slice with the next chunk of bytes before either returning 1
+ * or calling on_complete.
+ *
+ * once a slice is returned into *slice, it is owned by the caller.
+ */
+int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream, gpr_slice *slice,
+ size_t max_size_hint, grpc_closure *on_complete);
+
+void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream);
+
+/* grpc_byte_stream that wraps a slice buffer */
+typedef struct grpc_slice_buffer_stream {
+ grpc_byte_stream base;
+ gpr_slice_buffer *backing_buffer;
+ size_t cursor;
+} grpc_slice_buffer_stream;
+
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
+ gpr_slice_buffer *slice_buffer,
+ uint32_t flags);
+
+#endif /* GRPC_CORE_TRANSPORT_BYTE_STREAM_H */
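grpc_byte_stream is a pull interface: callers repeatedly request the next slice and only wait on on_complete when next() returns 0. The sketch below drains a grpc_slice_buffer_stream, for which next() always returns 1 because the data is already in memory; it assumes the internal closure/exec_ctx helpers (grpc_closure_init, grpc_exec_ctx_finish) provided elsewhere in the tree, and drain()/never_called() are invented names:

#include <stdbool.h>

#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>

#include "src/core/iomgr/closure.h"
#include "src/core/transport/byte_stream.h"

/* For a slice-buffer-backed stream this callback is never invoked; it only
   exists because grpc_byte_stream_next requires a closure argument. */
static void never_called(grpc_exec_ctx *exec_ctx, void *arg, bool success) {}

static size_t drain(gpr_slice_buffer *backing) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_slice_buffer_stream stream;
  grpc_closure on_complete;
  gpr_slice slice;
  size_t total = 0;

  grpc_slice_buffer_stream_init(&stream, backing, 0);
  grpc_closure_init(&on_complete, never_called, NULL);
  while (total < stream.base.length &&
         grpc_byte_stream_next(&exec_ctx, &stream.base, &slice,
                               stream.base.length, &on_complete)) {
    total += GPR_SLICE_LENGTH(slice);
    gpr_slice_unref(slice); /* next() hands ownership of the slice to the caller */
  }
  grpc_byte_stream_destroy(&exec_ctx, &stream.base);
  grpc_exec_ctx_finish(&exec_ctx);
  return total;
}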
diff --git a/src/core/transport/chttp2/alpn.c b/src/core/transport/chttp2/alpn.c
index 3ccd5796ba..69da4e6718 100644
--- a/src/core/transport/chttp2/alpn.c
+++ b/src/core/transport/chttp2/alpn.c
@@ -36,8 +36,7 @@
#include <grpc/support/useful.h>
/* in order of preference */
-static const char *const supported_versions[] = {"h2", "h2-17", "h2-16",
- "h2-15", "h2-14"};
+static const char *const supported_versions[] = {"h2"};
int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) {
size_t i;
diff --git a/src/core/transport/chttp2/alpn.h b/src/core/transport/chttp2/alpn.h
index f38b4c3167..68010e3155 100644
--- a/src/core/transport/chttp2/alpn.h
+++ b/src/core/transport/chttp2/alpn.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_ALPN_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_ALPN_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_ALPN_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_ALPN_H
#include <string.h>
@@ -46,4 +46,4 @@ size_t grpc_chttp2_num_alpn_versions(void);
* grpc_chttp2_num_alpn_versions()) */
const char *grpc_chttp2_get_alpn_version_index(size_t i);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_ALPN_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_ALPN_H */
diff --git a/src/core/transport/chttp2/bin_encoder.c b/src/core/transport/chttp2/bin_encoder.c
index dee6dbec8b..f26bc7e29b 100644
--- a/src/core/transport/chttp2/bin_encoder.c
+++ b/src/core/transport/chttp2/bin_encoder.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,24 +42,76 @@ static const char alphabet[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
typedef struct {
- gpr_uint16 bits;
- gpr_uint8 length;
+ uint16_t bits;
+ uint8_t length;
} b64_huff_sym;
-static const b64_huff_sym huff_alphabet[64] = {
- {0x21, 6}, {0x5d, 7}, {0x5e, 7}, {0x5f, 7}, {0x60, 7}, {0x61, 7},
- {0x62, 7}, {0x63, 7}, {0x64, 7}, {0x65, 7}, {0x66, 7}, {0x67, 7},
- {0x68, 7}, {0x69, 7}, {0x6a, 7}, {0x6b, 7}, {0x6c, 7}, {0x6d, 7},
- {0x6e, 7}, {0x6f, 7}, {0x70, 7}, {0x71, 7}, {0x72, 7}, {0xfc, 8},
- {0x73, 7}, {0xfd, 8}, {0x3, 5}, {0x23, 6}, {0x4, 5}, {0x24, 6},
- {0x5, 5}, {0x25, 6}, {0x26, 6}, {0x27, 6}, {0x6, 5}, {0x74, 7},
- {0x75, 7}, {0x28, 6}, {0x29, 6}, {0x2a, 6}, {0x7, 5}, {0x2b, 6},
- {0x76, 7}, {0x2c, 6}, {0x8, 5}, {0x9, 5}, {0x2d, 6}, {0x77, 7},
- {0x78, 7}, {0x79, 7}, {0x7a, 7}, {0x7b, 7}, {0x0, 5}, {0x1, 5},
- {0x2, 5}, {0x19, 6}, {0x1a, 6}, {0x1b, 6}, {0x1c, 6}, {0x1d, 6},
- {0x1e, 6}, {0x1f, 6}, {0x7fb, 11}, {0x18, 6}};
-
-static const gpr_uint8 tail_xtra[3] = {0, 2, 3};
+static const b64_huff_sym huff_alphabet[64] = {{0x21, 6},
+ {0x5d, 7},
+ {0x5e, 7},
+ {0x5f, 7},
+ {0x60, 7},
+ {0x61, 7},
+ {0x62, 7},
+ {0x63, 7},
+ {0x64, 7},
+ {0x65, 7},
+ {0x66, 7},
+ {0x67, 7},
+ {0x68, 7},
+ {0x69, 7},
+ {0x6a, 7},
+ {0x6b, 7},
+ {0x6c, 7},
+ {0x6d, 7},
+ {0x6e, 7},
+ {0x6f, 7},
+ {0x70, 7},
+ {0x71, 7},
+ {0x72, 7},
+ {0xfc, 8},
+ {0x73, 7},
+ {0xfd, 8},
+ {0x3, 5},
+ {0x23, 6},
+ {0x4, 5},
+ {0x24, 6},
+ {0x5, 5},
+ {0x25, 6},
+ {0x26, 6},
+ {0x27, 6},
+ {0x6, 5},
+ {0x74, 7},
+ {0x75, 7},
+ {0x28, 6},
+ {0x29, 6},
+ {0x2a, 6},
+ {0x7, 5},
+ {0x2b, 6},
+ {0x76, 7},
+ {0x2c, 6},
+ {0x8, 5},
+ {0x9, 5},
+ {0x2d, 6},
+ {0x77, 7},
+ {0x78, 7},
+ {0x79, 7},
+ {0x7a, 7},
+ {0x7b, 7},
+ {0x0, 5},
+ {0x1, 5},
+ {0x2, 5},
+ {0x19, 6},
+ {0x1a, 6},
+ {0x1b, 6},
+ {0x1c, 6},
+ {0x1d, 6},
+ {0x1e, 6},
+ {0x1f, 6},
+ {0x7fb, 11},
+ {0x18, 6}};
+
+static const uint8_t tail_xtra[3] = {0, 2, 3};
gpr_slice grpc_chttp2_base64_encode(gpr_slice input) {
size_t input_length = GPR_SLICE_LENGTH(input);
@@ -67,8 +119,8 @@ gpr_slice grpc_chttp2_base64_encode(gpr_slice input) {
size_t tail_case = input_length % 3;
size_t output_length = input_triplets * 4 + tail_xtra[tail_case];
gpr_slice output = gpr_slice_malloc(output_length);
- gpr_uint8 *in = GPR_SLICE_START_PTR(input);
- gpr_uint8 *out = GPR_SLICE_START_PTR(output);
+ uint8_t *in = GPR_SLICE_START_PTR(input);
+ char *out = (char *)GPR_SLICE_START_PTR(output);
size_t i;
/* encode full triplets */
@@ -100,18 +152,18 @@ gpr_slice grpc_chttp2_base64_encode(gpr_slice input) {
break;
}
- GPR_ASSERT(out == GPR_SLICE_END_PTR(output));
+ GPR_ASSERT(out == (char *)GPR_SLICE_END_PTR(output));
GPR_ASSERT(in == GPR_SLICE_END_PTR(input));
return output;
}
gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
size_t nbits;
- gpr_uint8 *in;
- gpr_uint8 *out;
+ uint8_t *in;
+ uint8_t *out;
gpr_slice output;
- gpr_uint32 temp = 0;
- gpr_uint32 temp_length = 0;
+ uint32_t temp = 0;
+ uint32_t temp_length = 0;
nbits = 0;
for (in = GPR_SLICE_START_PTR(input); in != GPR_SLICE_END_PTR(input); ++in) {
@@ -128,12 +180,17 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
while (temp_length > 8) {
temp_length -= 8;
- *out++ = temp >> temp_length;
+ *out++ = (uint8_t)(temp >> temp_length);
}
}
if (temp_length) {
- *out++ = (temp << (8 - temp_length)) | (0xff >> temp_length);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ *out++ = (uint8_t)((uint8_t)(temp << (8u - temp_length)) |
+ (uint8_t)(0xffu >> temp_length));
}
GPR_ASSERT(out == GPR_SLICE_END_PTR(output));
@@ -142,28 +199,28 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
}
typedef struct {
- gpr_uint32 temp;
- gpr_uint32 temp_length;
- gpr_uint8 *out;
+ uint32_t temp;
+ uint32_t temp_length;
+ uint8_t *out;
} huff_out;
static void enc_flush_some(huff_out *out) {
while (out->temp_length > 8) {
out->temp_length -= 8;
- *out->out++ = out->temp >> out->temp_length;
+ *out->out++ = (uint8_t)(out->temp >> out->temp_length);
}
}
-static void enc_add2(huff_out *out, gpr_uint8 a, gpr_uint8 b) {
+static void enc_add2(huff_out *out, uint8_t a, uint8_t b) {
b64_huff_sym sa = huff_alphabet[a];
b64_huff_sym sb = huff_alphabet[b];
- out->temp =
- (out->temp << (sa.length + sb.length)) | (sa.bits << sb.length) | sb.bits;
- out->temp_length += sa.length + sb.length;
+ out->temp = (out->temp << (sa.length + sb.length)) |
+ ((uint32_t)sa.bits << sb.length) | sb.bits;
+ out->temp_length += (uint32_t)sa.length + (uint32_t)sb.length;
enc_flush_some(out);
}
-static void enc_add1(huff_out *out, gpr_uint8 a) {
+static void enc_add1(huff_out *out, uint8_t a) {
b64_huff_sym sa = huff_alphabet[a];
out->temp = (out->temp << sa.length) | sa.bits;
out->temp_length += sa.length;
@@ -178,8 +235,8 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
size_t max_output_bits = 11 * output_syms;
size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0);
gpr_slice output = gpr_slice_malloc(max_output_length);
- gpr_uint8 *in = GPR_SLICE_START_PTR(input);
- gpr_uint8 *start_out = GPR_SLICE_START_PTR(output);
+ uint8_t *in = GPR_SLICE_START_PTR(input);
+ uint8_t *start_out = GPR_SLICE_START_PTR(output);
huff_out out;
size_t i;
@@ -189,8 +246,9 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
/* encode full triplets */
for (i = 0; i < input_triplets; i++) {
- enc_add2(&out, in[0] >> 2, ((in[0] & 0x3) << 4) | (in[1] >> 4));
- enc_add2(&out, ((in[1] & 0xf) << 2) | (in[2] >> 6), in[2] & 0x3f);
+ enc_add2(&out, in[0] >> 2, (uint8_t)((in[0] & 0x3) << 4) | (in[1] >> 4));
+ enc_add2(&out, (uint8_t)((in[1] & 0xf) << 2) | (in[2] >> 6),
+ (uint8_t)(in[2] & 0x3f));
in += 3;
}
@@ -199,19 +257,24 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
case 0:
break;
case 1:
- enc_add2(&out, in[0] >> 2, (in[0] & 0x3) << 4);
+ enc_add2(&out, in[0] >> 2, (uint8_t)((in[0] & 0x3) << 4));
in += 1;
break;
case 2:
- enc_add2(&out, in[0] >> 2, ((in[0] & 0x3) << 4) | (in[1] >> 4));
- enc_add1(&out, (in[1] & 0xf) << 2);
+ enc_add2(&out, in[0] >> 2,
+ (uint8_t)((in[0] & 0x3) << 4) | (uint8_t)(in[1] >> 4));
+ enc_add1(&out, (uint8_t)((in[1] & 0xf) << 2));
in += 2;
break;
}
if (out.temp_length) {
- *out.out++ =
- (out.temp << (8 - out.temp_length)) | (0xff >> out.temp_length);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ *out.out++ = (uint8_t)((uint8_t)(out.temp << (8u - out.temp_length)) |
+ (uint8_t)(0xffu >> out.temp_length));
}
GPR_ASSERT(out.out <= GPR_SLICE_END_PTR(output));
@@ -220,8 +283,3 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
GPR_ASSERT(in == GPR_SLICE_END_PTR(input));
return output;
}
-
-int grpc_is_binary_header(const char *key, size_t length) {
- if (length < 5) return 0;
- return 0 == memcmp(key + length - 4, "-bin", 4);
-}
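The NB comments added in grpc_chttp2_huffman_compress and grpc_chttp2_base64_encode_and_huffman_compress refer to C's integral promotion: operands narrower than int are widened before the shift, so the intermediate value is not truncated to 8 bits and an explicit cast back to uint8_t is needed to get the intended byte (and to silence the conversion warning). A small illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint8_t temp = 0xAB;
  unsigned temp_length = 4;
  /* The shift happens after promotion to a wider type, so the intermediate
     value 0xAB0 does not fit in uint8_t ... */
  unsigned promoted = temp << (8u - temp_length);
  /* ... and an explicit cast back to the 8-bit container keeps only the low
     byte before it is OR-ed with the padding bits, as the encoder does. */
  uint8_t emitted = (uint8_t)((uint8_t)(temp << (8u - temp_length)) |
                              (uint8_t)(0xffu >> temp_length));
  printf("promoted=0x%X emitted=0x%02X\n", promoted, emitted);
  return 0; /* prints: promoted=0xAB0 emitted=0xBF */
}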
diff --git a/src/core/transport/chttp2/bin_encoder.h b/src/core/transport/chttp2/bin_encoder.h
index d3e5a855dd..edb6f2dad1 100644
--- a/src/core/transport/chttp2/bin_encoder.h
+++ b/src/core/transport/chttp2/bin_encoder.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H
#include <grpc/support/slice.h>
@@ -51,6 +51,4 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input);
return y; */
gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input);
-int grpc_is_binary_header(const char *key, size_t length);
-
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_BIN_ENCODER_H */
diff --git a/src/core/transport/chttp2/frame.h b/src/core/transport/chttp2/frame.h
index 879ee036fa..560a6675af 100644
--- a/src/core/transport/chttp2/frame.h
+++ b/src/core/transport/chttp2/frame.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_H
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
@@ -66,4 +66,4 @@ typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
#define GRPC_CHTTP2_DATA_FLAG_PADDED 8
#define GRPC_CHTTP2_FLAG_HAS_PRIORITY 0x20
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_H */
diff --git a/src/core/transport/chttp2/frame_data.c b/src/core/transport/chttp2/frame_data.c
index 0ad62a9999..f9a1af8873 100644
--- a/src/core/transport/chttp2/frame_data.c
+++ b/src/core/transport/chttp2/frame_data.c
@@ -45,16 +45,25 @@
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser) {
parser->state = GRPC_CHTTP2_DATA_FH_0;
- grpc_sopb_init(&parser->incoming_sopb);
+ parser->parsing_frame = NULL;
return GRPC_CHTTP2_PARSE_OK;
}
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser) {
- grpc_sopb_destroy(&parser->incoming_sopb);
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *parser) {
+ grpc_byte_stream *bs;
+ if (parser->parsing_frame) {
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, parser->parsing_frame,
+ 0, 1);
+ }
+ while (
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
+ grpc_byte_stream_destroy(exec_ctx, bs);
+ }
}
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
- grpc_chttp2_data_parser *parser, gpr_uint8 flags) {
+ grpc_chttp2_data_parser *parser, uint8_t flags) {
if (flags & ~GRPC_CHTTP2_DATA_FLAG_END_STREAM) {
gpr_log(GPR_ERROR, "unsupported data flags: 0x%02x", flags);
return GRPC_CHTTP2_STREAM_ERROR;
@@ -69,13 +78,72 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
return GRPC_CHTTP2_PARSE_OK;
}
+void grpc_chttp2_incoming_frame_queue_merge(
+ grpc_chttp2_incoming_frame_queue *head_dst,
+ grpc_chttp2_incoming_frame_queue *tail_src) {
+ if (tail_src->head == NULL) {
+ return;
+ }
+
+ if (head_dst->head == NULL) {
+ *head_dst = *tail_src;
+ memset(tail_src, 0, sizeof(*tail_src));
+ return;
+ }
+
+ head_dst->tail->next_message = tail_src->head;
+ head_dst->tail = tail_src->tail;
+ memset(tail_src, 0, sizeof(*tail_src));
+}
+
+grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
+ grpc_chttp2_incoming_frame_queue *q) {
+ grpc_byte_stream *out;
+ if (q->head == NULL) {
+ return NULL;
+ }
+ out = &q->head->base;
+ if (q->head == q->tail) {
+ memset(q, 0, sizeof(*q));
+ } else {
+ q->head = q->head->next_message;
+ }
+ return out;
+}
+
+void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
+ uint32_t write_bytes, int is_eof,
+ gpr_slice_buffer *outbuf) {
+ gpr_slice hdr;
+ uint8_t *p;
+
+ hdr = gpr_slice_malloc(9);
+ p = GPR_SLICE_START_PTR(hdr);
+ GPR_ASSERT(write_bytes < (1 << 24));
+ *p++ = (uint8_t)(write_bytes >> 16);
+ *p++ = (uint8_t)(write_bytes >> 8);
+ *p++ = (uint8_t)(write_bytes);
+ *p++ = GRPC_CHTTP2_FRAME_DATA;
+ *p++ = is_eof ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0;
+ *p++ = (uint8_t)(id >> 24);
+ *p++ = (uint8_t)(id >> 16);
+ *p++ = (uint8_t)(id >> 8);
+ *p++ = (uint8_t)(id);
+ gpr_slice_buffer_add(outbuf, hdr);
+
+ gpr_slice_buffer_move_first(inbuf, write_bytes, outbuf);
+}
+
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
- gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *const beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
grpc_chttp2_data_parser *p = parser;
+ uint32_t message_flags;
+ grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
@@ -89,47 +157,55 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
p->frame_type = *cur;
- if (++cur == end) {
- p->state = GRPC_CHTTP2_DATA_FH_1;
- return GRPC_CHTTP2_PARSE_OK;
- }
switch (p->frame_type) {
case 0:
+ p->is_frame_compressed = 0; /* GPR_FALSE */
break;
case 1:
- gpr_log(GPR_ERROR, "Compressed GRPC frames not yet supported");
- return GRPC_CHTTP2_STREAM_ERROR;
+ p->is_frame_compressed = 1; /* GPR_TRUE */
+ break;
default:
gpr_log(GPR_ERROR, "Bad GRPC frame type 0x%02x", p->frame_type);
return GRPC_CHTTP2_STREAM_ERROR;
}
+ if (++cur == end) {
+ p->state = GRPC_CHTTP2_DATA_FH_1;
+ return GRPC_CHTTP2_PARSE_OK;
+ }
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
- p->frame_size = ((gpr_uint32)*cur) << 24;
+ p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
return GRPC_CHTTP2_PARSE_OK;
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
- p->frame_size |= ((gpr_uint32)*cur) << 16;
+ p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
return GRPC_CHTTP2_PARSE_OK;
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
- p->frame_size |= ((gpr_uint32)*cur) << 8;
+ p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
return GRPC_CHTTP2_PARSE_OK;
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
- p->frame_size |= ((gpr_uint32)*cur);
+ p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
- grpc_sopb_add_begin_message(&p->incoming_sopb, p->frame_size, 0);
+ message_flags = 0;
+ if (p->is_frame_compressed) {
+ message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
+ p->parsing_frame = incoming_byte_stream =
+ grpc_chttp2_incoming_byte_stream_create(
+ exec_ctx, transport_parsing, stream_parsing, p->frame_size,
+ message_flags, &p->incoming_frames);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
if (cur == end) {
@@ -139,26 +215,34 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
}
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
- if ((gpr_uint32)(end - cur) == p->frame_size) {
- grpc_sopb_add_slice(&p->incoming_sopb,
- gpr_slice_sub(slice, cur - beg, end - beg));
+ if ((uint32_t)(end - cur) == p->frame_size) {
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
+ gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame, 1,
+ 1);
+ p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_CHTTP2_PARSE_OK;
- } else if ((gpr_uint32)(end - cur) > p->frame_size) {
- grpc_sopb_add_slice(
- &p->incoming_sopb,
- gpr_slice_sub(slice, cur - beg, cur + p->frame_size - beg));
+ } else if ((uint32_t)(end - cur) > p->frame_size) {
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
+ gpr_slice_sub(slice, (size_t)(cur - beg),
+ (size_t)(cur + p->frame_size - beg)));
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame, 1,
+ 1);
+ p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
} else {
- grpc_sopb_add_slice(&p->incoming_sopb,
- gpr_slice_sub(slice, cur - beg, end - beg));
- p->frame_size -= (end - cur);
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
+ gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
+ GPR_ASSERT((size_t)(end - cur) <= p->frame_size);
+ p->frame_size -= (uint32_t)(end - cur);
return GRPC_CHTTP2_PARSE_OK;
}
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return GRPC_CHTTP2_CONNECTION_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}
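The GRPC_CHTTP2_DATA_FH_0 through FH_4 states above consume the 5-byte gRPC message prefix carried inside HTTP/2 DATA frames: one compressed-flag byte (0 or 1) followed by a big-endian 32-bit message length. A minimal standalone sketch of that prefix, assuming a contiguous buffer (names here are hypothetical, not part of the transport API):

#include <stddef.h>
#include <stdint.h>

/* Sketch: read the 5-byte gRPC message prefix from a contiguous buffer.
   Returns 1 on success, 0 if more bytes are needed, -1 on a bad type byte.
   The real parser handles byte-at-a-time arrival via its FH_0..FH_4 states. */
static int parse_grpc_message_prefix(const uint8_t *buf, size_t len,
                                     int *compressed, uint32_t *msg_len) {
  if (len < 5) return 0;
  if (buf[0] > 1) return -1; /* only 0 (plain) and 1 (compressed) are valid */
  *compressed = buf[0];
  *msg_len = ((uint32_t)buf[1] << 24) | ((uint32_t)buf[2] << 16) |
             ((uint32_t)buf[3] << 8) | (uint32_t)buf[4];
  return 1;
}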
diff --git a/src/core/transport/chttp2/frame_data.h b/src/core/transport/chttp2/frame_data.h
index 8d6cfcb841..92929d5c97 100644
--- a/src/core/transport/chttp2/frame_data.h
+++ b/src/core/transport/chttp2/frame_data.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,15 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H
/* Parser for GRPC streams embedded in DATA frames */
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/byte_stream.h"
#include "src/core/transport/chttp2/frame.h"
typedef enum {
@@ -50,32 +51,51 @@ typedef enum {
GRPC_CHTTP2_DATA_FRAME
} grpc_chttp2_stream_state;
+typedef struct grpc_chttp2_incoming_byte_stream
+ grpc_chttp2_incoming_byte_stream;
+
+typedef struct grpc_chttp2_incoming_frame_queue {
+ grpc_chttp2_incoming_byte_stream *head;
+ grpc_chttp2_incoming_byte_stream *tail;
+} grpc_chttp2_incoming_frame_queue;
+
typedef struct {
grpc_chttp2_stream_state state;
- gpr_uint8 is_last_frame;
- gpr_uint8 frame_type;
- gpr_uint32 frame_size;
+ uint8_t is_last_frame;
+ uint8_t frame_type;
+ uint32_t frame_size;
- grpc_stream_op_buffer incoming_sopb;
+ int is_frame_compressed;
+ grpc_chttp2_incoming_frame_queue incoming_frames;
+ grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
+void grpc_chttp2_incoming_frame_queue_merge(
+ grpc_chttp2_incoming_frame_queue *head_dst,
+ grpc_chttp2_incoming_frame_queue *tail_src);
+grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
+ grpc_chttp2_incoming_frame_queue *q);
+
/* initialize per-stream state for data frame parsing */
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser);
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser);
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *parser);
/* start processing a new data frame */
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
- grpc_chttp2_data_parser *parser, gpr_uint8 flags);
+ grpc_chttp2_data_parser *parser, uint8_t flags);
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-/* create a slice with an empty data frame and is_last set */
-gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id);
+void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
+ uint32_t write_bytes, int is_eof,
+ gpr_slice_buffer *outbuf);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H */
diff --git a/src/core/transport/chttp2/frame_goaway.c b/src/core/transport/chttp2/frame_goaway.c
index 1ccbba840c..2fa525e989 100644
--- a/src/core/transport/chttp2/frame_goaway.c
+++ b/src/core/transport/chttp2/frame_goaway.c
@@ -48,7 +48,7 @@ void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p) {
}
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_begin_frame(
- grpc_chttp2_goaway_parser *p, gpr_uint32 length, gpr_uint8 flags) {
+ grpc_chttp2_goaway_parser *p, uint32_t length, uint8_t flags) {
if (length < 8) {
gpr_log(GPR_ERROR, "goaway frame too short (%d bytes)", length);
return GRPC_CHTTP2_CONNECTION_ERROR;
@@ -63,11 +63,12 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
- gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *const beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
grpc_chttp2_goaway_parser *p = parser;
switch (p->state) {
@@ -76,7 +77,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_CHTTP2_PARSE_OK;
}
- p->last_stream_id = ((gpr_uint32)*cur) << 24;
+ p->last_stream_id = ((uint32_t)*cur) << 24;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_LSI1:
@@ -84,7 +85,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_LSI1;
return GRPC_CHTTP2_PARSE_OK;
}
- p->last_stream_id |= ((gpr_uint32)*cur) << 16;
+ p->last_stream_id |= ((uint32_t)*cur) << 16;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_LSI2:
@@ -92,7 +93,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_LSI2;
return GRPC_CHTTP2_PARSE_OK;
}
- p->last_stream_id |= ((gpr_uint32)*cur) << 8;
+ p->last_stream_id |= ((uint32_t)*cur) << 8;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_LSI3:
@@ -100,7 +101,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_LSI3;
return GRPC_CHTTP2_PARSE_OK;
}
- p->last_stream_id |= ((gpr_uint32)*cur);
+ p->last_stream_id |= ((uint32_t)*cur);
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_ERR0:
@@ -108,7 +109,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_ERR0;
return GRPC_CHTTP2_PARSE_OK;
}
- p->error_code = ((gpr_uint32)*cur) << 24;
+ p->error_code = ((uint32_t)*cur) << 24;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_ERR1:
@@ -116,7 +117,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_ERR1;
return GRPC_CHTTP2_PARSE_OK;
}
- p->error_code |= ((gpr_uint32)*cur) << 16;
+ p->error_code |= ((uint32_t)*cur) << 16;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_ERR2:
@@ -124,7 +125,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_ERR2;
return GRPC_CHTTP2_PARSE_OK;
}
- p->error_code |= ((gpr_uint32)*cur) << 8;
+ p->error_code |= ((uint32_t)*cur) << 8;
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_ERR3:
@@ -132,40 +133,41 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
p->state = GRPC_CHTTP2_GOAWAY_ERR3;
return GRPC_CHTTP2_PARSE_OK;
}
- p->error_code |= ((gpr_uint32)*cur);
+ p->error_code |= ((uint32_t)*cur);
++cur;
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_DEBUG:
- memcpy(p->debug_data + p->debug_pos, cur, end - cur);
- p->debug_pos += end - cur;
+ memcpy(p->debug_data + p->debug_pos, cur, (size_t)(end - cur));
+ GPR_ASSERT((size_t)(end - cur) < UINT32_MAX - p->debug_pos);
+ p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
transport_parsing->goaway_received = 1;
transport_parsing->goaway_last_stream_index = p->last_stream_id;
gpr_slice_unref(transport_parsing->goaway_text);
- transport_parsing->goaway_error = p->error_code;
+ transport_parsing->goaway_error = (grpc_status_code)p->error_code;
transport_parsing->goaway_text =
gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
p->debug_data = NULL;
}
return GRPC_CHTTP2_PARSE_OK;
}
- gpr_log(GPR_ERROR, "Should never end up here");
- abort();
- return GRPC_CHTTP2_CONNECTION_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}
-void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,
+void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,
gpr_slice_buffer *slice_buffer) {
gpr_slice header = gpr_slice_malloc(9 + 4 + 4);
- gpr_uint8 *p = GPR_SLICE_START_PTR(header);
- gpr_uint32 frame_length = 4 + 4 + GPR_SLICE_LENGTH(debug_data);
+ uint8_t *p = GPR_SLICE_START_PTR(header);
+ uint32_t frame_length;
+ GPR_ASSERT(GPR_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
+ frame_length = 4 + 4 + (uint32_t)GPR_SLICE_LENGTH(debug_data);
/* frame header: length */
- *p++ = frame_length >> 16;
- *p++ = frame_length >> 8;
- *p++ = frame_length;
+ *p++ = (uint8_t)(frame_length >> 16);
+ *p++ = (uint8_t)(frame_length >> 8);
+ *p++ = (uint8_t)(frame_length);
/* frame header: type */
*p++ = GRPC_CHTTP2_FRAME_GOAWAY;
/* frame header: flags */
@@ -176,15 +178,15 @@ void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,
*p++ = 0;
*p++ = 0;
/* payload: last stream id */
- *p++ = last_stream_id >> 24;
- *p++ = last_stream_id >> 16;
- *p++ = last_stream_id >> 8;
- *p++ = last_stream_id;
+ *p++ = (uint8_t)(last_stream_id >> 24);
+ *p++ = (uint8_t)(last_stream_id >> 16);
+ *p++ = (uint8_t)(last_stream_id >> 8);
+ *p++ = (uint8_t)(last_stream_id);
/* payload: error code */
- *p++ = error_code >> 24;
- *p++ = error_code >> 16;
- *p++ = error_code >> 8;
- *p++ = error_code;
+ *p++ = (uint8_t)(error_code >> 24);
+ *p++ = (uint8_t)(error_code >> 16);
+ *p++ = (uint8_t)(error_code >> 8);
+ *p++ = (uint8_t)(error_code);
GPR_ASSERT(p == GPR_SLICE_END_PTR(header));
gpr_slice_buffer_add(slice_buffer, header);
gpr_slice_buffer_add(slice_buffer, debug_data);
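grpc_chttp2_goaway_append above serializes a complete GOAWAY frame: a 9-byte HTTP/2 frame header (24-bit payload length, type, flags, stream id 0) followed by the 4-byte last-stream-id, the 4-byte error code, and the debug data, so the payload length is always 8 plus the debug bytes. A hedged usage sketch (the queued slices would normally be handed to the transport's writer; include paths are those used in this tree):

#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>

#include "src/core/transport/chttp2/frame_goaway.h"
#include "src/core/transport/chttp2/http2_errors.h"

/* Sketch: queue a GOAWAY with a short debug string (illustrative only). */
static void queue_goaway_example(void) {
  gpr_slice_buffer qbuf;
  gpr_slice_buffer_init(&qbuf);
  /* total bytes queued: 9 (frame header) + 4 + 4 + strlen("settings error") */
  grpc_chttp2_goaway_append(2 /* last_stream_id */, GRPC_CHTTP2_PROTOCOL_ERROR,
                            gpr_slice_from_copied_string("settings error"),
                            &qbuf);
  gpr_slice_buffer_destroy(&qbuf);
}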
diff --git a/src/core/transport/chttp2/frame_goaway.h b/src/core/transport/chttp2/frame_goaway.h
index 9c5edfc821..616287e9ee 100644
--- a/src/core/transport/chttp2/frame_goaway.h
+++ b/src/core/transport/chttp2/frame_goaway.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,10 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
+#include "src/core/iomgr/exec_ctx.h"
#include "src/core/transport/chttp2/frame.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
@@ -53,23 +54,24 @@ typedef enum {
typedef struct {
grpc_chttp2_goaway_parse_state state;
- gpr_uint32 last_stream_id;
- gpr_uint32 error_code;
+ uint32_t last_stream_id;
+ uint32_t error_code;
char *debug_data;
- gpr_uint32 debug_length;
- gpr_uint32 debug_pos;
+ uint32_t debug_length;
+ uint32_t debug_pos;
} grpc_chttp2_goaway_parser;
void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_begin_frame(
- grpc_chttp2_goaway_parser *parser, gpr_uint32 length, gpr_uint8 flags);
+ grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,
+void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,
gpr_slice_buffer *slice_buffer);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H */
diff --git a/src/core/transport/chttp2/frame_ping.c b/src/core/transport/chttp2/frame_ping.c
index 05451c7a8a..c6ab522283 100644
--- a/src/core/transport/chttp2/frame_ping.c
+++ b/src/core/transport/chttp2/frame_ping.c
@@ -39,9 +39,9 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-gpr_slice grpc_chttp2_ping_create(gpr_uint8 ack, gpr_uint8 *opaque_8bytes) {
+gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes) {
gpr_slice slice = gpr_slice_malloc(9 + 8);
- gpr_uint8 *p = GPR_SLICE_START_PTR(slice);
+ uint8_t *p = GPR_SLICE_START_PTR(slice);
*p++ = 0;
*p++ = 0;
@@ -58,7 +58,7 @@ gpr_slice grpc_chttp2_ping_create(gpr_uint8 ack, gpr_uint8 *opaque_8bytes) {
}
grpc_chttp2_parse_error grpc_chttp2_ping_parser_begin_frame(
- grpc_chttp2_ping_parser *parser, gpr_uint32 length, gpr_uint8 flags) {
+ grpc_chttp2_ping_parser *parser, uint32_t length, uint8_t flags) {
if (flags & 0xfe || length != 8) {
gpr_log(GPR_ERROR, "invalid ping: length=%d, flags=%02x", length, flags);
return GRPC_CHTTP2_CONNECTION_ERROR;
@@ -69,13 +69,13 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
- gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *const beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
grpc_chttp2_ping_parser *p = parser;
- grpc_chttp2_outstanding_ping *ping;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes[p->byte] = *cur;
@@ -86,15 +86,7 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
- for (ping = transport_parsing->pings.next;
- ping != &transport_parsing->pings; ping = ping->next) {
- if (0 == memcmp(p->opaque_8bytes, ping->id, 8)) {
- grpc_iomgr_add_delayed_callback(ping->on_recv, 1);
- }
- ping->next->prev = ping->prev;
- ping->prev->next = ping->next;
- gpr_free(ping);
- }
+ grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
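A PING frame carries exactly 8 opaque bytes; the parser above buffers them, then either resolves an outstanding ping via grpc_chttp2_ack_ping (when the ACK flag was set) or queues an acknowledgement built with grpc_chttp2_ping_create(1, ...). A small sketch of the 17-byte frame produced by grpc_chttp2_ping_create (illustrative assertions only):

#include <string.h>

#include <grpc/support/log.h>
#include <grpc/support/slice.h>

#include "src/core/transport/chttp2/frame_ping.h"

static void ping_layout_example(void) {
  uint8_t opaque[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  gpr_slice s = grpc_chttp2_ping_create(0 /* not an ack */, opaque);
  uint8_t *p = GPR_SLICE_START_PTR(s);
  /* 9-byte frame header (length 8, type PING, flags, stream id 0) + payload */
  GPR_ASSERT(GPR_SLICE_LENGTH(s) == 9 + 8);
  GPR_ASSERT(p[3] == GRPC_CHTTP2_FRAME_PING);
  GPR_ASSERT(0 == memcmp(p + 9, opaque, 8));
  gpr_slice_unref(s);
}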
diff --git a/src/core/transport/chttp2/frame_ping.h b/src/core/transport/chttp2/frame_ping.h
index 99197e8352..fc4dd7ac58 100644
--- a/src/core/transport/chttp2/frame_ping.h
+++ b/src/core/transport/chttp2/frame_ping.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,24 +31,26 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
typedef struct {
- gpr_uint8 byte;
- gpr_uint8 is_ack;
- gpr_uint8 opaque_8bytes[8];
+ uint8_t byte;
+ uint8_t is_ack;
+ uint8_t opaque_8bytes[8];
} grpc_chttp2_ping_parser;
-gpr_slice grpc_chttp2_ping_create(gpr_uint8 ack, gpr_uint8 *opaque_8bytes);
+gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_chttp2_parse_error grpc_chttp2_ping_parser_begin_frame(
- grpc_chttp2_ping_parser *parser, gpr_uint32 length, gpr_uint8 flags);
+ grpc_chttp2_ping_parser *parser, uint32_t length, uint8_t flags);
grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_PING_H */
diff --git a/src/core/transport/chttp2/frame_rst_stream.c b/src/core/transport/chttp2/frame_rst_stream.c
index a878d936c1..754529e4b9 100644
--- a/src/core/transport/chttp2/frame_rst_stream.c
+++ b/src/core/transport/chttp2/frame_rst_stream.c
@@ -38,29 +38,29 @@
#include "src/core/transport/chttp2/frame.h"
-gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 id, gpr_uint32 code) {
+gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code) {
gpr_slice slice = gpr_slice_malloc(13);
- gpr_uint8 *p = GPR_SLICE_START_PTR(slice);
+ uint8_t *p = GPR_SLICE_START_PTR(slice);
*p++ = 0;
*p++ = 0;
*p++ = 4;
*p++ = GRPC_CHTTP2_FRAME_RST_STREAM;
*p++ = 0;
- *p++ = id >> 24;
- *p++ = id >> 16;
- *p++ = id >> 8;
- *p++ = id;
- *p++ = code >> 24;
- *p++ = code >> 16;
- *p++ = code >> 8;
- *p++ = code;
+ *p++ = (uint8_t)(id >> 24);
+ *p++ = (uint8_t)(id >> 16);
+ *p++ = (uint8_t)(id >> 8);
+ *p++ = (uint8_t)(id);
+ *p++ = (uint8_t)(code >> 24);
+ *p++ = (uint8_t)(code >> 16);
+ *p++ = (uint8_t)(code >> 8);
+ *p++ = (uint8_t)(code);
return slice;
}
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
- grpc_chttp2_rst_stream_parser *parser, gpr_uint32 length, gpr_uint8 flags) {
+ grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags) {
if (length != 4) {
gpr_log(GPR_ERROR, "invalid rst_stream: length=%d, flags=%02x", length,
flags);
@@ -71,11 +71,12 @@ grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
- gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *const beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
grpc_chttp2_rst_stream_parser *p = parser;
while (p->byte != 4 && cur != end) {
@@ -88,11 +89,10 @@ grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
GPR_ASSERT(is_last);
stream_parsing->received_close = 1;
stream_parsing->saw_rst_stream = 1;
- stream_parsing->rst_stream_reason =
- (((gpr_uint32)p->reason_bytes[0]) << 24) |
- (((gpr_uint32)p->reason_bytes[1]) << 16) |
- (((gpr_uint32)p->reason_bytes[2]) << 8) |
- (((gpr_uint32)p->reason_bytes[3]));
+ stream_parsing->rst_stream_reason = (((uint32_t)p->reason_bytes[0]) << 24) |
+ (((uint32_t)p->reason_bytes[1]) << 16) |
+ (((uint32_t)p->reason_bytes[2]) << 8) |
+ (((uint32_t)p->reason_bytes[3]));
}
return GRPC_CHTTP2_PARSE_OK;
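grpc_chttp2_rst_stream_create above emits a complete 13-byte frame: the 9-byte header (payload length 4, type RST_STREAM, flags 0, stream id) followed by the 4-byte error code, which the parser reassembles big-endian. A hedged usage sketch:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>

#include "src/core/transport/chttp2/frame_rst_stream.h"

static void rst_stream_example(void) {
  /* 0x8 is the HTTP/2 CANCEL error code */
  gpr_slice s = grpc_chttp2_rst_stream_create(1 /* stream id */, 0x8);
  GPR_ASSERT(GPR_SLICE_LENGTH(s) == 13);
  GPR_ASSERT(GPR_SLICE_START_PTR(s)[3] == GRPC_CHTTP2_FRAME_RST_STREAM);
  gpr_slice_unref(s);
}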
diff --git a/src/core/transport/chttp2/frame_rst_stream.h b/src/core/transport/chttp2/frame_rst_stream.h
index ed69e588af..d563a22e24 100644
--- a/src/core/transport/chttp2/frame_rst_stream.h
+++ b/src/core/transport/chttp2/frame_rst_stream.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,23 +31,25 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/iomgr/exec_ctx.h"
typedef struct {
- gpr_uint8 byte;
- gpr_uint8 reason_bytes[4];
+ uint8_t byte;
+ uint8_t reason_bytes[4];
} grpc_chttp2_rst_stream_parser;
-gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 stream_id, gpr_uint32 code);
+gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
- grpc_chttp2_rst_stream_parser *parser, gpr_uint32 length, gpr_uint8 flags);
+ grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H */
diff --git a/src/core/transport/chttp2/frame_settings.c b/src/core/transport/chttp2/frame_settings.c
index d42bc000ae..cc49dd4f69 100644
--- a/src/core/transport/chttp2/frame_settings.c
+++ b/src/core/transport/chttp2/frame_settings.c
@@ -36,34 +36,41 @@
#include <string.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
#include "src/core/debug/trace.h"
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2_transport.h"
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
+
+#define MAX_MAX_HEADER_LIST_SIZE (1024 * 1024 * 1024)
/* HTTP/2 mandated initial connection settings */
const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {
- {NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ {NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
{"HEADER_TABLE_SIZE", 4096, 0, 0xffffffff,
- GRPC_CHTTP2_CLAMP_INVALID_VALUE},
- {"ENABLE_PUSH", 1, 0, 1, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ GRPC_CHTTP2_CLAMP_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"ENABLE_PUSH", 1, 0, 1, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
{"MAX_CONCURRENT_STREAMS", 0xffffffffu, 0, 0xffffffffu,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
- {"INITIAL_WINDOW_SIZE", 65535, 0, 0xffffffffu,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"INITIAL_WINDOW_SIZE", 65535, 0, 0x7fffffffu,
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_FLOW_CONTROL_ERROR},
{"MAX_FRAME_SIZE", 16384, 16384, 16777215,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
- {"MAX_HEADER_LIST_SIZE", 0xffffffffu, 0, 0xffffffffu,
- GRPC_CHTTP2_CLAMP_INVALID_VALUE},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"MAX_HEADER_LIST_SIZE", MAX_MAX_HEADER_LIST_SIZE, 0,
+ MAX_MAX_HEADER_LIST_SIZE, GRPC_CHTTP2_CLAMP_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
};
-static gpr_uint8 *fill_header(gpr_uint8 *out, gpr_uint32 length,
- gpr_uint8 flags) {
- *out++ = length >> 16;
- *out++ = length >> 8;
- *out++ = length;
+static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
+ *out++ = (uint8_t)(length >> 16);
+ *out++ = (uint8_t)(length >> 8);
+ *out++ = (uint8_t)(length);
*out++ = GRPC_CHTTP2_FRAME_SETTINGS;
*out++ = flags;
*out++ = 0;
@@ -73,29 +80,29 @@ static gpr_uint8 *fill_header(gpr_uint8 *out, gpr_uint32 length,
return out;
}
-gpr_slice grpc_chttp2_settings_create(gpr_uint32 *old, const gpr_uint32 *new,
- gpr_uint32 force_mask, size_t count) {
+gpr_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+ uint32_t force_mask, size_t count) {
size_t i;
- size_t n = 0;
+ uint32_t n = 0;
gpr_slice output;
- gpr_uint8 *p;
+ uint8_t *p;
for (i = 0; i < count; i++) {
- n += (new[i] != old[i] || (force_mask & (1 << i)) != 0);
+ n += (new[i] != old[i] || (force_mask & (1u << i)) != 0);
}
output = gpr_slice_malloc(9 + 6 * n);
p = fill_header(GPR_SLICE_START_PTR(output), 6 * n, 0);
for (i = 0; i < count; i++) {
- if (new[i] != old[i] || (force_mask & (1 << i)) != 0) {
+ if (new[i] != old[i] || (force_mask & (1u << i)) != 0) {
GPR_ASSERT(i);
- *p++ = i >> 8;
- *p++ = i;
- *p++ = new[i] >> 24;
- *p++ = new[i] >> 16;
- *p++ = new[i] >> 8;
- *p++ = new[i];
+ *p++ = (uint8_t)(i >> 8);
+ *p++ = (uint8_t)(i);
+ *p++ = (uint8_t)(new[i] >> 24);
+ *p++ = (uint8_t)(new[i] >> 16);
+ *p++ = (uint8_t)(new[i] >> 8);
+ *p++ = (uint8_t)(new[i]);
old[i] = new[i];
}
}
@@ -112,11 +119,11 @@ gpr_slice grpc_chttp2_settings_ack_create(void) {
}
grpc_chttp2_parse_error grpc_chttp2_settings_parser_begin_frame(
- grpc_chttp2_settings_parser *parser, gpr_uint32 length, gpr_uint8 flags,
- gpr_uint32 *settings) {
+ grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
+ uint32_t *settings) {
parser->target_settings = settings;
memcpy(parser->incoming_settings, settings,
- GRPC_CHTTP2_NUM_SETTINGS * sizeof(gpr_uint32));
+ GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
parser->is_ack = 0;
parser->state = GRPC_CHTTP2_SPS_ID0;
if (flags == GRPC_CHTTP2_FLAG_ACK) {
@@ -138,11 +145,12 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
- void *p, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *p,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
- const gpr_uint8 *cur = GPR_SLICE_START_PTR(slice);
- const gpr_uint8 *end = GPR_SLICE_END_PTR(slice);
+ const uint8_t *cur = GPR_SLICE_START_PTR(slice);
+ const uint8_t *end = GPR_SLICE_END_PTR(slice);
if (parser->is_ack) {
return GRPC_CHTTP2_PARSE_OK;
@@ -156,13 +164,13 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
if (is_last) {
transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
- GRPC_CHTTP2_NUM_SETTINGS * sizeof(gpr_uint32));
+ GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_settings_ack_create());
}
return GRPC_CHTTP2_PARSE_OK;
}
- parser->id = ((gpr_uint16)*cur) << 8;
+ parser->id = (uint16_t)(((uint16_t)*cur) << 8);
cur++;
/* fallthrough */
case GRPC_CHTTP2_SPS_ID1:
@@ -170,7 +178,7 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
parser->state = GRPC_CHTTP2_SPS_ID1;
return GRPC_CHTTP2_PARSE_OK;
}
- parser->id |= (*cur);
+ parser->id = (uint16_t)(parser->id | (*cur));
cur++;
/* fallthrough */
case GRPC_CHTTP2_SPS_VAL0:
@@ -178,7 +186,7 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
parser->state = GRPC_CHTTP2_SPS_VAL0;
return GRPC_CHTTP2_PARSE_OK;
}
- parser->value = ((gpr_uint32)*cur) << 24;
+ parser->value = ((uint32_t)*cur) << 24;
cur++;
/* fallthrough */
case GRPC_CHTTP2_SPS_VAL1:
@@ -186,7 +194,7 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
parser->state = GRPC_CHTTP2_SPS_VAL1;
return GRPC_CHTTP2_PARSE_OK;
}
- parser->value |= ((gpr_uint32)*cur) << 16;
+ parser->value |= ((uint32_t)*cur) << 16;
cur++;
/* fallthrough */
case GRPC_CHTTP2_SPS_VAL2:
@@ -194,7 +202,7 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
parser->state = GRPC_CHTTP2_SPS_VAL2;
return GRPC_CHTTP2_PARSE_OK;
}
- parser->value |= ((gpr_uint32)*cur) << 8;
+ parser->value |= ((uint32_t)*cur) << 8;
cur++;
/* fallthrough */
case GRPC_CHTTP2_SPS_VAL3:
@@ -217,6 +225,10 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
GPR_CLAMP(parser->value, sp->min_value, sp->max_value);
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
+ grpc_chttp2_goaway_append(
+ transport_parsing->last_incoming_stream_id, sp->error_value,
+ gpr_slice_from_static_string("HTTP2 settings error"),
+ &transport_parsing->qbuf);
gpr_log(GPR_ERROR, "invalid value %u passed for %s",
parser->value, sp->name);
return GRPC_CHTTP2_CONNECTION_ERROR;
@@ -225,10 +237,11 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
transport_parsing->initial_window_update =
- (gpr_int64)parser->value -
- parser->incoming_settings[parser->id];
- gpr_log(GPR_DEBUG, "adding %d for initial_window change",
- (int)transport_parsing->initial_window_update);
+ (int64_t)parser->value - parser->incoming_settings[parser->id];
+ if (grpc_http_trace) {
+ gpr_log(GPR_DEBUG, "adding %d for initial_window change",
+ (int)transport_parsing->initial_window_update);
+ }
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
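Each SETTINGS entry on the wire is 6 bytes: a 16-bit identifier followed by a big-endian 32-bit value. grpc_chttp2_settings_create emits one entry per setting whose value changed (or whose bit is set in force_mask) and updates old in place; the parser above rebuilds the identifier and value byte by byte through the SPS_* states. A standalone sketch of the per-entry encoding (hypothetical helper, not the library's API):

#include <stdint.h>

/* Sketch: encode one SETTINGS entry (id, value) into a 6-byte buffer,
   using the same byte order as grpc_chttp2_settings_create. */
static void encode_setting_entry(uint8_t out[6], uint16_t id, uint32_t value) {
  out[0] = (uint8_t)(id >> 8);
  out[1] = (uint8_t)(id);
  out[2] = (uint8_t)(value >> 24);
  out[3] = (uint8_t)(value >> 16);
  out[4] = (uint8_t)(value >> 8);
  out[5] = (uint8_t)(value);
}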
diff --git a/src/core/transport/chttp2/frame_settings.h b/src/core/transport/chttp2/frame_settings.h
index 0ac68a9fa8..e3c10d3cc5 100644
--- a/src/core/transport/chttp2/frame_settings.h
+++ b/src/core/transport/chttp2/frame_settings.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,13 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/iomgr/exec_ctx.h"
typedef enum {
GRPC_CHTTP2_SPS_ID0,
@@ -60,11 +61,11 @@ typedef enum {
typedef struct {
grpc_chttp2_settings_parse_state state;
- gpr_uint32 *target_settings;
- gpr_uint8 is_ack;
- gpr_uint16 id;
- gpr_uint32 value;
- gpr_uint32 incoming_settings[GRPC_CHTTP2_NUM_SETTINGS];
+ uint32_t *target_settings;
+ uint8_t is_ack;
+ uint16_t id;
+ uint32_t value;
+ uint32_t incoming_settings[GRPC_CHTTP2_NUM_SETTINGS];
} grpc_chttp2_settings_parser;
typedef enum {
@@ -74,10 +75,11 @@ typedef enum {
typedef struct {
const char *name;
- gpr_uint32 default_value;
- gpr_uint32 min_value;
- gpr_uint32 max_value;
+ uint32_t default_value;
+ uint32_t min_value;
+ uint32_t max_value;
grpc_chttp2_invalid_value_behavior invalid_value_behavior;
+ uint32_t error_value;
} grpc_chttp2_setting_parameters;
/* HTTP/2 mandated connection setting parameters */
@@ -85,16 +87,17 @@ extern const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
/* Create a settings frame by diffing old & new, and updating old to be new */
-gpr_slice grpc_chttp2_settings_create(gpr_uint32 *old, const gpr_uint32 *new,
- gpr_uint32 force_mask, size_t count);
+gpr_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+ uint32_t force_mask, size_t count);
/* Create an ack settings frame */
gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_chttp2_parse_error grpc_chttp2_settings_parser_begin_frame(
- grpc_chttp2_settings_parser *parser, gpr_uint32 length, gpr_uint8 flags,
- gpr_uint32 *settings);
+ grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
+ uint32_t *settings);
grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H */
diff --git a/src/core/transport/chttp2/frame_window_update.c b/src/core/transport/chttp2/frame_window_update.c
index b817df7745..62d9bac117 100644
--- a/src/core/transport/chttp2/frame_window_update.c
+++ b/src/core/transport/chttp2/frame_window_update.c
@@ -36,10 +36,10 @@
#include <grpc/support/log.h>
-gpr_slice grpc_chttp2_window_update_create(gpr_uint32 id,
- gpr_uint32 window_update) {
+gpr_slice grpc_chttp2_window_update_create(uint32_t id,
+ uint32_t window_update) {
gpr_slice slice = gpr_slice_malloc(13);
- gpr_uint8 *p = GPR_SLICE_START_PTR(slice);
+ uint8_t *p = GPR_SLICE_START_PTR(slice);
GPR_ASSERT(window_update);
@@ -48,21 +48,20 @@ gpr_slice grpc_chttp2_window_update_create(gpr_uint32 id,
*p++ = 4;
*p++ = GRPC_CHTTP2_FRAME_WINDOW_UPDATE;
*p++ = 0;
- *p++ = id >> 24;
- *p++ = id >> 16;
- *p++ = id >> 8;
- *p++ = id;
- *p++ = window_update >> 24;
- *p++ = window_update >> 16;
- *p++ = window_update >> 8;
- *p++ = window_update;
+ *p++ = (uint8_t)(id >> 24);
+ *p++ = (uint8_t)(id >> 16);
+ *p++ = (uint8_t)(id >> 8);
+ *p++ = (uint8_t)(id);
+ *p++ = (uint8_t)(window_update >> 24);
+ *p++ = (uint8_t)(window_update >> 16);
+ *p++ = (uint8_t)(window_update >> 8);
+ *p++ = (uint8_t)(window_update);
return slice;
}
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_begin_frame(
- grpc_chttp2_window_update_parser *parser, gpr_uint32 length,
- gpr_uint8 flags) {
+ grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags) {
if (flags || length != 4) {
gpr_log(GPR_ERROR, "invalid window update: length=%d, flags=%02x", length,
flags);
@@ -74,39 +73,39 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
- gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *const beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
grpc_chttp2_window_update_parser *p = parser;
while (p->byte != 4 && cur != end) {
- p->amount |= ((gpr_uint32)*cur) << (8 * (3 - p->byte));
+ p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
cur++;
p->byte++;
}
if (p->byte == 4) {
- if (p->amount == 0 || (p->amount & 0x80000000u)) {
+ uint32_t received_update = p->amount;
+ if (received_update == 0 || (received_update & 0x80000000u)) {
gpr_log(GPR_ERROR, "invalid window update bytes: %d", p->amount);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
GPR_ASSERT(is_last);
- if (transport_parsing->incoming_stream_id) {
- if (stream_parsing) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing,
- stream_parsing, outgoing_window_update,
- p->amount);
- stream_parsing->outgoing_window_update += p->amount;
+ if (transport_parsing->incoming_stream_id != 0) {
+ if (stream_parsing != NULL) {
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
+ stream_parsing, outgoing_window,
+ received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
} else {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("update", transport_parsing,
- outgoing_window_update, p->amount);
- transport_parsing->outgoing_window_update += p->amount;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
+ outgoing_window, received_update);
}
}
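A WINDOW_UPDATE carries a single 31-bit credit: the parser above accumulates the 4 payload bytes big-endian, rejects zero and values with the reserved high bit set, and then credits either the stream's or the transport's outgoing window. A standalone sketch of that accumulation and validity check (hypothetical helper):

#include <stdint.h>

/* Sketch: accumulate a WINDOW_UPDATE amount byte by byte, as the parser
   above does, and validate it (0 and the reserved bit are connection
   errors). */
static int accumulate_window_update(const uint8_t payload[4],
                                    uint32_t *amount) {
  uint32_t v = 0;
  int i;
  for (i = 0; i < 4; i++) {
    v |= ((uint32_t)payload[i]) << (8 * (3 - i));
  }
  if (v == 0 || (v & 0x80000000u)) return 0;
  *amount = v;
  return 1;
}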
diff --git a/src/core/transport/chttp2/frame_window_update.h b/src/core/transport/chttp2/frame_window_update.h
index deba801d00..0b3712b091 100644
--- a/src/core/transport/chttp2/frame_window_update.h
+++ b/src/core/transport/chttp2/frame_window_update.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,26 +31,26 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
typedef struct {
- gpr_uint8 byte;
- gpr_uint8 is_connection_update;
- gpr_uint32 amount;
+ uint8_t byte;
+ uint8_t is_connection_update;
+ uint32_t amount;
} grpc_chttp2_window_update_parser;
-gpr_slice grpc_chttp2_window_update_create(gpr_uint32 id,
- gpr_uint32 window_delta);
+gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta);
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_begin_frame(
- grpc_chttp2_window_update_parser *parser, gpr_uint32 length,
- gpr_uint8 flags);
+ grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/transport/chttp2/hpack_encoder.c b/src/core/transport/chttp2/hpack_encoder.c
new file mode 100644
index 0000000000..f30f574d06
--- /dev/null
+++ b/src/core/transport/chttp2/hpack_encoder.c
@@ -0,0 +1,568 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/transport/chttp2/hpack_encoder.h"
+
+#include <assert.h>
+#include <string.h>
+
+/* This is here for grpc_is_binary_header
+ * TODO(murgatroid99): Remove this
+ */
+#include <grpc/grpc.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/transport/chttp2/bin_encoder.h"
+#include "src/core/transport/chttp2/hpack_table.h"
+#include "src/core/transport/chttp2/timeout_encoding.h"
+#include "src/core/transport/chttp2/varint.h"
+#include "src/core/transport/static_metadata.h"
+
+#define HASH_FRAGMENT_1(x) ((x)&255)
+#define HASH_FRAGMENT_2(x) ((x >> 8) & 255)
+#define HASH_FRAGMENT_3(x) ((x >> 16) & 255)
+#define HASH_FRAGMENT_4(x) ((x >> 24) & 255)
+
+/* if the probability of this item being seen again is less than
+   1/ONE_ON_ADD_PROBABILITY then don't add it to the table */
+#define ONE_ON_ADD_PROBABILITY 128
+/* don't consider adding anything bigger than this to the hpack table */
+#define MAX_DECODER_SPACE_USAGE 512
+
+typedef struct {
+ int is_first_frame;
+ /* number of bytes in 'output' when we started the frame - used to calculate
+ frame length */
+ size_t output_length_at_start_of_frame;
+ /* index (in output) of the header for the current frame */
+ size_t header_idx;
+ /* have we seen a regular (non-colon-prefixed) header yet? */
+ uint8_t seen_regular_header;
+ /* output stream id */
+ uint32_t stream_id;
+ gpr_slice_buffer *output;
+} framer_state;
+
+/* fills p (which is expected to be 9 bytes long) with a frame header */
+static void fill_header(uint8_t *p, uint8_t type, uint32_t id, size_t len,
+ uint8_t flags) {
+ GPR_ASSERT(len < 16777316);
+ *p++ = (uint8_t)(len >> 16);
+ *p++ = (uint8_t)(len >> 8);
+ *p++ = (uint8_t)(len);
+ *p++ = type;
+ *p++ = flags;
+ *p++ = (uint8_t)(id >> 24);
+ *p++ = (uint8_t)(id >> 16);
+ *p++ = (uint8_t)(id >> 8);
+ *p++ = (uint8_t)(id);
+}
+
+/* finish a frame - fill in the previously reserved header */
+static void finish_frame(framer_state *st, int is_header_boundary,
+ int is_last_in_stream) {
+ uint8_t type = 0xff;
+ type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
+ : GRPC_CHTTP2_FRAME_CONTINUATION;
+ fill_header(
+ GPR_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
+ st->stream_id, st->output->length - st->output_length_at_start_of_frame,
+ (uint8_t)((is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
+ (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
+ st->is_first_frame = 0;
+}
+
+/* begin a new frame: reserve space for the 9-byte frame header and record
+   how many bytes of output precede the frame's payload */
+static void begin_frame(framer_state *st) {
+ st->header_idx =
+ gpr_slice_buffer_add_indexed(st->output, gpr_slice_malloc(9));
+ st->output_length_at_start_of_frame = st->output->length;
+}
+
+/* ensure the current frame has room for at least need_bytes more bytes --
+   finishes the current frame and begins a new one if it does not */
+static void ensure_space(framer_state *st, size_t need_bytes) {
+ if (st->output->length - st->output_length_at_start_of_frame + need_bytes <=
+ GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
+ return;
+ }
+ finish_frame(st, 0, 0);
+ begin_frame(st);
+}
+
+/* increment a filter count, halve all counts if one element reaches max */
+static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) {
+ elems[idx]++;
+ if (elems[idx] < 255) {
+ (*sum)++;
+ } else {
+ int i;
+ *sum = 0;
+ for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_FILTERS; i++) {
+ elems[i] /= 2;
+ (*sum) += elems[i];
+ }
+ }
+}
+
+static void add_header_data(framer_state *st, gpr_slice slice) {
+ size_t len = GPR_SLICE_LENGTH(slice);
+ size_t remaining;
+ if (len == 0) return;
+ remaining = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
+ st->output_length_at_start_of_frame - st->output->length;
+ if (len <= remaining) {
+ gpr_slice_buffer_add(st->output, slice);
+ } else {
+ gpr_slice_buffer_add(st->output, gpr_slice_split_head(&slice, remaining));
+ finish_frame(st, 0, 0);
+ begin_frame(st);
+ add_header_data(st, slice);
+ }
+}
+
+static uint8_t *add_tiny_header_data(framer_state *st, size_t len) {
+ ensure_space(st, len);
+ return gpr_slice_buffer_tiny_add(st->output, len);
+}
+
+static void evict_entry(grpc_chttp2_hpack_compressor *c) {
+ c->tail_remote_index++;
+ GPR_ASSERT(c->tail_remote_index > 0);
+ GPR_ASSERT(c->table_size >=
+ c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
+ GPR_ASSERT(c->table_elems > 0);
+ c->table_size =
+ (uint16_t)(c->table_size -
+ c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
+ c->table_elems--;
+}
+
+/* add an element to the decoder table */
+static void add_elem(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem) {
+ uint32_t key_hash = elem->key->hash;
+ uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
+ uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
+ size_t elem_size = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
+ GPR_SLICE_LENGTH(elem->value->slice);
+
+ GPR_ASSERT(elem_size < 65536);
+
+ if (elem_size > c->max_table_size) {
+ while (c->table_size > 0) {
+ evict_entry(c);
+ }
+ return;
+ }
+
+ /* Reserve space for this element in the remote table: if this overflows
+ the current table, drop elements until it fits, matching the decompressor
+ algorithm */
+ while (c->table_size + elem_size > c->max_table_size) {
+ evict_entry(c);
+ }
+ GPR_ASSERT(c->table_elems < c->max_table_size);
+ c->table_elem_size[new_index % c->cap_table_elems] = (uint16_t)elem_size;
+ c->table_size = (uint16_t)(c->table_size + elem_size);
+ c->table_elems++;
+
+ /* Store this element into {entries,indices}_elem */
+ if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem) {
+ /* already there: update with new index */
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem) {
+ /* already there (cuckoo): update with new index */
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ } else if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == NULL) {
+ /* not there, but a free element: add */
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == NULL) {
+ /* not there (cuckoo), but a free element: add */
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
+ /* not there: replace oldest */
+ GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else {
+ /* not there: replace oldest */
+ GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ }
+
+ /* do exactly the same for the key (so we can find by that again too) */
+
+ if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key) {
+ c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
+ } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key) {
+ c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
+ } else if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == NULL) {
+ c->entries_keys[HASH_FRAGMENT_2(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
+ } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == NULL) {
+ c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
+ } else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] <
+ c->indices_keys[HASH_FRAGMENT_3(key_hash)]) {
+ GRPC_MDSTR_UNREF(c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
+ c->entries_keys[HASH_FRAGMENT_2(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
+ } else {
+ GRPC_MDSTR_UNREF(c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
+ c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
+ }
+}
+
+static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
+ framer_state *st) {
+ uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
+ GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
+ len);
+}
+
+static gpr_slice get_wire_value(grpc_mdelem *elem, uint8_t *huffman_prefix) {
+ if (grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(elem->key->slice),
+ GPR_SLICE_LENGTH(elem->key->slice))) {
+ *huffman_prefix = 0x80;
+ return grpc_mdstr_as_base64_encoded_and_huffman_compressed(elem->value);
+ }
+ /* TODO(ctiller): opportunistically compress non-binary headers */
+ *huffman_prefix = 0x00;
+ return elem->value->slice;
+}
+
+static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
+ uint32_t key_index, grpc_mdelem *elem,
+ framer_state *st) {
+ uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
+ uint8_t huffman_prefix;
+ gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ size_t len_val = GPR_SLICE_LENGTH(value_slice);
+ uint32_t len_val_len;
+ GPR_ASSERT(len_val <= UINT32_MAX);
+ len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
+ GRPC_CHTTP2_WRITE_VARINT(key_index, 2, 0x40,
+ add_tiny_header_data(st, len_pfx), len_pfx);
+ GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
+ add_tiny_header_data(st, len_val_len), len_val_len);
+ add_header_data(st, gpr_slice_ref(value_slice));
+}
+
+static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
+ uint32_t key_index, grpc_mdelem *elem,
+ framer_state *st) {
+ uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
+ uint8_t huffman_prefix;
+ gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ size_t len_val = GPR_SLICE_LENGTH(value_slice);
+ uint32_t len_val_len;
+ GPR_ASSERT(len_val <= UINT32_MAX);
+ len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
+ GRPC_CHTTP2_WRITE_VARINT(key_index, 4, 0x00,
+ add_tiny_header_data(st, len_pfx), len_pfx);
+ GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
+ add_tiny_header_data(st, len_val_len), len_val_len);
+ add_header_data(st, gpr_slice_ref(value_slice));
+}
+
+static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem *elem, framer_state *st) {
+ uint32_t len_key = (uint32_t)GPR_SLICE_LENGTH(elem->key->slice);
+ uint8_t huffman_prefix;
+ gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ uint32_t len_val = (uint32_t)GPR_SLICE_LENGTH(value_slice);
+ uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
+ uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
+ GPR_ASSERT(len_key <= UINT32_MAX);
+ GPR_ASSERT(GPR_SLICE_LENGTH(value_slice) <= UINT32_MAX);
+ *add_tiny_header_data(st, 1) = 0x40;
+ GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
+ add_tiny_header_data(st, len_key_len), len_key_len);
+ add_header_data(st, gpr_slice_ref(elem->key->slice));
+ GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
+ add_tiny_header_data(st, len_val_len), len_val_len);
+ add_header_data(st, gpr_slice_ref(value_slice));
+}
+
+static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem *elem, framer_state *st) {
+ uint32_t len_key = (uint32_t)GPR_SLICE_LENGTH(elem->key->slice);
+ uint8_t huffman_prefix;
+ gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ uint32_t len_val = (uint32_t)GPR_SLICE_LENGTH(value_slice);
+ uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
+ uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
+ GPR_ASSERT(len_key <= UINT32_MAX);
+ GPR_ASSERT(GPR_SLICE_LENGTH(value_slice) <= UINT32_MAX);
+ *add_tiny_header_data(st, 1) = 0x00;
+ GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
+ add_tiny_header_data(st, len_key_len), len_key_len);
+ add_header_data(st, gpr_slice_ref(elem->key->slice));
+ GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
+ add_tiny_header_data(st, len_val_len), len_val_len);
+ add_header_data(st, gpr_slice_ref(value_slice));
+}
+
+static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c,
+ framer_state *st) {
+ uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3);
+ GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20,
+ add_tiny_header_data(st, len), len);
+ c->advertise_table_size_change = 0;
+}
+
+static uint32_t dynidx(grpc_chttp2_hpack_compressor *c, uint32_t elem_index) {
+ return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
+ c->table_elems - elem_index;
+}
+
+/* encode an mdelem */
+static void hpack_enc(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem,
+ framer_state *st) {
+ uint32_t key_hash = elem->key->hash;
+ uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
+ size_t decoder_space_usage;
+ uint32_t indices_key;
+ int should_add_elem;
+
+ GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
+ if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
+ st->seen_regular_header = 1;
+ } else {
+ GPR_ASSERT(
+ st->seen_regular_header == 0 &&
+ "Reserved header (colon-prefixed) happening after regular ones.");
+ }
+
+ inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
+
+ /* is this elem currently in the decoders table? */
+
+ if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem &&
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
+ /* HIT: complete element (first cuckoo hash) */
+ emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
+ st);
+ return;
+ }
+
+ if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem &&
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
+ /* HIT: complete element (second cuckoo hash) */
+ emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
+ st);
+ return;
+ }
+
+ /* should this elem be in the table? */
+ decoder_space_usage = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
+ GPR_SLICE_LENGTH(elem->value->slice);
+ should_add_elem = decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
+ c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
+ c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
+
+ /* no hits for the elem... maybe there's a key? */
+
+ indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
+ if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key &&
+ indices_key > c->tail_remote_index) {
+ /* HIT: key (first cuckoo hash) */
+ if (should_add_elem) {
+ emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
+ add_elem(c, elem);
+ return;
+ } else {
+ emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
+ return;
+ }
+ GPR_UNREACHABLE_CODE(return );
+ }
+
+ indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
+ if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key &&
+ indices_key > c->tail_remote_index) {
+ /* HIT: key (second cuckoo hash) */
+ if (should_add_elem) {
+ emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
+ add_elem(c, elem);
+ return;
+ } else {
+ emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
+ return;
+ }
+ GPR_UNREACHABLE_CODE(return );
+ }
+
+ /* no elem, key in the table... fall back to literal emission */
+
+ if (should_add_elem) {
+ emit_lithdr_incidx_v(c, elem, st);
+ add_elem(c, elem);
+ return;
+ } else {
+ emit_lithdr_noidx_v(c, elem, st);
+ return;
+ }
+ GPR_UNREACHABLE_CODE(return );
+}
+
+#define STRLEN_LIT(x) (sizeof(x) - 1)
+#define TIMEOUT_KEY "grpc-timeout"
+
+static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
+ framer_state *st) {
+ char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
+ grpc_mdelem *mdelem;
+ grpc_chttp2_encode_timeout(
+ gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
+ mdelem = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_TIMEOUT, grpc_mdstr_from_string(timeout_str));
+ hpack_enc(c, mdelem, st);
+ GRPC_MDELEM_UNREF(mdelem);
+}
+
+static uint32_t elems_for_bytes(uint32_t bytes) { return (bytes + 31) / 32; }
+
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
+ memset(c, 0, sizeof(*c));
+ c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
+ c->cap_table_elems = elems_for_bytes(c->max_table_size);
+ c->max_table_elems = c->cap_table_elems;
+ c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
+ c->table_elem_size =
+ gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ memset(c->table_elem_size, 0,
+ sizeof(*c->table_elem_size) * c->cap_table_elems);
+}
+
+void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {
+ int i;
+ for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
+ if (c->entries_keys[i]) GRPC_MDSTR_UNREF(c->entries_keys[i]);
+ if (c->entries_elems[i]) GRPC_MDELEM_UNREF(c->entries_elems[i]);
+ }
+ gpr_free(c->table_elem_size);
+}
+
+void grpc_chttp2_hpack_compressor_set_max_usable_size(
+ grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) {
+ c->max_usable_size = max_table_size;
+ grpc_chttp2_hpack_compressor_set_max_table_size(
+ c, GPR_MIN(c->max_table_size, max_table_size));
+}
+
+static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
+ uint16_t *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
+ uint32_t i;
+
+ memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
+ GPR_ASSERT(c->table_elems <= new_cap);
+
+ for (i = 0; i < c->table_elems; i++) {
+ uint32_t ofs = c->tail_remote_index + i + 1;
+ table_elem_size[ofs % new_cap] =
+ c->table_elem_size[ofs % c->cap_table_elems];
+ }
+
+ c->cap_table_elems = new_cap;
+ gpr_free(c->table_elem_size);
+ c->table_elem_size = table_elem_size;
+}
+
+void grpc_chttp2_hpack_compressor_set_max_table_size(
+ grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) {
+ max_table_size = GPR_MIN(max_table_size, c->max_usable_size);
+ if (max_table_size == c->max_table_size) {
+ return;
+ }
+ while (c->table_size > 0 && c->table_size > max_table_size) {
+ evict_entry(c);
+ }
+ c->max_table_size = max_table_size;
+ c->max_table_elems = elems_for_bytes(max_table_size);
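+  /* grow the slot array by at least doubling when more capacity is needed;
+     shrink only once usage falls below a third of capacity, so small size
+     changes do not cause repeated reallocation */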
+ if (c->max_table_elems > c->cap_table_elems) {
+ rebuild_elems(c, GPR_MAX(c->max_table_elems, 2 * c->cap_table_elems));
+ } else if (c->max_table_elems < c->cap_table_elems / 3) {
+ uint32_t new_cap = GPR_MAX(c->max_table_elems, 16);
+ if (new_cap != c->cap_table_elems) {
+ rebuild_elems(c, new_cap);
+ }
+ }
+ c->advertise_table_size_change = 1;
+ gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
+}
+
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c,
+ uint32_t stream_id,
+ grpc_metadata_batch *metadata, int is_eof,
+ gpr_slice_buffer *outbuf) {
+ framer_state st;
+ grpc_linked_mdelem *l;
+ gpr_timespec deadline;
+
+ GPR_ASSERT(stream_id != 0);
+
+ st.seen_regular_header = 0;
+ st.stream_id = stream_id;
+ st.output = outbuf;
+ st.is_first_frame = 1;
+
+  /* Encode the metadata batch: advertise any pending dynamic table size
+     change, emit each metadata element (possibly updating the compression
+     table as we go), then append the deadline as a grpc-timeout header if
+     one is set. */
+ begin_frame(&st);
+ if (c->advertise_table_size_change != 0) {
+ emit_advertise_table_size_change(c, &st);
+ }
+ grpc_metadata_batch_assert_ok(metadata);
+ for (l = metadata->list.head; l; l = l->next) {
+ hpack_enc(c, l->md, &st);
+ }
+ deadline = metadata->deadline;
+ if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
+ deadline_enc(c, deadline, &st);
+ }
+
+ finish_frame(&st, 1, is_eof);
+}
diff --git a/src/core/transport/chttp2/stream_encoder.h b/src/core/transport/chttp2/hpack_encoder.h
index db52f2a0f6..90aaf867c5 100644
--- a/src/core/transport/chttp2/stream_encoder.h
+++ b/src/core/transport/chttp2/hpack_encoder.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,63 +31,65 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/metadata.h"
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#define GRPC_CHTTP2_HPACKC_NUM_FILTERS 256
#define GRPC_CHTTP2_HPACKC_NUM_VALUES 256
-#define GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS (4096 / 32)
+/* initial table size, per spec */
+#define GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE 4096
+/* maximum table size we'll actually use */
+#define GRPC_CHTTP2_HPACKC_MAX_TABLE_SIZE (1024 * 1024)
typedef struct {
- gpr_uint32 filter_elems_sum;
+ uint32_t filter_elems_sum;
+ uint32_t max_table_size;
+ uint32_t max_table_elems;
+ uint32_t cap_table_elems;
+  /** if non-zero, advertise to the decoder that the table size has changed
+      (the new size is max_table_size) */
+ uint8_t advertise_table_size_change;
+ /** maximum number of bytes we'll use for the decode table (to guard against
+ peers ooming us by setting decode table size high) */
+ uint32_t max_usable_size;
/* one before the lowest usable table index */
- gpr_uint32 tail_remote_index;
- gpr_uint16 table_size;
- gpr_uint16 table_elems;
+ uint32_t tail_remote_index;
+ uint32_t table_size;
+ uint32_t table_elems;
  /* filter tables for elems: these tables provide an approximate
     popularity count for particular hashes, and are used to determine whether
a new literal should be added to the compression table or not.
They track a single integer that counts how often a particular value has
been seen. When that count reaches max (255), all values are halved. */
- gpr_uint8 filter_elems[GRPC_CHTTP2_HPACKC_NUM_FILTERS];
-
- /* metadata context */
- grpc_mdctx *mdctx;
- /* the string 'grpc-timeout' */
- grpc_mdstr *timeout_key_str;
+ uint8_t filter_elems[GRPC_CHTTP2_HPACKC_NUM_FILTERS];
/* entry tables for keys & elems: these tables track values that have been
seen and *may* be in the decompressor table */
grpc_mdstr *entries_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
grpc_mdelem *entries_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- gpr_uint32 indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- gpr_uint32 indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
+ uint32_t indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
+ uint32_t indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- gpr_uint16 table_elem_size[GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS];
+ uint16_t *table_elem_size;
} grpc_chttp2_hpack_compressor;
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
- grpc_mdctx *mdctx);
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c);
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c);
+void grpc_chttp2_hpack_compressor_set_max_table_size(
+ grpc_chttp2_hpack_compressor *c, uint32_t max_table_size);
+void grpc_chttp2_hpack_compressor_set_max_usable_size(
+ grpc_chttp2_hpack_compressor *c, uint32_t max_table_size);
-/* select stream ops to be encoded, moving them from inops to outops, and
- moving subsequent ops in inops forward in the queue */
-gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
- gpr_uint32 max_flow_controlled_bytes,
- grpc_stream_op_buffer *outops);
-
-/* encode stream ops to output */
-void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
- gpr_uint32 stream_id,
- grpc_chttp2_hpack_compressor *compressor,
- gpr_slice_buffer *output);
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c, uint32_t id,
+ grpc_metadata_batch *metadata, int is_eof,
+ gpr_slice_buffer *outbuf);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H */
diff --git a/src/core/transport/chttp2/hpack_parser.c b/src/core/transport/chttp2/hpack_parser.c
index b8ab664db5..a63c7db1f6 100644
--- a/src/core/transport/chttp2/hpack_parser.c
+++ b/src/core/transport/chttp2/hpack_parser.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,13 +38,20 @@
#include <string.h>
#include <assert.h>
-#include "src/core/transport/chttp2/bin_encoder.h"
-#include "src/core/support/string.h"
+/* This is here for grpc_is_binary_header
+ * TODO(murgatroid99): Remove this
+ */
+#include <grpc/grpc.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/transport/chttp2/bin_encoder.h"
+
typedef enum {
NOT_BINARY,
B64_BYTE0,
@@ -68,61 +75,63 @@ typedef enum {
a set of indirect jumps, and so not waste stack space. */
/* forward declarations for parsing states */
-static int parse_begin(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_error(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-
-static int parse_string_prefix(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
-static int parse_key_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
+static int parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_illegal_op(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+
+static int parse_string_prefix(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_key_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static int parse_value_string_with_indexed_key(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur,
- const gpr_uint8 *end);
+ const uint8_t *cur,
+ const uint8_t *end);
static int parse_value_string_with_literal_key(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur,
- const gpr_uint8 *end);
-
-static int parse_value0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_value1(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_value2(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_value3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_value5up(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-
-static int parse_indexed_field(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur,
+ const uint8_t *end);
+
+static int parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_value5up(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+
+static int parse_indexed_field(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
-static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
+static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static int parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
-static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
+static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static int parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
-static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
+static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static int parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
static int parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
-static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end);
-static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end);
+ const uint8_t *cur, const uint8_t *end);
+static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
+static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
/* we translate the first byte of a hpack field into one of these decoding
cases, then use a lookup table to jump directly to the appropriate parser.
@@ -150,17 +159,15 @@ typedef enum {
/* jump table of parse state functions -- order must match first_byte_type
above */
static const grpc_chttp2_hpack_parser_state first_byte_action[] = {
- parse_indexed_field, parse_indexed_field_x,
- parse_lithdr_incidx, parse_lithdr_incidx_x,
- parse_lithdr_incidx_v, parse_lithdr_notidx,
- parse_lithdr_notidx_x, parse_lithdr_notidx_v,
- parse_lithdr_nvridx, parse_lithdr_nvridx_x,
- parse_lithdr_nvridx_v, parse_max_tbl_size,
- parse_max_tbl_size_x, parse_error};
+ parse_indexed_field, parse_indexed_field_x, parse_lithdr_incidx,
+ parse_lithdr_incidx_x, parse_lithdr_incidx_v, parse_lithdr_notidx,
+ parse_lithdr_notidx_x, parse_lithdr_notidx_v, parse_lithdr_nvridx,
+ parse_lithdr_nvridx_x, parse_lithdr_nvridx_v, parse_max_tbl_size,
+ parse_max_tbl_size_x, parse_illegal_op};
/* indexes the first byte to a parse state function - generated by
gen_hpack_tables.c */
-static const gpr_uint8 first_byte_lut[256] = {
+static const uint8_t first_byte_lut[256] = {
LITHDR_NOTIDX_V, LITHDR_NOTIDX, LITHDR_NOTIDX, LITHDR_NOTIDX,
LITHDR_NOTIDX, LITHDR_NOTIDX, LITHDR_NOTIDX, LITHDR_NOTIDX,
LITHDR_NOTIDX, LITHDR_NOTIDX, LITHDR_NOTIDX, LITHDR_NOTIDX,
@@ -169,7 +176,7 @@ static const gpr_uint8 first_byte_lut[256] = {
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX,
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX,
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX_X,
- ILLEGAL, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
+ MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
@@ -232,7 +239,7 @@ static const gpr_uint8 first_byte_lut[256] = {
considered returns the next state.
generated by gen_hpack_tables.c */
-static const gpr_uint8 next_tbl[256] = {
+static const uint8_t next_tbl[256] = {
0, 1, 2, 3, 4, 1, 2, 5, 6, 1, 7, 8, 1, 3, 3, 9, 10, 11, 1, 1,
1, 12, 1, 2, 13, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
14, 1, 15, 16, 1, 17, 1, 15, 2, 7, 3, 18, 19, 1, 1, 1, 1, 20, 1, 1,
@@ -247,9 +254,10 @@ static const gpr_uint8 next_tbl[256] = {
41, 1, 1, 1, 42, 43, 1, 1, 44, 1, 1, 1, 1, 15, 2, 2, 2, 2, 2, 2,
3, 3, 3, 45, 46, 1, 1, 2, 2, 2, 35, 3, 3, 18, 47, 2,
};
+
/* next state, based upon current state and the current nibble: see above.
generated by gen_hpack_tables.c */
-static const gpr_int16 next_sub_tbl[48 * 16] = {
+static const int16_t next_sub_tbl[48 * 16] = {
1, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
218, 2, 6, 10, 13, 14, 15, 16, 17, 2, 6, 10, 13, 14, 15,
16, 17, 3, 7, 11, 24, 3, 7, 11, 24, 3, 7, 11, 24, 3,
@@ -303,11 +311,12 @@ static const gpr_int16 next_sub_tbl[48 * 16] = {
253, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 255,
};
+
/* emission table: indexed like next_tbl, ultimately gives the byte to be
emitted, or -1 for no byte, or 256 for end of stream
generated by gen_hpack_tables.c */
-static const gpr_uint16 emit_tbl[256] = {
+static const uint16_t emit_tbl[256] = {
0, 1, 2, 3, 4, 5, 6, 7, 0, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 0, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
@@ -327,8 +336,9 @@ static const gpr_uint16 emit_tbl[256] = {
233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
248,
};
+
/* generated by gen_hpack_tables.c */
-static const gpr_int16 emit_sub_tbl[249 * 16] = {
+static const int16_t emit_sub_tbl[249 * 16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49,
49, 49, 48, 48, 48, 48, 49, 49, 49, 49, 50, 50, 50, 50, 97,
@@ -597,7 +607,7 @@ static const gpr_int16 emit_sub_tbl[249 * 16] = {
13, 22, 22, 22, 22, 256, 256, 256, 256,
};
-static const gpr_uint8 inverse_base64[256] = {
+static const uint8_t inverse_base64[256] = {
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255,
@@ -619,34 +629,35 @@ static const gpr_uint8 inverse_base64[256] = {
};
/* emission helpers */
-static void on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
- int add_to_table) {
+static int on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
+ int add_to_table) {
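+  /* returns 1 on success, 0 if the element could not be added to the table
+     (which aborts the enclosing parse) */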
if (add_to_table) {
- grpc_mdelem_ref(md);
- grpc_chttp2_hptbl_add(&p->table, md);
+ if (!grpc_chttp2_hptbl_add(&p->table, md)) {
+ return 0;
+ }
}
p->on_header(p->on_header_user_data, md);
+ return 1;
}
static grpc_mdstr *take_string(grpc_chttp2_hpack_parser *p,
grpc_chttp2_hpack_parser_string *str) {
- grpc_mdstr *s = grpc_mdstr_from_buffer(p->table.mdctx, (gpr_uint8 *)str->str,
- str->length);
+ grpc_mdstr *s = grpc_mdstr_from_buffer((uint8_t *)str->str, str->length);
str->length = 0;
return s;
}
/* jump to the next state */
-static int parse_next(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_next(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
p->state = *p->next_state++;
return p->state(p, cur, end);
}
/* begin parsing a header: all functionality is encoded into lookup tables
above */
-static int parse_begin(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_begin;
return 1;
@@ -656,8 +667,8 @@ static int parse_begin(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
}
/* stream dependency and prioritization data: we just skip it */
-static int parse_stream_weight(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_stream_weight(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_weight;
return 1;
@@ -666,8 +677,8 @@ static int parse_stream_weight(grpc_chttp2_hpack_parser *p,
return p->after_prioritization(p, cur + 1, end);
}
-static int parse_stream_dep3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_stream_dep3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep3;
return 1;
@@ -676,8 +687,8 @@ static int parse_stream_dep3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return parse_stream_weight(p, cur + 1, end);
}
-static int parse_stream_dep2(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_stream_dep2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep2;
return 1;
@@ -686,8 +697,8 @@ static int parse_stream_dep2(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return parse_stream_dep3(p, cur + 1, end);
}
-static int parse_stream_dep1(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_stream_dep1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep1;
return 1;
@@ -696,8 +707,8 @@ static int parse_stream_dep1(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return parse_stream_dep2(p, cur + 1, end);
}
-static int parse_stream_dep0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_stream_dep0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep0;
return 1;
@@ -708,26 +719,31 @@ static int parse_stream_dep0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* emit an indexed field; jumps to begin the next field on completion */
-static int finish_indexed_field(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int finish_indexed_field(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- grpc_mdelem_ref(md);
- on_hdr(p, md, 0);
- return parse_begin(p, cur, end);
+ if (md == NULL) {
+ gpr_log(GPR_ERROR, "Invalid HPACK index received: %d", p->index);
+ return 0;
+ }
+ GRPC_MDELEM_REF(md);
+ return on_hdr(p, md, 0) && parse_begin(p, cur, end);
}
/* parse an indexed field with index < 127 */
-static int parse_indexed_field(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_indexed_field(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
+ p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
return finish_indexed_field(p, cur + 1, end);
}
/* parse an indexed field with index >= 127 */
static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_indexed_field};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x7f;
p->parsing.value = &p->index;
@@ -736,31 +752,31 @@ static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
/* finish a literal header with incremental indexing: add to the table and
   jump back to begin */
-static int finish_lithdr_incidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int finish_lithdr_incidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- grpc_mdstr_ref(md->key),
- take_string(p, &p->value)),
- 1);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 1) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header with incremental indexing with no index */
static int finish_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 1);
- return parse_begin(p, cur, end);
+ const uint8_t *cur, const uint8_t *end) {
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 1) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header with incremental indexing; index < 63 */
-static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0x3f;
return parse_string_prefix(p, cur + 1, end);
@@ -768,10 +784,11 @@ static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
/* parse a literal header with incremental indexing; index >= 63 */
static int parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_incidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x3f;
p->parsing.value = &p->index;
@@ -780,40 +797,41 @@ static int parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
/* parse a literal header with incremental indexing; index = 0 */
static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
/* finish a literal header without incremental indexing */
-static int finish_lithdr_notidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int finish_lithdr_notidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- grpc_mdstr_ref(md->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header without incremental indexing with index = 0 */
static int finish_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ const uint8_t *cur, const uint8_t *end) {
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header without incremental indexing; index < 15 */
-static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@@ -821,10 +839,11 @@ static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
/* parse a literal header without incremental indexing; index >= 15 */
static int parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_notidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@@ -833,40 +852,41 @@ static int parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
/* parse a literal header without incremental indexing; index == 0 */
static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
/* finish a literal header that is never indexed */
-static int finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- grpc_mdstr_ref(md->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header that is never indexed with an extra value */
static int finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ const uint8_t *cur, const uint8_t *end) {
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header that is never indexed; index < 15 */
-static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@@ -874,10 +894,11 @@ static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
/* parse a literal header that is never indexed; index >= 15 */
static int parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_nvridx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@@ -886,51 +907,67 @@ static int parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
/* parse a literal header that is never indexed; index == 0 */
static int parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+ const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
/* finish parsing a max table size change */
-static int finish_max_tbl_size(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int finish_max_tbl_size(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
- abort(); /* not implemented */
- return parse_begin(p, cur, end);
+ return grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index) &&
+ parse_begin(p, cur, end);
}
/* parse a max table size change, max size < 15 */
-static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
- p->index = (*cur) & 0xf;
+static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
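+  /* dynamic table size updates are only permitted at the start of a header
+     block: every other field parser zeroes this counter, and it is reset
+     between header blocks */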
+ if (p->dynamic_table_update_allowed == 0) {
+ return 0;
+ }
+ p->dynamic_table_update_allowed--;
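+  /* the size-update opcode (001xxxxx) carries a 5-bit prefixed integer,
+     hence the 0x1f mask */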
+ p->index = (*cur) & 0x1f;
return finish_max_tbl_size(p, cur + 1, end);
}
/* parse a max table size change, max size >= 15 */
-static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
+ if (p->dynamic_table_update_allowed == 0) {
+ return 0;
+ }
+ p->dynamic_table_update_allowed--;
p->next_state = and_then;
- p->index = 0xf;
+ p->index = 0x1f;
p->parsing.value = &p->index;
return parse_value0(p, cur + 1, end);
}
/* a parse error: jam the parse state into parse_error, and return error */
-static int parse_error(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
p->state = parse_error;
return 0;
}
+static int parse_illegal_op(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
+ GPR_ASSERT(cur != end);
+ gpr_log(GPR_DEBUG, "Illegal hpack op code %d", *cur);
+ return parse_error(p, cur, end);
+}
+
/* parse the 1st byte of a varint into p->parsing.value
no overflow is possible */
-static int parse_value0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_value0;
return 1;
@@ -947,14 +984,14 @@ static int parse_value0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* parse the 2nd byte of a varint into p->parsing.value
no overflow is possible */
-static int parse_value1(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_value1;
return 1;
}
- *p->parsing.value += (((gpr_uint32)*cur) & 0x7f) << 7;
+ *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 7;
if ((*cur) & 0x80) {
return parse_value2(p, cur + 1, end);
@@ -965,14 +1002,14 @@ static int parse_value1(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* parse the 3rd byte of a varint into p->parsing.value
no overflow is possible */
-static int parse_value2(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_value2;
return 1;
}
- *p->parsing.value += (((gpr_uint32)*cur) & 0x7f) << 14;
+ *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 14;
if ((*cur) & 0x80) {
return parse_value3(p, cur + 1, end);
@@ -983,14 +1020,14 @@ static int parse_value2(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* parse the 4th byte of a varint into p->parsing.value
no overflow is possible */
-static int parse_value3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_value3;
return 1;
}
- *p->parsing.value += (((gpr_uint32)*cur) & 0x7f) << 21;
+ *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 21;
if ((*cur) & 0x80) {
return parse_value4(p, cur + 1, end);
@@ -1001,11 +1038,11 @@ static int parse_value3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* parse the 5th byte of a varint into p->parsing.value
depending on the byte, we may overflow, and care must be taken */
-static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
- gpr_uint8 c;
- gpr_uint32 cur_value;
- gpr_uint32 add_value;
+static int parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
+ uint8_t c;
+ uint32_t cur_value;
+ uint32_t add_value;
if (cur == end) {
p->state = parse_value4;
@@ -1018,7 +1055,7 @@ static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
}
cur_value = *p->parsing.value;
- add_value = ((gpr_uint32)c) << 28;
+ add_value = ((uint32_t)c) << 28;
if (add_value > 0xffffffffu - cur_value) {
goto error;
}
@@ -1034,7 +1071,7 @@ static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
error:
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
- "got byte 0x%02x",
+ "got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
return parse_error(p, cur, end);
}
@@ -1042,8 +1079,8 @@ error:
/* parse any trailing bytes in a varint: it's possible to append an arbitrary
number of 0x80's and not affect the value - a zero will terminate - and
anything else will overflow */
-static int parse_value5up(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_value5up(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
while (cur != end && *cur == 0x80) {
++cur;
}
@@ -1059,13 +1096,14 @@ static int parse_value5up(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
- "got byte 0x%02x sometime after byte 4");
+ "got byte 0x%02x sometime after byte 5",
+ *p->parsing.value, *cur);
return parse_error(p, cur, end);
}
/* parse a string prefix */
-static int parse_string_prefix(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur, const gpr_uint8 *end) {
+static int parse_string_prefix(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (cur == end) {
p->state = parse_string_prefix;
return 1;
@@ -1083,23 +1121,25 @@ static int parse_string_prefix(grpc_chttp2_hpack_parser *p,
/* append some bytes to a string */
static void append_bytes(grpc_chttp2_hpack_parser_string *str,
- const gpr_uint8 *data, size_t length) {
+ const uint8_t *data, size_t length) {
if (length + str->length > str->capacity) {
- str->capacity = str->length + length;
+ GPR_ASSERT(str->length + length <= UINT32_MAX);
+ str->capacity = (uint32_t)(str->length + length);
str->str = gpr_realloc(str->str, str->capacity);
}
memcpy(str->str + str->length, data, length);
- str->length += length;
+ GPR_ASSERT(length <= UINT32_MAX - str->length);
+ str->length += (uint32_t)length;
}
-static int append_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int append_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
grpc_chttp2_hpack_parser_string *str = p->parsing.str;
- gpr_uint32 bits;
- gpr_uint8 decoded[3];
+ uint32_t bits;
+ uint8_t decoded[3];
switch ((binary_state)p->binary) {
case NOT_BINARY:
- append_bytes(str, cur, end - cur);
+ append_bytes(str, cur, (size_t)(end - cur));
return 1;
b64_byte0:
case B64_BYTE0:
@@ -1157,22 +1197,20 @@ static int append_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
goto b64_byte3;
p->base64_buffer |= bits;
bits = p->base64_buffer;
- decoded[0] = bits >> 16;
- decoded[1] = bits >> 8;
- decoded[2] = bits;
+ decoded[0] = (uint8_t)(bits >> 16);
+ decoded[1] = (uint8_t)(bits >> 8);
+ decoded[2] = (uint8_t)(bits);
append_bytes(str, decoded, 3);
goto b64_byte0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 1;
+ GPR_UNREACHABLE_CODE(return 1);
}
/* append a null terminator to a string */
static int finish_str(grpc_chttp2_hpack_parser *p) {
- gpr_uint8 terminator = 0;
- gpr_uint8 decoded[2];
- gpr_uint32 bits;
+ uint8_t terminator = 0;
+ uint8_t decoded[2];
+ uint32_t bits;
grpc_chttp2_hpack_parser_string *str = p->parsing.str;
switch ((binary_state)p->binary) {
case NOT_BINARY:
@@ -1189,7 +1227,7 @@ static int finish_str(grpc_chttp2_hpack_parser *p) {
bits & 0xffff);
return 0;
}
- decoded[0] = bits >> 16;
+ decoded[0] = (uint8_t)(bits >> 16);
append_bytes(str, decoded, 1);
break;
case B64_BYTE3:
@@ -1199,8 +1237,8 @@ static int finish_str(grpc_chttp2_hpack_parser *p) {
bits & 0xff);
return 0;
}
- decoded[0] = bits >> 16;
- decoded[1] = bits >> 8;
+ decoded[0] = (uint8_t)(bits >> 16);
+ decoded[1] = (uint8_t)(bits >> 8);
append_bytes(str, decoded, 2);
break;
}
@@ -1210,12 +1248,12 @@ static int finish_str(grpc_chttp2_hpack_parser *p) {
}
/* decode a nibble from a huffman encoded stream */
-static int huff_nibble(grpc_chttp2_hpack_parser *p, gpr_uint8 nibble) {
- gpr_int16 emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
- gpr_int16 next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
+static int huff_nibble(grpc_chttp2_hpack_parser *p, uint8_t nibble) {
+ int16_t emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
+ int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
if (emit != -1) {
if (emit >= 0 && emit < 256) {
- gpr_uint8 c = (gpr_uint8)emit;
+ uint8_t c = (uint8_t)emit;
if (!append_string(p, &c, (&c) + 1)) return 0;
} else {
assert(emit == 256);
@@ -1226,8 +1264,8 @@ static int huff_nibble(grpc_chttp2_hpack_parser *p, gpr_uint8 nibble) {
}
/* decode full bytes from a huffman encoded stream */
-static int add_huff_bytes(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int add_huff_bytes(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
for (; cur != end; ++cur) {
if (!huff_nibble(p, *cur >> 4) || !huff_nibble(p, *cur & 0xf)) return 0;
}
@@ -1236,8 +1274,8 @@ static int add_huff_bytes(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
/* decode some string bytes based on the current decoding mode
(huffman or not) */
-static int add_str_bytes(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int add_str_bytes(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
if (p->huff) {
return add_huff_bytes(p, cur, end);
} else {
@@ -1246,24 +1284,25 @@ static int add_str_bytes(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
}
/* parse a string - tries to do large chunks at a time */
-static int parse_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
size_t remaining = p->strlen - p->strgot;
- size_t given = end - cur;
+ size_t given = (size_t)(end - cur);
if (remaining <= given) {
return add_str_bytes(p, cur, cur + remaining) && finish_str(p) &&
parse_next(p, cur + remaining, end);
} else {
if (!add_str_bytes(p, cur, cur + given)) return 0;
- p->strgot += given;
+ GPR_ASSERT(given <= UINT32_MAX - p->strgot);
+ p->strgot += (uint32_t)given;
p->state = parse_string;
return 1;
}
}
/* begin parsing a string - performs setup, calls parse_string */
-static int begin_parse_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end, gpr_uint8 binary,
+static int begin_parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end, uint8_t binary,
grpc_chttp2_hpack_parser_string *str) {
p->strgot = 0;
str->length = 0;
@@ -1274,8 +1313,8 @@ static int begin_parse_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
}
/* parse the key string */
-static int parse_key_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+static int parse_key_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
return begin_parse_string(p, cur, end, NOT_BINARY, &p->key);
}
@@ -1289,7 +1328,10 @@ static is_binary_header is_binary_literal_header(grpc_chttp2_hpack_parser *p) {
static is_binary_header is_binary_indexed_header(grpc_chttp2_hpack_parser *p) {
grpc_mdelem *elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- if (!elem) return ERROR_HEADER;
+ if (!elem) {
+ gpr_log(GPR_ERROR, "Invalid HPACK index received: %d", p->index);
+ return ERROR_HEADER;
+ }
return grpc_is_binary_header(
(const char *)GPR_SLICE_START_PTR(elem->key->slice),
GPR_SLICE_LENGTH(elem->key->slice))
@@ -1298,8 +1340,8 @@ static is_binary_header is_binary_indexed_header(grpc_chttp2_hpack_parser *p) {
}
/* parse the value string */
-static int parse_value_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
- const gpr_uint8 *end, is_binary_header type) {
+static int parse_value_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end, is_binary_header type) {
switch (type) {
case BINARY_HEADER:
return begin_parse_string(p, cur, end, B64_BYTE0, &p->value);
@@ -1309,42 +1351,28 @@ static int parse_value_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return 0;
}
  /* unreachable, but keeps compilers from warning about a missing return */
- gpr_log(GPR_ERROR, "Should never reach beyond switch in parse_value_string");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
static int parse_value_string_with_indexed_key(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+ const uint8_t *cur,
+ const uint8_t *end) {
return parse_value_string(p, cur, end, is_binary_indexed_header(p));
}
static int parse_value_string_with_literal_key(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *cur,
- const gpr_uint8 *end) {
+ const uint8_t *cur,
+ const uint8_t *end) {
return parse_value_string(p, cur, end, is_binary_literal_header(p));
}
/* PUBLIC INTERFACE */
static void on_header_not_set(void *user_data, grpc_mdelem *md) {
- char *keyhex =
- gpr_hexdump(grpc_mdstr_as_c_string(md->key),
- GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT);
- char *valuehex =
- gpr_hexdump(grpc_mdstr_as_c_string(md->value),
- GPR_SLICE_LENGTH(md->value->slice), GPR_HEXDUMP_PLAINTEXT);
- gpr_log(GPR_ERROR, "on_header callback not set; key=%s value=%s", keyhex,
- valuehex);
- gpr_free(keyhex);
- gpr_free(valuehex);
- grpc_mdelem_unref(md);
- abort();
+ GPR_UNREACHABLE_CODE(return );
}
-void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
- grpc_mdctx *mdctx) {
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p) {
p->on_header = on_header_not_set;
p->on_header_user_data = NULL;
p->state = parse_begin;
@@ -1354,7 +1382,8 @@ void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
p->value.str = NULL;
p->value.capacity = 0;
p->value.length = 0;
- grpc_chttp2_hptbl_init(&p->table, mdctx);
+ p->dynamic_table_update_allowed = 2;
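+  /* permit at most two size updates before the first field of a block
+     (e.g. a shrink to evict entries followed by a grow) */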
+ grpc_chttp2_hptbl_init(&p->table);
}
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p) {
@@ -1369,42 +1398,52 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p) {
}
int grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *beg, const gpr_uint8 *end) {
+ const uint8_t *beg, const uint8_t *end) {
/* TODO(ctiller): limit the distance of end from beg, and perform multiple
- steps in the event of a large chunk of data to limit
- stack space usage when no tail call optimization is
- available */
+ steps in the event of a large chunk of data to limit
+ stack space usage when no tail call optimization is
+ available */
return p->state(p, beg, end);
}
grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
- void *hpack_parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *hpack_parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
+ GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (!grpc_chttp2_hpack_parser_parse(parser, GPR_SLICE_START_PTR(slice),
GPR_SLICE_END_PTR(slice))) {
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (is_last) {
if (parser->is_boundary && parser->state != parse_begin) {
gpr_log(GPR_ERROR,
"end of header frame not aligned with a hpack record boundary");
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- if (parser->is_boundary) {
- grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- &stream_parsing->incoming_metadata,
- &stream_parsing->data_parser.incoming_sopb);
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
- }
- if (parser->is_eof) {
- stream_parsing->received_close = 1;
+ /* need to check for null stream: this can occur if we receive an invalid
+ stream id on a header */
+ if (stream_parsing != NULL) {
+ if (parser->is_boundary) {
+ stream_parsing
+ ->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
+ stream_parsing->header_frames_received++;
+ grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
+ stream_parsing);
+ }
+ if (parser->is_eof) {
+ stream_parsing->received_close = 1;
+ }
}
parser->on_header = on_header_not_set;
parser->on_header_user_data = NULL;
parser->is_boundary = 0xde;
parser->is_eof = 0xde;
+ parser->dynamic_table_update_allowed = 2;
}
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_PARSE_OK;
}
diff --git a/src/core/transport/chttp2/hpack_parser.h b/src/core/transport/chttp2/hpack_parser.h
index c1768d9d5d..6a6d136da2 100644
--- a/src/core/transport/chttp2/hpack_parser.h
+++ b/src/core/transport/chttp2/hpack_parser.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,13 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
+#include "src/core/iomgr/exec_ctx.h"
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/chttp2/hpack_table.h"
#include "src/core/transport/metadata.h"
@@ -44,13 +45,13 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef int (*grpc_chttp2_hpack_parser_state)(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *beg,
- const gpr_uint8 *end);
+ const uint8_t *beg,
+ const uint8_t *end);
typedef struct {
char *str;
- gpr_uint32 length;
- gpr_uint32 capacity;
+ uint32_t length;
+ uint32_t capacity;
} grpc_chttp2_hpack_parser_string;
struct grpc_chttp2_hpack_parser {
@@ -66,48 +67,50 @@ struct grpc_chttp2_hpack_parser {
grpc_chttp2_hpack_parser_state after_prioritization;
/* the value we're currently parsing */
union {
- gpr_uint32 *value;
+ uint32_t *value;
grpc_chttp2_hpack_parser_string *str;
} parsing;
/* string parameters for each chunk */
grpc_chttp2_hpack_parser_string key;
grpc_chttp2_hpack_parser_string value;
/* parsed index */
- gpr_uint32 index;
+ uint32_t index;
/* length of source bytes for the currently parsing string */
- gpr_uint32 strlen;
+ uint32_t strlen;
/* number of source bytes read for the currently parsing string */
- gpr_uint32 strgot;
+ uint32_t strgot;
/* huffman decoding state */
- gpr_uint16 huff_state;
+ int16_t huff_state;
/* is the string being decoded binary? */
- gpr_uint8 binary;
+ uint8_t binary;
/* is the current string huffman encoded? */
- gpr_uint8 huff;
+ uint8_t huff;
+ /* is a dynamic table update allowed? */
+ uint8_t dynamic_table_update_allowed;
/* set by higher layers, used by grpc_chttp2_header_parser_parse to signal
it should append a metadata boundary at the end of frame */
- gpr_uint8 is_boundary;
- gpr_uint8 is_eof;
- gpr_uint32 base64_buffer;
+ uint8_t is_boundary;
+ uint8_t is_eof;
+ uint32_t base64_buffer;
/* hpack table */
grpc_chttp2_hptbl table;
};
-void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
- grpc_mdctx *mdctx);
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
int grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
- const gpr_uint8 *beg, const gpr_uint8 *end);
+ const uint8_t *beg, const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
- void *hpack_parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *hpack_parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H */
diff --git a/src/core/transport/chttp2/hpack_table.c b/src/core/transport/chttp2/hpack_table.c
index 372e71d68f..f1ce3b84fd 100644
--- a/src/core/transport/chttp2/hpack_table.c
+++ b/src/core/transport/chttp2/hpack_table.c
@@ -36,111 +36,184 @@
#include <assert.h>
#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+
#include "src/core/support/murmur_hash.h"
static struct {
const char *key;
const char *value;
} static_table[] = {
- /* 0: */ {NULL, NULL},
- /* 1: */ {":authority", ""},
- /* 2: */ {":method", "GET"},
- /* 3: */ {":method", "POST"},
- /* 4: */ {":path", "/"},
- /* 5: */ {":path", "/index.html"},
- /* 6: */ {":scheme", "http"},
- /* 7: */ {":scheme", "https"},
- /* 8: */ {":status", "200"},
- /* 9: */ {":status", "204"},
- /* 10: */ {":status", "206"},
- /* 11: */ {":status", "304"},
- /* 12: */ {":status", "400"},
- /* 13: */ {":status", "404"},
- /* 14: */ {":status", "500"},
- /* 15: */ {"accept-charset", ""},
- /* 16: */ {"accept-encoding", "gzip, deflate"},
- /* 17: */ {"accept-language", ""},
- /* 18: */ {"accept-ranges", ""},
- /* 19: */ {"accept", ""},
- /* 20: */ {"access-control-allow-origin", ""},
- /* 21: */ {"age", ""},
- /* 22: */ {"allow", ""},
- /* 23: */ {"authorization", ""},
- /* 24: */ {"cache-control", ""},
- /* 25: */ {"content-disposition", ""},
- /* 26: */ {"content-encoding", ""},
- /* 27: */ {"content-language", ""},
- /* 28: */ {"content-length", ""},
- /* 29: */ {"content-location", ""},
- /* 30: */ {"content-range", ""},
- /* 31: */ {"content-type", ""},
- /* 32: */ {"cookie", ""},
- /* 33: */ {"date", ""},
- /* 34: */ {"etag", ""},
- /* 35: */ {"expect", ""},
- /* 36: */ {"expires", ""},
- /* 37: */ {"from", ""},
- /* 38: */ {"host", ""},
- /* 39: */ {"if-match", ""},
- /* 40: */ {"if-modified-since", ""},
- /* 41: */ {"if-none-match", ""},
- /* 42: */ {"if-range", ""},
- /* 43: */ {"if-unmodified-since", ""},
- /* 44: */ {"last-modified", ""},
- /* 45: */ {"link", ""},
- /* 46: */ {"location", ""},
- /* 47: */ {"max-forwards", ""},
- /* 48: */ {"proxy-authenticate", ""},
- /* 49: */ {"proxy-authorization", ""},
- /* 50: */ {"range", ""},
- /* 51: */ {"referer", ""},
- /* 52: */ {"refresh", ""},
- /* 53: */ {"retry-after", ""},
- /* 54: */ {"server", ""},
- /* 55: */ {"set-cookie", ""},
- /* 56: */ {"strict-transport-security", ""},
- /* 57: */ {"transfer-encoding", ""},
- /* 58: */ {"user-agent", ""},
- /* 59: */ {"vary", ""},
- /* 60: */ {"via", ""},
- /* 61: */ {"www-authenticate", ""},
+ /* 0: */
+ {NULL, NULL},
+ /* 1: */
+ {":authority", ""},
+ /* 2: */
+ {":method", "GET"},
+ /* 3: */
+ {":method", "POST"},
+ /* 4: */
+ {":path", "/"},
+ /* 5: */
+ {":path", "/index.html"},
+ /* 6: */
+ {":scheme", "http"},
+ /* 7: */
+ {":scheme", "https"},
+ /* 8: */
+ {":status", "200"},
+ /* 9: */
+ {":status", "204"},
+ /* 10: */
+ {":status", "206"},
+ /* 11: */
+ {":status", "304"},
+ /* 12: */
+ {":status", "400"},
+ /* 13: */
+ {":status", "404"},
+ /* 14: */
+ {":status", "500"},
+ /* 15: */
+ {"accept-charset", ""},
+ /* 16: */
+ {"accept-encoding", "gzip, deflate"},
+ /* 17: */
+ {"accept-language", ""},
+ /* 18: */
+ {"accept-ranges", ""},
+ /* 19: */
+ {"accept", ""},
+ /* 20: */
+ {"access-control-allow-origin", ""},
+ /* 21: */
+ {"age", ""},
+ /* 22: */
+ {"allow", ""},
+ /* 23: */
+ {"authorization", ""},
+ /* 24: */
+ {"cache-control", ""},
+ /* 25: */
+ {"content-disposition", ""},
+ /* 26: */
+ {"content-encoding", ""},
+ /* 27: */
+ {"content-language", ""},
+ /* 28: */
+ {"content-length", ""},
+ /* 29: */
+ {"content-location", ""},
+ /* 30: */
+ {"content-range", ""},
+ /* 31: */
+ {"content-type", ""},
+ /* 32: */
+ {"cookie", ""},
+ /* 33: */
+ {"date", ""},
+ /* 34: */
+ {"etag", ""},
+ /* 35: */
+ {"expect", ""},
+ /* 36: */
+ {"expires", ""},
+ /* 37: */
+ {"from", ""},
+ /* 38: */
+ {"host", ""},
+ /* 39: */
+ {"if-match", ""},
+ /* 40: */
+ {"if-modified-since", ""},
+ /* 41: */
+ {"if-none-match", ""},
+ /* 42: */
+ {"if-range", ""},
+ /* 43: */
+ {"if-unmodified-since", ""},
+ /* 44: */
+ {"last-modified", ""},
+ /* 45: */
+ {"link", ""},
+ /* 46: */
+ {"location", ""},
+ /* 47: */
+ {"max-forwards", ""},
+ /* 48: */
+ {"proxy-authenticate", ""},
+ /* 49: */
+ {"proxy-authorization", ""},
+ /* 50: */
+ {"range", ""},
+ /* 51: */
+ {"referer", ""},
+ /* 52: */
+ {"refresh", ""},
+ /* 53: */
+ {"retry-after", ""},
+ /* 54: */
+ {"server", ""},
+ /* 55: */
+ {"set-cookie", ""},
+ /* 56: */
+ {"strict-transport-security", ""},
+ /* 57: */
+ {"transfer-encoding", ""},
+ /* 58: */
+ {"user-agent", ""},
+ /* 59: */
+ {"vary", ""},
+ /* 60: */
+ {"via", ""},
+ /* 61: */
+ {"www-authenticate", ""},
};
-void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl, grpc_mdctx *mdctx) {
+static uint32_t entries_for_bytes(uint32_t bytes) {
+ return (bytes + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD - 1) /
+ GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+}
+
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl) {
size_t i;
memset(tbl, 0, sizeof(*tbl));
- tbl->mdctx = mdctx;
- tbl->max_bytes = GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
+ tbl->current_table_bytes = tbl->max_bytes =
+ GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
+ tbl->max_entries = tbl->cap_entries =
+ entries_for_bytes(tbl->current_table_bytes);
+ tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
- tbl->static_ents[i - 1] = grpc_mdelem_from_strings(
- mdctx, static_table[i].key, static_table[i].value);
+ tbl->static_ents[i - 1] =
+ grpc_mdelem_from_strings(static_table[i].key, static_table[i].value);
}
}
void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl) {
size_t i;
for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
- grpc_mdelem_unref(tbl->static_ents[i]);
+ GRPC_MDELEM_UNREF(tbl->static_ents[i]);
}
for (i = 0; i < tbl->num_ents; i++) {
- grpc_mdelem_unref(
- tbl->ents[(tbl->first_ent + i) % GRPC_CHTTP2_MAX_TABLE_COUNT]);
+ GRPC_MDELEM_UNREF(tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]);
}
+ gpr_free(tbl->ents);
}
grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
- gpr_uint32 index) {
+ uint32_t tbl_index) {
/* Static table comes first, just return an entry from it */
- if (index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
- return tbl->static_ents[index - 1];
+ if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
+ return tbl->static_ents[tbl_index - 1];
}
/* Otherwise, find the value in the list of valid entries */
- index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
- if (index < tbl->num_ents) {
- gpr_uint32 offset = (tbl->num_ents - 1 - index + tbl->first_ent) %
- GRPC_CHTTP2_MAX_TABLE_COUNT;
+ tbl_index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
+ if (tbl_index < tbl->num_ents) {
+ uint32_t offset =
+ (tbl->num_ents - 1u - tbl_index + tbl->first_ent) % tbl->cap_entries;
return tbl->ents[offset];
}
/* Invalid entry: return error */
@@ -150,22 +223,85 @@ grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
/* Evict one element from the table */
static void evict1(grpc_chttp2_hptbl *tbl) {
grpc_mdelem *first_ent = tbl->ents[tbl->first_ent];
- tbl->mem_used -= GPR_SLICE_LENGTH(first_ent->key->slice) +
- GPR_SLICE_LENGTH(first_ent->value->slice) +
- GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
- tbl->first_ent = (tbl->first_ent + 1) % GRPC_CHTTP2_MAX_TABLE_COUNT;
+ size_t elem_bytes = GPR_SLICE_LENGTH(first_ent->key->slice) +
+ GPR_SLICE_LENGTH(first_ent->value->slice) +
+ GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+ GPR_ASSERT(elem_bytes <= tbl->mem_used);
+ tbl->mem_used -= (uint32_t)elem_bytes;
+ tbl->first_ent = ((tbl->first_ent + 1) % tbl->cap_entries);
tbl->num_ents--;
- grpc_mdelem_unref(first_ent);
+ GRPC_MDELEM_UNREF(first_ent);
}
-void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
+static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
+ grpc_mdelem **ents = gpr_malloc(sizeof(*ents) * new_cap);
+ uint32_t i;
+
+ for (i = 0; i < tbl->num_ents; i++) {
+ ents[i] = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
+ }
+ gpr_free(tbl->ents);
+ tbl->ents = ents;
+ tbl->cap_entries = new_cap;
+ tbl->first_ent = 0;
+}
+
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl *tbl,
+ uint32_t max_bytes) {
+ if (tbl->max_bytes == max_bytes) {
+ return;
+ }
+ gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+ while (tbl->mem_used > max_bytes) {
+ evict1(tbl);
+ }
+ tbl->max_bytes = max_bytes;
+}
+
+int grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl *tbl,
+ uint32_t bytes) {
+ if (tbl->current_table_bytes == bytes) {
+ return 1;
+ }
+ if (bytes > tbl->max_bytes) {
+ gpr_log(GPR_ERROR,
+ "Attempt to make hpack table %d bytes when max is %d bytes", bytes,
+ tbl->max_bytes);
+ return 0;
+ }
+ gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
+ while (tbl->mem_used > bytes) {
+ evict1(tbl);
+ }
+ tbl->current_table_bytes = bytes;
+ tbl->max_entries = entries_for_bytes(bytes);
+ if (tbl->max_entries > tbl->cap_entries) {
+ rebuild_ents(tbl, GPR_MAX(tbl->max_entries, 2 * tbl->cap_entries));
+ } else if (tbl->max_entries < tbl->cap_entries / 3) {
+ uint32_t new_cap = GPR_MAX(tbl->max_entries, 16u);
+ if (new_cap != tbl->cap_entries) {
+ rebuild_ents(tbl, new_cap);
+ }
+ }
+ return 1;
+}
+
+int grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
/* determine how many bytes of buffer this entry represents */
- gpr_uint16 elem_bytes = GPR_SLICE_LENGTH(md->key->slice) +
- GPR_SLICE_LENGTH(md->value->slice) +
- GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+ size_t elem_bytes = GPR_SLICE_LENGTH(md->key->slice) +
+ GPR_SLICE_LENGTH(md->value->slice) +
+ GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+
+ if (tbl->current_table_bytes > tbl->max_bytes) {
+ gpr_log(GPR_ERROR,
+ "HPACK max table size reduced to %d but not reflected by hpack "
+ "stream (still at %d)",
+ tbl->max_bytes, tbl->current_table_bytes);
+ return 0;
+ }
/* we can't add elements bigger than the max table size */
- if (elem_bytes > tbl->max_bytes) {
+ if (elem_bytes > tbl->current_table_bytes) {
/* HPACK draft 10 section 4.4 states:
* If the size of the new entry is less than or equal to the maximum
* size, that entry is added to the table. It is not an error to
@@ -178,42 +314,43 @@ void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
while (tbl->num_ents) {
evict1(tbl);
}
- return;
+ return 1;
}
/* evict entries to ensure no overflow */
- while (elem_bytes > tbl->max_bytes - tbl->mem_used) {
+ while (elem_bytes > (size_t)tbl->current_table_bytes - tbl->mem_used) {
evict1(tbl);
}
/* copy the finalized entry in */
- tbl->ents[tbl->last_ent] = md;
+ tbl->ents[(tbl->first_ent + tbl->num_ents) % tbl->cap_entries] =
+ GRPC_MDELEM_REF(md);
/* update accounting values */
- tbl->last_ent = (tbl->last_ent + 1) % GRPC_CHTTP2_MAX_TABLE_COUNT;
tbl->num_ents++;
- tbl->mem_used += elem_bytes;
+ tbl->mem_used += (uint32_t)elem_bytes;
+ return 1;
}
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
const grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
grpc_chttp2_hptbl_find_result r = {0, 0};
- int i;
+ uint32_t i;
/* See if the string is in the static table */
for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
grpc_mdelem *ent = tbl->static_ents[i];
if (md->key != ent->key) continue;
- r.index = i + 1;
+ r.index = i + 1u;
r.has_value = md->value == ent->value;
if (r.has_value) return r;
}
/* Scan the dynamic table */
for (i = 0; i < tbl->num_ents; i++) {
- int idx = tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY;
- grpc_mdelem *ent =
- tbl->ents[(tbl->first_ent + i) % GRPC_CHTTP2_MAX_TABLE_COUNT];
+ uint32_t idx =
+ (uint32_t)(tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY);
+ grpc_mdelem *ent = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
if (md->key != ent->key) continue;
r.index = idx;
r.has_value = md->value == ent->value;
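
The hunks above replace the fixed-size ents[] array with a heap-allocated circular buffer indexed modulo cap_entries: new entries are appended after the newest slot, evict1 advances first_ent, and dynamic-table lookups count backwards from the newest entry. A minimal standalone sketch of that indexing, using a hypothetical mini_tbl with strings in place of grpc_mdelem pointers and a fixed capacity:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical miniature of the dynamic-table ring buffer: strings stand in
   for grpc_mdelem pointers, and the capacity is fixed for brevity. */
typedef struct {
  const char **ents;
  unsigned first_ent; /* index of the oldest entry */
  unsigned num_ents;  /* number of live entries */
  unsigned cap;       /* allocated slots */
} mini_tbl;

/* Append behind the newest entry (mirrors the slot computation in hptbl_add). */
static void mini_add(mini_tbl *t, const char *v) {
  t->ents[(t->first_ent + t->num_ents) % t->cap] = v;
  t->num_ents++;
}

/* Drop the oldest entry (mirrors evict1). */
static void mini_evict(mini_tbl *t) {
  t->first_ent = (t->first_ent + 1) % t->cap;
  t->num_ents--;
}

/* Dynamic-table index 0 = newest entry (mirrors hptbl_lookup once the
   static-table offset has been subtracted). */
static const char *mini_lookup(const mini_tbl *t, unsigned idx) {
  if (idx >= t->num_ents) return NULL;
  return t->ents[(t->num_ents - 1 - idx + t->first_ent) % t->cap];
}

int main(void) {
  const char *slots[4] = {0};
  mini_tbl t = {slots, 0, 0, 4};
  mini_add(&t, "a"); mini_add(&t, "b"); mini_add(&t, "c");
  mini_evict(&t);                        /* "a" gone, "b" is now the oldest */
  mini_add(&t, "d"); mini_add(&t, "e");  /* wraps around the buffer */
  printf("%s %s %s %s\n", mini_lookup(&t, 0), mini_lookup(&t, 1),
         mini_lookup(&t, 2), mini_lookup(&t, 3)); /* prints: e d c b */
  return 0;
}

Index 0 is always the most recently added entry, and eviction plus wrap-around never disturbs the modular arithmetic.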
diff --git a/src/core/transport/chttp2/hpack_table.h b/src/core/transport/chttp2/hpack_table.h
index 4f882e2e03..6e1b5e66b5 100644
--- a/src/core/transport/chttp2/hpack_table.h
+++ b/src/core/transport/chttp2/hpack_table.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H
#include "src/core/transport/metadata.h"
#include <grpc/support/port_platform.h>
@@ -49,49 +49,60 @@
#define GRPC_CHTTP2_MAX_HPACK_TABLE_SIZE GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE
/* Per entry overhead bytes as per the spec */
#define GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD 32
+#if 0
/* Maximum number of entries we could possibly fit in the table, given defined
overheads */
#define GRPC_CHTTP2_MAX_TABLE_COUNT \
((GRPC_CHTTP2_MAX_HPACK_TABLE_SIZE + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD - 1) / \
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD)
+#endif
/* hpack decoder table */
typedef struct {
- grpc_mdctx *mdctx;
/* the first used entry in ents */
- gpr_uint16 first_ent;
- /* the last used entry in ents */
- gpr_uint16 last_ent;
+ uint32_t first_ent;
/* how many entries are in the table */
- gpr_uint16 num_ents;
+ uint32_t num_ents;
/* the amount of memory used by the table, according to the hpack algorithm */
- gpr_uint16 mem_used;
+ uint32_t mem_used;
/* the max memory allowed to be used by the table, according to the hpack
algorithm */
- gpr_uint16 max_bytes;
+ uint32_t max_bytes;
+ /* the currently agreed size of the table, according to the hpack algorithm */
+ uint32_t current_table_bytes;
+ /* Maximum number of entries we could possibly fit in the table, given defined
+ overheads */
+ uint32_t max_entries;
+ /* Number of entries allocated in ents */
+ uint32_t cap_entries;
/* a circular buffer of headers - this is stored in the opposite order to
what hpack specifies, in order to simplify table management a little...
meaning lookups need to SUBTRACT from the end position */
- grpc_mdelem *ents[GRPC_CHTTP2_MAX_TABLE_COUNT];
+ grpc_mdelem **ents;
grpc_mdelem *static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY];
} grpc_chttp2_hptbl;
/* initialize a hpack table */
-void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl, grpc_mdctx *mdctx);
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl);
void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl);
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl *tbl,
+ uint32_t max_bytes);
+int grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl *tbl,
+ uint32_t bytes);
/* lookup a table entry based on its hpack index */
grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
- gpr_uint32 index);
+ uint32_t index);
/* add a table entry to the index */
-void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md);
+int grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl,
+ grpc_mdelem *md) GRPC_MUST_USE_RESULT;
/* Find a key/value pair in the table... returns the index in the table of the
most similar entry, or 0 if the value was not found */
typedef struct {
- gpr_uint16 index;
- gpr_uint8 has_value;
+ uint32_t index;
+ int has_value;
} grpc_chttp2_hptbl_find_result;
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
const grpc_chttp2_hptbl *tbl, grpc_mdelem *md);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_HPACK_TABLE_H */
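
max_entries and cap_entries in the struct above are derived from the table's byte budget via the 32-byte per-entry overhead that HPACK charges on top of key and value lengths; entries_for_bytes in the .c hunk rounds that division up. A small sketch of the arithmetic (the helper name mirrors the static function above; the key/value sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

#define HPACK_ENTRY_OVERHEAD 32 /* per-entry overhead bytes, per the spec */

/* Upper bound on entry count for a byte budget: every entry costs at least
   the 32-byte overhead, so divide and round up. */
static uint32_t entries_for_bytes(uint32_t bytes) {
  return (bytes + HPACK_ENTRY_OVERHEAD - 1) / HPACK_ENTRY_OVERHEAD;
}

/* HPACK-accounted size of one entry. */
static uint32_t entry_bytes(uint32_t key_len, uint32_t value_len) {
  return key_len + value_len + HPACK_ENTRY_OVERHEAD;
}

int main(void) {
  uint32_t budget = 4096; /* assuming the usual initial table size */
  printf("budget %u bytes -> at most %u entries\n", budget,
         entries_for_bytes(budget));
  /* a 10-byte key plus an 11-byte value costs 53 accounted bytes */
  printf("10 + 11 byte header costs %u accounted bytes\n", entry_bytes(10, 11));
  return 0;
}

Assuming the 4096-byte initial size, this yields 128 slots, matching what the now-#if 0'd GRPC_CHTTP2_MAX_TABLE_COUNT formula evaluated to; the difference is that the new code recomputes the bound whenever the peer changes the table size.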
diff --git a/src/core/transport/chttp2/http2_errors.h b/src/core/transport/chttp2/http2_errors.h
index a4f309e056..4290df3d89 100644
--- a/src/core/transport/chttp2/http2_errors.h
+++ b/src/core/transport/chttp2/http2_errors.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H
/* error codes for RST_STREAM from http2 draft 14 section 7 */
typedef enum {
@@ -53,4 +53,4 @@ typedef enum {
GRPC_CHTTP2__ERROR_DO_NOT_USE = -1
} grpc_chttp2_error_code;
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_HTTP2_ERRORS_H */
diff --git a/src/core/transport/chttp2/huffsyms.c b/src/core/transport/chttp2/huffsyms.c
index 6f5cf6a2a9..7b138e9b5d 100644
--- a/src/core/transport/chttp2/huffsyms.c
+++ b/src/core/transport/chttp2/huffsyms.c
@@ -37,69 +37,261 @@
command:
:%s/.* \([0-9a-f]\+\) \[ *\([0-9]\+\)\]/{0x\1, \2},/g */
const grpc_chttp2_huffsym grpc_chttp2_huffsyms[GRPC_CHTTP2_NUM_HUFFSYMS] = {
- {0x1ff8, 13}, {0x7fffd8, 23}, {0xfffffe2, 28}, {0xfffffe3, 28},
- {0xfffffe4, 28}, {0xfffffe5, 28}, {0xfffffe6, 28}, {0xfffffe7, 28},
- {0xfffffe8, 28}, {0xffffea, 24}, {0x3ffffffc, 30}, {0xfffffe9, 28},
- {0xfffffea, 28}, {0x3ffffffd, 30}, {0xfffffeb, 28}, {0xfffffec, 28},
- {0xfffffed, 28}, {0xfffffee, 28}, {0xfffffef, 28}, {0xffffff0, 28},
- {0xffffff1, 28}, {0xffffff2, 28}, {0x3ffffffe, 30}, {0xffffff3, 28},
- {0xffffff4, 28}, {0xffffff5, 28}, {0xffffff6, 28}, {0xffffff7, 28},
- {0xffffff8, 28}, {0xffffff9, 28}, {0xffffffa, 28}, {0xffffffb, 28},
- {0x14, 6}, {0x3f8, 10}, {0x3f9, 10}, {0xffa, 12},
- {0x1ff9, 13}, {0x15, 6}, {0xf8, 8}, {0x7fa, 11},
- {0x3fa, 10}, {0x3fb, 10}, {0xf9, 8}, {0x7fb, 11},
- {0xfa, 8}, {0x16, 6}, {0x17, 6}, {0x18, 6},
- {0x0, 5}, {0x1, 5}, {0x2, 5}, {0x19, 6},
- {0x1a, 6}, {0x1b, 6}, {0x1c, 6}, {0x1d, 6},
- {0x1e, 6}, {0x1f, 6}, {0x5c, 7}, {0xfb, 8},
- {0x7ffc, 15}, {0x20, 6}, {0xffb, 12}, {0x3fc, 10},
- {0x1ffa, 13}, {0x21, 6}, {0x5d, 7}, {0x5e, 7},
- {0x5f, 7}, {0x60, 7}, {0x61, 7}, {0x62, 7},
- {0x63, 7}, {0x64, 7}, {0x65, 7}, {0x66, 7},
- {0x67, 7}, {0x68, 7}, {0x69, 7}, {0x6a, 7},
- {0x6b, 7}, {0x6c, 7}, {0x6d, 7}, {0x6e, 7},
- {0x6f, 7}, {0x70, 7}, {0x71, 7}, {0x72, 7},
- {0xfc, 8}, {0x73, 7}, {0xfd, 8}, {0x1ffb, 13},
- {0x7fff0, 19}, {0x1ffc, 13}, {0x3ffc, 14}, {0x22, 6},
- {0x7ffd, 15}, {0x3, 5}, {0x23, 6}, {0x4, 5},
- {0x24, 6}, {0x5, 5}, {0x25, 6}, {0x26, 6},
- {0x27, 6}, {0x6, 5}, {0x74, 7}, {0x75, 7},
- {0x28, 6}, {0x29, 6}, {0x2a, 6}, {0x7, 5},
- {0x2b, 6}, {0x76, 7}, {0x2c, 6}, {0x8, 5},
- {0x9, 5}, {0x2d, 6}, {0x77, 7}, {0x78, 7},
- {0x79, 7}, {0x7a, 7}, {0x7b, 7}, {0x7ffe, 15},
- {0x7fc, 11}, {0x3ffd, 14}, {0x1ffd, 13}, {0xffffffc, 28},
- {0xfffe6, 20}, {0x3fffd2, 22}, {0xfffe7, 20}, {0xfffe8, 20},
- {0x3fffd3, 22}, {0x3fffd4, 22}, {0x3fffd5, 22}, {0x7fffd9, 23},
- {0x3fffd6, 22}, {0x7fffda, 23}, {0x7fffdb, 23}, {0x7fffdc, 23},
- {0x7fffdd, 23}, {0x7fffde, 23}, {0xffffeb, 24}, {0x7fffdf, 23},
- {0xffffec, 24}, {0xffffed, 24}, {0x3fffd7, 22}, {0x7fffe0, 23},
- {0xffffee, 24}, {0x7fffe1, 23}, {0x7fffe2, 23}, {0x7fffe3, 23},
- {0x7fffe4, 23}, {0x1fffdc, 21}, {0x3fffd8, 22}, {0x7fffe5, 23},
- {0x3fffd9, 22}, {0x7fffe6, 23}, {0x7fffe7, 23}, {0xffffef, 24},
- {0x3fffda, 22}, {0x1fffdd, 21}, {0xfffe9, 20}, {0x3fffdb, 22},
- {0x3fffdc, 22}, {0x7fffe8, 23}, {0x7fffe9, 23}, {0x1fffde, 21},
- {0x7fffea, 23}, {0x3fffdd, 22}, {0x3fffde, 22}, {0xfffff0, 24},
- {0x1fffdf, 21}, {0x3fffdf, 22}, {0x7fffeb, 23}, {0x7fffec, 23},
- {0x1fffe0, 21}, {0x1fffe1, 21}, {0x3fffe0, 22}, {0x1fffe2, 21},
- {0x7fffed, 23}, {0x3fffe1, 22}, {0x7fffee, 23}, {0x7fffef, 23},
- {0xfffea, 20}, {0x3fffe2, 22}, {0x3fffe3, 22}, {0x3fffe4, 22},
- {0x7ffff0, 23}, {0x3fffe5, 22}, {0x3fffe6, 22}, {0x7ffff1, 23},
- {0x3ffffe0, 26}, {0x3ffffe1, 26}, {0xfffeb, 20}, {0x7fff1, 19},
- {0x3fffe7, 22}, {0x7ffff2, 23}, {0x3fffe8, 22}, {0x1ffffec, 25},
- {0x3ffffe2, 26}, {0x3ffffe3, 26}, {0x3ffffe4, 26}, {0x7ffffde, 27},
- {0x7ffffdf, 27}, {0x3ffffe5, 26}, {0xfffff1, 24}, {0x1ffffed, 25},
- {0x7fff2, 19}, {0x1fffe3, 21}, {0x3ffffe6, 26}, {0x7ffffe0, 27},
- {0x7ffffe1, 27}, {0x3ffffe7, 26}, {0x7ffffe2, 27}, {0xfffff2, 24},
- {0x1fffe4, 21}, {0x1fffe5, 21}, {0x3ffffe8, 26}, {0x3ffffe9, 26},
- {0xffffffd, 28}, {0x7ffffe3, 27}, {0x7ffffe4, 27}, {0x7ffffe5, 27},
- {0xfffec, 20}, {0xfffff3, 24}, {0xfffed, 20}, {0x1fffe6, 21},
- {0x3fffe9, 22}, {0x1fffe7, 21}, {0x1fffe8, 21}, {0x7ffff3, 23},
- {0x3fffea, 22}, {0x3fffeb, 22}, {0x1ffffee, 25}, {0x1ffffef, 25},
- {0xfffff4, 24}, {0xfffff5, 24}, {0x3ffffea, 26}, {0x7ffff4, 23},
- {0x3ffffeb, 26}, {0x7ffffe6, 27}, {0x3ffffec, 26}, {0x3ffffed, 26},
- {0x7ffffe7, 27}, {0x7ffffe8, 27}, {0x7ffffe9, 27}, {0x7ffffea, 27},
- {0x7ffffeb, 27}, {0xffffffe, 28}, {0x7ffffec, 27}, {0x7ffffed, 27},
- {0x7ffffee, 27}, {0x7ffffef, 27}, {0x7fffff0, 27}, {0x3ffffee, 26},
+ {0x1ff8, 13},
+ {0x7fffd8, 23},
+ {0xfffffe2, 28},
+ {0xfffffe3, 28},
+ {0xfffffe4, 28},
+ {0xfffffe5, 28},
+ {0xfffffe6, 28},
+ {0xfffffe7, 28},
+ {0xfffffe8, 28},
+ {0xffffea, 24},
+ {0x3ffffffc, 30},
+ {0xfffffe9, 28},
+ {0xfffffea, 28},
+ {0x3ffffffd, 30},
+ {0xfffffeb, 28},
+ {0xfffffec, 28},
+ {0xfffffed, 28},
+ {0xfffffee, 28},
+ {0xfffffef, 28},
+ {0xffffff0, 28},
+ {0xffffff1, 28},
+ {0xffffff2, 28},
+ {0x3ffffffe, 30},
+ {0xffffff3, 28},
+ {0xffffff4, 28},
+ {0xffffff5, 28},
+ {0xffffff6, 28},
+ {0xffffff7, 28},
+ {0xffffff8, 28},
+ {0xffffff9, 28},
+ {0xffffffa, 28},
+ {0xffffffb, 28},
+ {0x14, 6},
+ {0x3f8, 10},
+ {0x3f9, 10},
+ {0xffa, 12},
+ {0x1ff9, 13},
+ {0x15, 6},
+ {0xf8, 8},
+ {0x7fa, 11},
+ {0x3fa, 10},
+ {0x3fb, 10},
+ {0xf9, 8},
+ {0x7fb, 11},
+ {0xfa, 8},
+ {0x16, 6},
+ {0x17, 6},
+ {0x18, 6},
+ {0x0, 5},
+ {0x1, 5},
+ {0x2, 5},
+ {0x19, 6},
+ {0x1a, 6},
+ {0x1b, 6},
+ {0x1c, 6},
+ {0x1d, 6},
+ {0x1e, 6},
+ {0x1f, 6},
+ {0x5c, 7},
+ {0xfb, 8},
+ {0x7ffc, 15},
+ {0x20, 6},
+ {0xffb, 12},
+ {0x3fc, 10},
+ {0x1ffa, 13},
+ {0x21, 6},
+ {0x5d, 7},
+ {0x5e, 7},
+ {0x5f, 7},
+ {0x60, 7},
+ {0x61, 7},
+ {0x62, 7},
+ {0x63, 7},
+ {0x64, 7},
+ {0x65, 7},
+ {0x66, 7},
+ {0x67, 7},
+ {0x68, 7},
+ {0x69, 7},
+ {0x6a, 7},
+ {0x6b, 7},
+ {0x6c, 7},
+ {0x6d, 7},
+ {0x6e, 7},
+ {0x6f, 7},
+ {0x70, 7},
+ {0x71, 7},
+ {0x72, 7},
+ {0xfc, 8},
+ {0x73, 7},
+ {0xfd, 8},
+ {0x1ffb, 13},
+ {0x7fff0, 19},
+ {0x1ffc, 13},
+ {0x3ffc, 14},
+ {0x22, 6},
+ {0x7ffd, 15},
+ {0x3, 5},
+ {0x23, 6},
+ {0x4, 5},
+ {0x24, 6},
+ {0x5, 5},
+ {0x25, 6},
+ {0x26, 6},
+ {0x27, 6},
+ {0x6, 5},
+ {0x74, 7},
+ {0x75, 7},
+ {0x28, 6},
+ {0x29, 6},
+ {0x2a, 6},
+ {0x7, 5},
+ {0x2b, 6},
+ {0x76, 7},
+ {0x2c, 6},
+ {0x8, 5},
+ {0x9, 5},
+ {0x2d, 6},
+ {0x77, 7},
+ {0x78, 7},
+ {0x79, 7},
+ {0x7a, 7},
+ {0x7b, 7},
+ {0x7ffe, 15},
+ {0x7fc, 11},
+ {0x3ffd, 14},
+ {0x1ffd, 13},
+ {0xffffffc, 28},
+ {0xfffe6, 20},
+ {0x3fffd2, 22},
+ {0xfffe7, 20},
+ {0xfffe8, 20},
+ {0x3fffd3, 22},
+ {0x3fffd4, 22},
+ {0x3fffd5, 22},
+ {0x7fffd9, 23},
+ {0x3fffd6, 22},
+ {0x7fffda, 23},
+ {0x7fffdb, 23},
+ {0x7fffdc, 23},
+ {0x7fffdd, 23},
+ {0x7fffde, 23},
+ {0xffffeb, 24},
+ {0x7fffdf, 23},
+ {0xffffec, 24},
+ {0xffffed, 24},
+ {0x3fffd7, 22},
+ {0x7fffe0, 23},
+ {0xffffee, 24},
+ {0x7fffe1, 23},
+ {0x7fffe2, 23},
+ {0x7fffe3, 23},
+ {0x7fffe4, 23},
+ {0x1fffdc, 21},
+ {0x3fffd8, 22},
+ {0x7fffe5, 23},
+ {0x3fffd9, 22},
+ {0x7fffe6, 23},
+ {0x7fffe7, 23},
+ {0xffffef, 24},
+ {0x3fffda, 22},
+ {0x1fffdd, 21},
+ {0xfffe9, 20},
+ {0x3fffdb, 22},
+ {0x3fffdc, 22},
+ {0x7fffe8, 23},
+ {0x7fffe9, 23},
+ {0x1fffde, 21},
+ {0x7fffea, 23},
+ {0x3fffdd, 22},
+ {0x3fffde, 22},
+ {0xfffff0, 24},
+ {0x1fffdf, 21},
+ {0x3fffdf, 22},
+ {0x7fffeb, 23},
+ {0x7fffec, 23},
+ {0x1fffe0, 21},
+ {0x1fffe1, 21},
+ {0x3fffe0, 22},
+ {0x1fffe2, 21},
+ {0x7fffed, 23},
+ {0x3fffe1, 22},
+ {0x7fffee, 23},
+ {0x7fffef, 23},
+ {0xfffea, 20},
+ {0x3fffe2, 22},
+ {0x3fffe3, 22},
+ {0x3fffe4, 22},
+ {0x7ffff0, 23},
+ {0x3fffe5, 22},
+ {0x3fffe6, 22},
+ {0x7ffff1, 23},
+ {0x3ffffe0, 26},
+ {0x3ffffe1, 26},
+ {0xfffeb, 20},
+ {0x7fff1, 19},
+ {0x3fffe7, 22},
+ {0x7ffff2, 23},
+ {0x3fffe8, 22},
+ {0x1ffffec, 25},
+ {0x3ffffe2, 26},
+ {0x3ffffe3, 26},
+ {0x3ffffe4, 26},
+ {0x7ffffde, 27},
+ {0x7ffffdf, 27},
+ {0x3ffffe5, 26},
+ {0xfffff1, 24},
+ {0x1ffffed, 25},
+ {0x7fff2, 19},
+ {0x1fffe3, 21},
+ {0x3ffffe6, 26},
+ {0x7ffffe0, 27},
+ {0x7ffffe1, 27},
+ {0x3ffffe7, 26},
+ {0x7ffffe2, 27},
+ {0xfffff2, 24},
+ {0x1fffe4, 21},
+ {0x1fffe5, 21},
+ {0x3ffffe8, 26},
+ {0x3ffffe9, 26},
+ {0xffffffd, 28},
+ {0x7ffffe3, 27},
+ {0x7ffffe4, 27},
+ {0x7ffffe5, 27},
+ {0xfffec, 20},
+ {0xfffff3, 24},
+ {0xfffed, 20},
+ {0x1fffe6, 21},
+ {0x3fffe9, 22},
+ {0x1fffe7, 21},
+ {0x1fffe8, 21},
+ {0x7ffff3, 23},
+ {0x3fffea, 22},
+ {0x3fffeb, 22},
+ {0x1ffffee, 25},
+ {0x1ffffef, 25},
+ {0xfffff4, 24},
+ {0xfffff5, 24},
+ {0x3ffffea, 26},
+ {0x7ffff4, 23},
+ {0x3ffffeb, 26},
+ {0x7ffffe6, 27},
+ {0x3ffffec, 26},
+ {0x3ffffed, 26},
+ {0x7ffffe7, 27},
+ {0x7ffffe8, 27},
+ {0x7ffffe9, 27},
+ {0x7ffffea, 27},
+ {0x7ffffeb, 27},
+ {0xffffffe, 28},
+ {0x7ffffec, 27},
+ {0x7ffffed, 27},
+ {0x7ffffee, 27},
+ {0x7ffffef, 27},
+ {0x7fffff0, 27},
+ {0x3ffffee, 26},
{0x3fffffff, 30},
};
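
Each {code, length} pair above is a right-aligned Huffman code and its bit length, listed in symbol order 0..255. A hypothetical MSB-first bit writer showing how such a table is typically consumed when encoding a string (this is not the library's encoder, which lives in the hpack encoder sources):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct {
  uint8_t buf[64];
  size_t nbytes;   /* complete bytes written */
  uint32_t acc;    /* bit accumulator */
  unsigned nbits;  /* bits currently in the accumulator */
} bitwriter;

/* Append the low `len` bits of `code`, most significant bit first. */
static void put_sym(bitwriter *w, uint32_t code, unsigned len) {
  for (unsigned i = len; i > 0; i--) {
    unsigned bit = (code >> (i - 1)) & 1u;
    w->acc = (w->acc << 1) | bit;
    if (++w->nbits == 8) {
      w->buf[w->nbytes++] = (uint8_t)w->acc;
      w->acc = 0;
      w->nbits = 0;
    }
  }
}

/* Pad the final partial byte with 1s, as HPACK's Huffman coding requires. */
static void finish(bitwriter *w) {
  if (w->nbits) {
    w->buf[w->nbytes++] =
        (uint8_t)((w->acc << (8 - w->nbits)) | ((1u << (8 - w->nbits)) - 1));
    w->nbits = 0;
  }
}

int main(void) {
  /* {code, length} pairs for 'a', 'b', 'c' (symbols 97..99 in the table
     above): 0x3 (5 bits), 0x23 (6 bits), 0x4 (5 bits). */
  bitwriter w;
  memset(&w, 0, sizeof(w));
  put_sym(&w, 0x3, 5);
  put_sym(&w, 0x23, 6);
  put_sym(&w, 0x4, 5);
  finish(&w);
  for (size_t i = 0; i < w.nbytes; i++) printf("%02x ", (unsigned)w.buf[i]);
  printf("\n"); /* 5+6+5 = 16 bits packed MSB-first: 1c 64 */
  return 0;
}

For "abc" the three codes pack into exactly two bytes, 0x1c 0x64, with no padding needed.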
diff --git a/src/core/transport/chttp2/huffsyms.h b/src/core/transport/chttp2/huffsyms.h
index a3cdba8235..9c4f09dcf6 100644
--- a/src/core/transport/chttp2/huffsyms.h
+++ b/src/core/transport/chttp2/huffsyms.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H
/* HPACK static huffman table */
@@ -45,4 +45,4 @@ typedef struct {
extern const grpc_chttp2_huffsym grpc_chttp2_huffsyms[GRPC_CHTTP2_NUM_HUFFSYMS];
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_HUFFSYMS_H */
diff --git a/src/core/transport/chttp2/incoming_metadata.c b/src/core/transport/chttp2/incoming_metadata.c
index a4b7174329..315bc2faa1 100644
--- a/src/core/transport/chttp2/incoming_metadata.c
+++ b/src/core/transport/chttp2/incoming_metadata.c
@@ -42,20 +42,23 @@
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer) {
- buffer->deadline = gpr_inf_future;
+ buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer) {
size_t i;
- for (i = 0; i < buffer->count; i++) {
- grpc_mdelem_unref(buffer->elems[i].md);
+ if (!buffer->published) {
+ for (i = 0; i < buffer->count; i++) {
+ GRPC_MDELEM_UNREF(buffer->elems[i].md);
+ }
}
gpr_free(buffer->elems);
}
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem) {
+ GPR_ASSERT(!buffer->published);
if (buffer->capacity == buffer->count) {
buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
buffer->elems =
@@ -66,116 +69,28 @@ void grpc_chttp2_incoming_metadata_buffer_add(
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
+ GPR_ASSERT(!buffer->published);
buffer->deadline = deadline;
}
-void grpc_chttp2_incoming_metadata_live_op_buffer_end(
- grpc_chttp2_incoming_metadata_live_op_buffer *buffer) {
- gpr_free(buffer->elems);
- buffer->elems = NULL;
-}
-
-void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb) {
- grpc_metadata_batch b;
-
- b.list.head = NULL;
- /* Store away the last element of the list, so that in patch_metadata_ops
- we can reconstitute the list.
- We can't do list building here as later incoming metadata may reallocate
- the underlying array. */
- b.list.tail = (void *)(gpr_intptr)buffer->count;
- b.garbage.head = b.garbage.tail = NULL;
- b.deadline = buffer->deadline;
- buffer->deadline = gpr_inf_future;
-
- grpc_sopb_add_metadata(sopb, b);
-}
-
-void grpc_chttp2_incoming_metadata_buffer_swap(
- grpc_chttp2_incoming_metadata_buffer *a,
- grpc_chttp2_incoming_metadata_buffer *b) {
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer, *a, *b);
-}
-
-void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- grpc_chttp2_incoming_metadata_buffer *src,
- grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb) {
- size_t delta;
- size_t i;
- dst->deadline = gpr_time_min(src->deadline, dst->deadline);
-
- if (src->count == 0) {
- return;
- }
- if (dst->count == 0) {
- grpc_chttp2_incoming_metadata_buffer_swap(src, dst);
- return;
- }
- delta = dst->count;
- if (dst->capacity < src->count + dst->count) {
- dst->capacity = GPR_MAX(dst->capacity * 2, src->count + dst->count);
- dst->elems = gpr_realloc(dst->elems, dst->capacity * sizeof(*dst->elems));
- }
- memcpy(dst->elems + dst->count, src->elems, src->count * sizeof(*src->elems));
- dst->count += src->count;
- for (i = 0; i < sopb->nops; i++) {
- if (sopb->ops[i].type != GRPC_OP_METADATA) continue;
- sopb->ops[i].data.metadata.list.tail =
- (void *)(delta + (gpr_intptr)sopb->ops[i].data.metadata.list.tail);
- }
-}
-
-void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer) {
- grpc_stream_op *ops = sopb->ops;
- size_t nops = sopb->nops;
- size_t i;
- size_t j;
- size_t mdidx = 0;
- size_t last_mdidx;
- int found_metadata = 0;
-
- /* rework the array of metadata into a linked list, making use
- of the breadcrumbs we left in metadata batches during
- add_metadata_batch */
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- found_metadata = 1;
- /* we left a breadcrumb indicating where the end of this list is,
- and since we add sequentially, we know from the end of the last
- segment where this segment begins */
- last_mdidx = (size_t)(gpr_intptr)(op->data.metadata.list.tail);
- GPR_ASSERT(last_mdidx > mdidx);
- GPR_ASSERT(last_mdidx <= buffer->count);
- /* turn the array into a doubly linked list */
- op->data.metadata.list.head = &buffer->elems[mdidx];
- op->data.metadata.list.tail = &buffer->elems[last_mdidx - 1];
- for (j = mdidx + 1; j < last_mdidx; j++) {
- buffer->elems[j].prev = &buffer->elems[j - 1];
- buffer->elems[j - 1].next = &buffer->elems[j];
+void grpc_chttp2_incoming_metadata_buffer_publish(
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch) {
+ GPR_ASSERT(!buffer->published);
+ buffer->published = 1;
+ if (buffer->count > 0) {
+ size_t i;
+ for (i = 1; i < buffer->count; i++) {
+ buffer->elems[i].prev = &buffer->elems[i - 1];
}
- buffer->elems[mdidx].prev = NULL;
- buffer->elems[last_mdidx - 1].next = NULL;
- /* track where we're up to */
- mdidx = last_mdidx;
- }
- if (found_metadata) {
- live_op_buffer->elems = buffer->elems;
- if (mdidx != buffer->count) {
- /* we have a partially read metadata batch still in incoming_metadata */
- size_t new_count = buffer->count - mdidx;
- size_t copy_bytes = sizeof(*buffer->elems) * new_count;
- GPR_ASSERT(mdidx < buffer->count);
- buffer->elems = gpr_malloc(copy_bytes);
- memcpy(live_op_buffer->elems + mdidx, buffer->elems, copy_bytes);
- buffer->count = buffer->capacity = new_count;
- } else {
- buffer->elems = NULL;
- buffer->count = 0;
- buffer->capacity = 0;
+ for (i = 0; i < buffer->count - 1; i++) {
+ buffer->elems[i].next = &buffer->elems[i + 1];
}
+ buffer->elems[0].prev = NULL;
+ buffer->elems[buffer->count - 1].next = NULL;
+ batch->list.head = &buffer->elems[0];
+ batch->list.tail = &buffer->elems[buffer->count - 1];
+ } else {
+ batch->list.head = batch->list.tail = NULL;
}
+ batch->deadline = buffer->deadline;
}
diff --git a/src/core/transport/chttp2/incoming_metadata.h b/src/core/transport/chttp2/incoming_metadata.h
index 2f1de411ba..52454f348c 100644
--- a/src/core/transport/chttp2/incoming_metadata.h
+++ b/src/core/transport/chttp2/incoming_metadata.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H
-#define GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_INCOMING_METADATA_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_INCOMING_METADATA_H
#include "src/core/transport/transport.h"
@@ -41,40 +41,20 @@ typedef struct {
size_t count;
size_t capacity;
gpr_timespec deadline;
+ int published;
} grpc_chttp2_incoming_metadata_buffer;
-typedef struct {
- grpc_linked_mdelem *elems;
-} grpc_chttp2_incoming_metadata_live_op_buffer;
-
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer);
-void grpc_chttp2_incoming_metadata_buffer_reset(
- grpc_chttp2_incoming_metadata_buffer *buffer);
+void grpc_chttp2_incoming_metadata_buffer_publish(
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch);
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem);
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
-/** extend sopb with a metadata batch; this must be post-processed by
- grpc_chttp2_incoming_metadata_buffer_postprocess_sopb before being handed
- out of the transport */
-void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb);
-
-void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- grpc_chttp2_incoming_metadata_buffer *src,
- grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb);
-
-void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
-
-void grpc_chttp2_incoming_metadata_live_op_buffer_end(
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
-
-#endif /* GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_INCOMING_METADATA_H */
diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h
index a7a9880edb..c2977c7b3f 100644
--- a/src/core/transport/chttp2/internal.h
+++ b/src/core/transport/chttp2/internal.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,12 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
-#define GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_INTERNAL_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_INTERNAL_H
+
+#include <assert.h>
+#include <stdbool.h>
-#include "src/core/transport/transport_impl.h"
#include "src/core/iomgr/endpoint.h"
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/chttp2/frame_data.h"
@@ -43,10 +45,12 @@
#include "src/core/transport/chttp2/frame_rst_stream.h"
#include "src/core/transport/chttp2/frame_settings.h"
#include "src/core/transport/chttp2/frame_window_update.h"
+#include "src/core/transport/chttp2/hpack_encoder.h"
#include "src/core/transport/chttp2/hpack_parser.h"
#include "src/core/transport/chttp2/incoming_metadata.h"
-#include "src/core/transport/chttp2/stream_encoder.h"
#include "src/core/transport/chttp2/stream_map.h"
+#include "src/core/transport/connectivity_state.h"
+#include "src/core/transport/transport_impl.h"
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
@@ -55,14 +59,18 @@ typedef struct grpc_chttp2_stream grpc_chttp2_stream;
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED,
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS,
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
- GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED,
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
+ /* streams waiting for the outgoing window in the writing path, they will be
+ * merged to the stalled list or writable list under transport lock. */
+ GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
@@ -112,18 +120,6 @@ typedef enum {
GRPC_DTS_FRAME
} grpc_chttp2_deframe_transport_state;
-typedef enum {
- GRPC_WRITE_STATE_OPEN,
- GRPC_WRITE_STATE_QUEUED_CLOSE,
- GRPC_WRITE_STATE_SENT_CLOSE
-} grpc_chttp2_write_state;
-
-typedef enum {
- GRPC_DONT_SEND_CLOSED = 0,
- GRPC_SEND_CLOSED,
- GRPC_SEND_CLOSED_WITH_RST_STREAM
-} grpc_chttp2_send_closed;
-
typedef struct {
grpc_chttp2_stream *head;
grpc_chttp2_stream *tail;
@@ -134,12 +130,6 @@ typedef struct {
grpc_chttp2_stream *prev;
} grpc_chttp2_stream_link;
-typedef enum {
- GRPC_CHTTP2_ERROR_STATE_NONE,
- GRPC_CHTTP2_ERROR_STATE_SEEN,
- GRPC_CHTTP2_ERROR_STATE_NOTIFIED
-} grpc_chttp2_error_state;
-
/* We keep several sets of connection wide parameters */
typedef enum {
/* The settings our peer has asked for (and we have acked) */
@@ -155,62 +145,72 @@ typedef enum {
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
- gpr_uint8 id[8];
- grpc_iomgr_closure *on_recv;
+ uint8_t id[8];
+ grpc_closure *on_recv;
struct grpc_chttp2_outstanding_ping *next;
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
+/* forward declared in frame_data.h */
+struct grpc_chttp2_incoming_byte_stream {
+ grpc_byte_stream base;
+ gpr_refcount refs;
+ struct grpc_chttp2_incoming_byte_stream *next_message;
+ int failed;
+
+ grpc_chttp2_transport *transport;
+ grpc_chttp2_stream *stream;
+ int is_tail;
+ gpr_slice_buffer slices;
+ grpc_closure *on_next;
+ gpr_slice *next;
+};
+
typedef struct {
/** data to write next write */
gpr_slice_buffer qbuf;
- /** queued callbacks */
- grpc_iomgr_closure *pending_closures;
/** window available for us to send to peer */
- gpr_uint32 outgoing_window;
- /** window available for peer to send to us - updated after parse */
- gpr_uint32 incoming_window;
+ int64_t outgoing_window;
+ /** window available to announce to peer */
+ int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
- gpr_uint32 connection_window_target;
+ uint32_t connection_window_target;
+
+ /** have we seen a goaway */
+ uint8_t seen_goaway;
+ /** have we sent a goaway */
+ uint8_t sent_goaway;
/** is this transport a client? */
- gpr_uint8 is_client;
+ uint8_t is_client;
/** are the local settings dirty and need to be sent? */
- gpr_uint8 dirtied_local_settings;
+ uint8_t dirtied_local_settings;
/** have local settings been sent? */
- gpr_uint8 sent_local_settings;
+ uint8_t sent_local_settings;
/** bitmask of setting indexes to send out */
- gpr_uint32 force_send_settings;
+ uint32_t force_send_settings;
/** settings values */
- gpr_uint32 settings[GRPC_NUM_SETTING_SETS][GRPC_CHTTP2_NUM_SETTINGS];
-
- /** has there been a connection level error, and have we notified
- anyone about it? */
- grpc_chttp2_error_state error_state;
+ uint32_t settings[GRPC_NUM_SETTING_SETS][GRPC_CHTTP2_NUM_SETTINGS];
/** what is the next stream id to be allocated by this peer?
copied to next_stream_id in parsing when parsing commences */
- gpr_uint32 next_stream_id;
+ uint32_t next_stream_id;
+
+ /** how far to lookahead in a stream? */
+ uint32_t stream_lookahead;
/** last received stream id */
- gpr_uint32 last_incoming_stream_id;
+ uint32_t last_incoming_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
- gpr_uint64 ping_counter;
+ uint64_t ping_counter;
/** concurrent stream count: updated when not parsing,
so this is a strict over-estimation on the client */
- gpr_uint32 concurrent_stream_count;
-
- /** is there a goaway available? (boolean) */
- grpc_chttp2_error_state goaway_state;
- /** what is the debug text of the goaway? */
- gpr_slice goaway_text;
- /** what is the status code of the goaway? */
- grpc_status_code goaway_error;
+ uint32_t concurrent_stream_count;
} grpc_chttp2_transport_global;
typedef struct {
@@ -218,28 +218,32 @@ typedef struct {
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
+ int64_t outgoing_window;
/** is this a client? */
- gpr_uint8 is_client;
+ uint8_t is_client;
+ /** callback for when writing is done */
+ grpc_closure done_cb;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
/** is this transport a client? (boolean) */
- gpr_uint8 is_client;
+ uint8_t is_client;
/** were settings updated? */
- gpr_uint8 settings_updated;
+ uint8_t settings_updated;
/** was a settings ack received? */
- gpr_uint8 settings_ack_received;
+ uint8_t settings_ack_received;
/** was a goaway frame received? */
- gpr_uint8 goaway_received;
+ uint8_t goaway_received;
+
+ /** the last sent max_table_size setting */
+ uint32_t last_sent_max_table_size;
/** initial window change */
- gpr_int64 initial_window_update;
+ int64_t initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
- /* metadata object cache */
- grpc_mdstr *str_grpc_timeout;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -253,41 +257,38 @@ struct grpc_chttp2_transport_parsing {
grpc_chttp2_goaway_parser goaway_parser;
/** window available for peer to send to us */
- gpr_uint32 incoming_window;
- gpr_uint32 incoming_window_delta;
+ int64_t incoming_window;
/** next stream id available at the time of beginning parsing */
- gpr_uint32 next_stream_id;
- gpr_uint32 last_incoming_stream_id;
+ uint32_t next_stream_id;
+ uint32_t last_incoming_stream_id;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
- gpr_uint8 incoming_frame_type;
- gpr_uint8 incoming_frame_flags;
- gpr_uint8 header_eof;
- gpr_uint32 expect_continuation_stream_id;
- gpr_uint32 incoming_frame_size;
- gpr_uint32 incoming_stream_id;
+ uint8_t incoming_frame_type;
+ uint8_t incoming_frame_flags;
+ uint8_t header_eof;
+ uint32_t expect_continuation_stream_id;
+ uint32_t incoming_frame_size;
+ uint32_t incoming_stream_id;
/* active parser */
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_parse_error (*parser)(
- void *parser_user_data, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser_user_data,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
/* received settings */
- gpr_uint32 settings[GRPC_CHTTP2_NUM_SETTINGS];
+ uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
/* goaway data */
grpc_status_code goaway_error;
- gpr_uint32 goaway_last_stream_index;
+ uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
- gpr_uint64 outgoing_window_update;
-
- /** pings awaiting responses */
- grpc_chttp2_outstanding_ping pings;
+ int64_t outgoing_window;
};
typedef struct grpc_chttp2_executor_action_header {
@@ -301,30 +302,33 @@ struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
- grpc_mdctx *metadata_context;
+ char *peer_string;
+
+ /** when this drops to zero it's safe to shutdown the endpoint */
+ gpr_refcount shutdown_ep_refs;
struct {
gpr_mu mu;
/** is a thread currently in the global lock */
- gpr_uint8 global_active;
+ uint8_t global_active;
/** is a thread currently writing */
- gpr_uint8 writing_active;
+ uint8_t writing_active;
/** is a thread currently parsing */
- gpr_uint8 parsing_active;
+ uint8_t parsing_active;
/** is a thread currently executing channel callbacks */
- gpr_uint8 channel_callback_active;
+ uint8_t channel_callback_active;
grpc_chttp2_executor_action_header *pending_actions;
} executor;
/** is the transport destroying itself? */
- gpr_uint8 destroying;
+ uint8_t destroying;
/** has the upper layer closed the transport? */
- gpr_uint8 closed;
+ uint8_t closed;
/** is there a read request to the endpoint outstanding? */
- gpr_uint8 endpoint_reading;
+ uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
@@ -359,99 +363,138 @@ struct grpc_chttp2_transport {
gpr_slice *slices;
} executor_parsing;
+ /** incoming read bytes */
+ gpr_slice_buffer read_buffer;
+
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
- /** transport channel-level callback */
- const grpc_transport_callbacks *cb;
- /** user data for cb calls */
- void *cb_user_data;
- /** closure for notifying transport closure */
- grpc_iomgr_closure notify_closed;
+ /* accept stream callback */
+ void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport, const void *server_data);
+ void *accept_stream_user_data;
+
+ /** connectivity tracking */
+ grpc_connectivity_state_tracker state_tracker;
} channel_callback;
+
+ /** Transport op to be applied post-parsing */
+ grpc_transport_op *post_parsing_op;
};
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
- gpr_uint32 id;
-
- grpc_iomgr_closure *send_done_closure;
- grpc_iomgr_closure *recv_done_closure;
+ uint32_t id;
/** window available for us to send to peer */
- gpr_int64 outgoing_window;
- /** window available for peer to send to us - updated after parse */
- gpr_uint32 incoming_window;
- /** stream ops the transport user would like to send */
- grpc_stream_op_buffer *outgoing_sopb;
+ int64_t outgoing_window;
+ /** The number of bytes the upper layers have offered to receive.
+ As the upper layer offers more bytes, this value increases.
+ As bytes are read, this value decreases. */
+ uint32_t max_recv_bytes;
+ /** The number of bytes the upper layer has offered to read but we have
+ not yet announced to HTTP2 flow control.
+ As the upper layers offer to read more bytes, this value increases.
+ As we advertise incoming flow control window, this value decreases. */
+ uint32_t unannounced_incoming_window_for_parse;
+ uint32_t unannounced_incoming_window_for_writing;
+ /** things the upper layers would like to send */
+ grpc_metadata_batch *send_initial_metadata;
+ grpc_closure *send_initial_metadata_finished;
+ grpc_byte_stream *send_message;
+ grpc_closure *send_message_finished;
+ grpc_metadata_batch *send_trailing_metadata;
+ grpc_closure *send_trailing_metadata_finished;
+
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_closure *recv_initial_metadata_ready;
+ grpc_byte_stream **recv_message;
+ grpc_closure *recv_message_ready;
+ grpc_metadata_batch *recv_trailing_metadata;
+ grpc_closure *recv_trailing_metadata_finished;
+
/** when the application requests writes be closed, the write_closed is
'queued'; when the close is flow controlled into the send path, we are
'sending' it; when the write has been performed it is 'sent' */
- grpc_chttp2_write_state write_state;
- /** is this stream closed (boolean) */
- gpr_uint8 read_closed;
- /** has this stream been cancelled? (boolean) */
- gpr_uint8 cancelled;
- grpc_status_code cancelled_status;
- /** have we told the upper layer that this stream is cancelled? */
- gpr_uint8 published_cancelled;
+ uint8_t write_closed;
+ /** is this stream reading half-closed (boolean) */
+ uint8_t read_closed;
/** is this stream in the stream map? (boolean) */
- gpr_uint8 in_stream_map;
+ uint8_t in_stream_map;
+ /** has this stream seen an error? if 1, then pending incoming frames
+ can be thrown away */
+ uint8_t seen_error;
- /** stream state already published to the upper layer */
- grpc_stream_state published_state;
- /** address to publish next stream state to */
- grpc_stream_state *publish_state;
- /** pointer to sop buffer to fill in with new stream ops */
- grpc_stream_op_buffer *publish_sopb;
- grpc_stream_op_buffer incoming_sopb;
+ uint8_t published_initial_metadata;
+ uint8_t published_trailing_metadata;
+ uint8_t faked_trailing_metadata;
- /** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer incoming_metadata;
- grpc_chttp2_incoming_metadata_live_op_buffer outstanding_metadata;
+ grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
+ grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
+
+ grpc_chttp2_incoming_frame_queue incoming_frames;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
- gpr_uint32 id;
- /** sops that have passed flow control to be written */
- grpc_stream_op_buffer sopb;
- /** how strongly should we indicate closure with the next write */
- grpc_chttp2_send_closed send_closed;
+ uint32_t id;
+ uint8_t fetching;
+ bool sent_initial_metadata;
+ uint8_t sent_message;
+ uint8_t sent_trailing_metadata;
+ uint8_t read_closed;
+ /** send this initial metadata */
+ grpc_metadata_batch *send_initial_metadata;
+ grpc_byte_stream *send_message;
+ grpc_metadata_batch *send_trailing_metadata;
+ int64_t outgoing_window;
+ /** how much window should we announce? */
+ uint32_t announce_window;
+ gpr_slice_buffer flow_controlled_buffer;
+ gpr_slice fetching_slice;
+ size_t stream_fetched;
+ grpc_closure finished_fetch;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
- gpr_uint32 id;
+ uint32_t id;
/** has this stream received a close */
- gpr_uint8 received_close;
+ uint8_t received_close;
/** saw a rst_stream */
- gpr_uint8 saw_rst_stream;
- /** incoming_window has been reduced by this much during parsing */
- gpr_uint32 incoming_window_delta;
+ uint8_t saw_rst_stream;
+ /** how many header frames have we received? */
+ uint8_t header_frames_received;
+ /** which metadata did we get (on this parse) */
+ uint8_t got_metadata_on_parse[2];
+ /** should we raise the seen_error flag in transport_global */
+ uint8_t seen_error;
/** window available for peer to send to us */
- gpr_uint32 incoming_window;
+ int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
  /** reason given to rst_stream */
- gpr_uint32 rst_stream_reason;
- /* amount of window given */
- gpr_uint64 outgoing_window_update;
+ uint32_t rst_stream_reason;
+ /** amount of window given */
+ int64_t outgoing_window;
+ /** number of bytes received - reset at end of parse thread execution */
+ int64_t received_bytes;
/** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer incoming_metadata;
+ grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
+ grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
- gpr_uint8 included[STREAM_LIST_COUNT];
+ uint8_t included[STREAM_LIST_COUNT];
};
/** Transport writing call flow:
@@ -467,46 +510,43 @@ struct grpc_chttp2_stream {
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
-int grpc_chttp2_unlocking_check_writes(grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
+int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *global,
+ grpc_chttp2_transport_writing *writing,
+ int is_parsing);
void grpc_chttp2_perform_writes(
- grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint);
-void grpc_chttp2_terminate_writing(
- grpc_chttp2_transport_writing *transport_writing, int success);
-void grpc_chttp2_cleanup_writing(grpc_chttp2_transport_global *global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ grpc_endpoint *endpoint);
+void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
+ void *transport_writing, bool success);
+void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
-int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
+int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
-void grpc_chttp2_publish_reads(grpc_chttp2_transport_global *global,
+void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
-/** Get a writable stream
- returns non-zero if there was a stream available */
-void grpc_chttp2_list_add_writable_stream(
+bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
+/** Get a writable stream
+ returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
-
-void grpc_chttp2_list_add_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-void grpc_chttp2_list_remove_incoming_window_updated(
+bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
+ grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
@@ -526,16 +566,6 @@ int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
-void grpc_chttp2_list_add_writable_window_update_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_writable_window_update_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_remove_writable_window_update_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing);
@@ -552,6 +582,42 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
+void grpc_chttp2_list_add_check_read_ops(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+int grpc_chttp2_list_pop_check_read_ops(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global **stream_global);
+
+void grpc_chttp2_list_add_writing_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing);
+void grpc_chttp2_list_flush_writing_stalled_by_transport(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ bool is_window_available);
+
+void grpc_chttp2_list_add_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing);
+int grpc_chttp2_list_pop_stalled_by_transport(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global **stream_global);
+void grpc_chttp2_list_remove_stalled_by_transport(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+
+void grpc_chttp2_list_add_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+void grpc_chttp2_list_remove_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+int grpc_chttp2_list_pop_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_global **stream_global,
+ grpc_chttp2_stream_parsing **stream_parsing);
+
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
@@ -559,43 +625,44 @@ int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_add_read_write_state_changed(
+void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_read_write_state_changed(
+int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-/** schedule a closure to run without the transport lock taken */
-void grpc_chttp2_schedule_closure(
- grpc_chttp2_transport_global *transport_global, grpc_iomgr_closure *closure,
- int success);
-
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
+ grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ uint32_t id);
void grpc_chttp2_add_incoming_goaway(
- grpc_chttp2_transport_global *transport_global, gpr_uint32 goaway_error,
- gpr_slice goaway_text);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ uint32_t goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
-void grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+/* returns 1 if this is the last stream, 0 otherwise */
+int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
+int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_closure **pclosure, int success);
void grpc_chttp2_run_with_global_lock(
- grpc_chttp2_transport *transport, grpc_chttp2_stream *optional_stream,
- void (*action)(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *arg),
- void *arg, size_t sizeof_arg);
+ grpc_chttp2_transport *transport, grpc_chttp2_stream *optional_stream,
+ void (*action)(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *arg),
+ void *arg, size_t sizeof_arg);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -610,25 +677,131 @@ extern int grpc_flowctl_trace;
else \
stmt
-#define GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(reason, transport, context, var, \
- delta) \
- if (!(grpc_flowctl_trace)) { \
- } else { \
- grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
- transport->is_client, context->id, context->var, \
- delta); \
- }
-
-#define GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(reason, context, var, delta) \
- if (!(grpc_flowctl_trace)) { \
- } else { \
- grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
- context->is_client, 0, context->var, delta); \
- }
-
-void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
- const char *context, const char *var,
- int is_client, gpr_uint32 stream_id,
- gpr_int64 current_value, gpr_int64 delta);
-
+typedef enum {
+ GRPC_CHTTP2_FLOWCTL_MOVE,
+ GRPC_CHTTP2_FLOWCTL_CREDIT,
+ GRPC_CHTTP2_FLOWCTL_DEBIT
+} grpc_chttp2_flowctl_op;
+
+#define GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, id1, id2, dst_context, \
+ dst_var, src_context, src_var) \
+ do { \
+ assert(id1 == id2); \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace( \
+ __FILE__, __LINE__, phase, GRPC_CHTTP2_FLOWCTL_MOVE, #dst_context, \
+ #dst_var, #src_context, #src_var, transport->is_client, id1, \
+ dst_context->dst_var, src_context->src_var); \
+ } \
+ dst_context->dst_var += src_context->src_var; \
+ src_context->src_var = 0; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_MOVE_STREAM(phase, transport, dst_context, dst_var, \
+ src_context, src_var) \
+ GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, dst_context->id, \
+ src_context->id, dst_context, dst_var, \
+ src_context, src_var)
+#define GRPC_CHTTP2_FLOW_MOVE_TRANSPORT(phase, dst_context, dst_var, \
+ src_context, src_var) \
+ GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, dst_context, 0, 0, dst_context, dst_var, \
+ src_context, src_var)
+
+#define GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, id, dst_context, \
+ dst_var, amount) \
+ do { \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
+ GRPC_CHTTP2_FLOWCTL_CREDIT, #dst_context, \
+ #dst_var, NULL, #amount, transport->is_client, \
+ id, dst_context->dst_var, amount); \
+ } \
+ dst_context->dst_var += amount; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_CREDIT_STREAM(phase, transport, dst_context, dst_var, \
+ amount) \
+ GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, dst_context->id, \
+ dst_context, dst_var, amount)
+#define GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT(phase, dst_context, dst_var, amount) \
+ GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
+ amount)
+
+#define GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, id, dst_context, \
+ dst_var, amount) \
+ do { \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
+ GRPC_CHTTP2_FLOWCTL_DEBIT, #dst_context, \
+ #dst_var, NULL, #amount, transport->is_client, \
+ id, dst_context->dst_var, amount); \
+ } \
+ dst_context->dst_var -= amount; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_DEBIT_STREAM(phase, transport, dst_context, dst_var, \
+ amount) \
+ GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, dst_context->id, \
+ dst_context, dst_var, amount)
+#define GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT(phase, dst_context, dst_var, amount) \
+ GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
+ amount)
+
+void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
+ grpc_chttp2_flowctl_op op, const char *context1,
+ const char *var1, const char *context2,
+ const char *var2, int is_client,
+ uint32_t stream_id, int64_t val1, int64_t val2);
+
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream,
+ grpc_status_code status, gpr_slice *details);
+void grpc_chttp2_mark_stream_closed(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, int close_reads,
+ int close_writes);
+void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
+ grpc_chttp2_stream_ref(stream_global, reason)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
+ const char *reason);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global,
+ const char *reason);
+#else
+#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
+ grpc_chttp2_stream_ref(stream_global)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream_global)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global);
#endif
+
+grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
+ uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
+void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_incoming_byte_stream *bs,
+ gpr_slice slice);
+void grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, int success,
+ int from_parsing_thread);
+
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *parsing,
+ const uint8_t *opaque_8bytes);
+
+/** add a ref to the stream and add it to the writable list;
+ ref will be dropped in writing.c */
+void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_INTERNAL_H */
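
The GRPC_CHTTP2_FLOW_MOVE/CREDIT/DEBIT macros introduced above replace the old ad-hoc FLOWCTL_TRACE calls: every window adjustment now goes through one macro that optionally traces the current value and the delta before applying it. The following stand-alone sketch shows the same pattern on a toy context; flow_ctx, flowctl_trace_stub and FLOW_CREDIT/FLOW_DEBIT are invented names for illustration and are not part of this commit or of gRPC.

/* Illustrative sketch (editor's example, not part of this commit): a
   stripped-down version of the credit/debit accounting pattern used by the
   GRPC_CHTTP2_FLOW_* macros above. All names below are invented. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int is_client;
  uint32_t id;
  int64_t outgoing_window;
} flow_ctx;

static int flowctl_trace_enabled = 1;

/* stands in for grpc_chttp2_flowctl_trace: log phase, operation, variable,
   current value and delta */
static void flowctl_trace_stub(const char *phase, const char *op,
                               const char *ctx, const char *var, int is_client,
                               uint32_t id, int64_t value, int64_t delta) {
  fprintf(stderr, "%s %s %s.%s cli=%d id=%u cur=%lld delta=%lld\n", phase, op,
          ctx, var, is_client, (unsigned)id, (long long)value,
          (long long)delta);
}

/* credit: add tokens to a window, tracing old value and amount if enabled */
#define FLOW_CREDIT(phase, dst_context, dst_var, amount)              \
  do {                                                                \
    if (flowctl_trace_enabled) {                                      \
      flowctl_trace_stub(phase, "credit", #dst_context, #dst_var,     \
                         (dst_context)->is_client, (dst_context)->id, \
                         (dst_context)->dst_var, (amount));           \
    }                                                                 \
    (dst_context)->dst_var += (amount);                               \
  } while (0)

/* debit: the mirror operation, subtracting tokens from the window */
#define FLOW_DEBIT(phase, dst_context, dst_var, amount)               \
  do {                                                                \
    if (flowctl_trace_enabled) {                                      \
      flowctl_trace_stub(phase, "debit", #dst_context, #dst_var,      \
                         (dst_context)->is_client, (dst_context)->id, \
                         (dst_context)->dst_var, (amount));           \
    }                                                                 \
    (dst_context)->dst_var -= (amount);                               \
  } while (0)

int main(void) {
  flow_ctx stream = {1, 3, 65535};
  FLOW_DEBIT("write", (&stream), outgoing_window, 1024);  /* sent 1KiB */
  FLOW_CREDIT("parse", (&stream), outgoing_window, 1024); /* WINDOW_UPDATE */
  return 0;
}
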
diff --git a/src/core/transport/chttp2/parsing.c b/src/core/transport/chttp2/parsing.c
index 4664a0895c..0516f39fa9 100644
--- a/src/core/transport/chttp2/parsing.c
+++ b/src/core/transport/chttp2/parsing.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,30 +35,39 @@
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
+#include "src/core/transport/static_metadata.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing);
+static int init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
static int init_header_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_continuation);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_continuation);
static int init_data_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_rst_stream_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_settings_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_window_update_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
-static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing);
-static int init_goaway_parser(grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
+static int init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
+static int init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
static int init_skip_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_header);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_header);
-static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
+static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice, int is_last);
void grpc_chttp2_prepare_to_read(
@@ -67,34 +76,31 @@ void grpc_chttp2_prepare_to_read(
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
+ GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
+
transport_parsing->next_stream_id = transport_global->next_stream_id;
+ transport_parsing->last_sent_max_table_size =
+ transport_global->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE];
/* update the parsing view of incoming window */
- if (transport_parsing->incoming_window != transport_global->incoming_window) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parse", transport_parsing, incoming_window,
- (gpr_int64)transport_global->incoming_window -
- (gpr_int64)transport_parsing->incoming_window);
- transport_parsing->incoming_window = transport_global->incoming_window;
- }
- while (grpc_chttp2_list_pop_incoming_window_updated(
+ while (grpc_chttp2_list_pop_unannounced_incoming_window_available(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- stream_parsing->id = stream_global->id;
- if (stream_parsing->incoming_window != stream_global->incoming_window) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parse", transport_parsing, stream_parsing, incoming_window,
- (gpr_int64)stream_global->incoming_window -
- (gpr_int64)stream_parsing->incoming_window);
- stream_parsing->incoming_window = stream_global->incoming_window;
- }
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("parse", transport_parsing, stream_parsing,
+ incoming_window, stream_global,
+ unannounced_incoming_window_for_parse);
}
+
+ GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
}
void grpc_chttp2_publish_reads(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
+ int was_zero;
+ int is_zero;
/* transport_parsing->last_incoming_stream_id is used as
last-grpc_chttp2_stream-id when
@@ -109,9 +115,6 @@ void grpc_chttp2_publish_reads(
transport_parsing->incoming_stream_id;
}
- /* copy parsing qbuf to global qbuf */
- gpr_slice_buffer_move_into(&transport_parsing->qbuf, &transport_global->qbuf);
-
/* update global settings */
if (transport_parsing->settings_updated) {
memcpy(transport_global->settings[GRPC_PEER_SETTINGS],
@@ -123,114 +126,128 @@ void grpc_chttp2_publish_reads(
if (transport_parsing->settings_ack_received) {
memcpy(transport_global->settings[GRPC_ACKED_SETTINGS],
transport_global->settings[GRPC_SENT_SETTINGS],
- GRPC_CHTTP2_NUM_SETTINGS * sizeof(gpr_uint32));
+ GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
transport_parsing->settings_ack_received = 0;
+ transport_global->sent_local_settings = 0;
}
/* move goaway to the global state if we received one (it will be
   published later) */
if (transport_parsing->goaway_received) {
- grpc_chttp2_add_incoming_goaway(transport_global,
- transport_parsing->goaway_error,
+ grpc_chttp2_add_incoming_goaway(exec_ctx, transport_global,
+ (uint32_t)transport_parsing->goaway_error,
transport_parsing->goaway_text);
transport_parsing->goaway_text = gpr_empty_slice();
transport_parsing->goaway_received = 0;
}
/* propagate flow control tokens to global state */
- if (transport_parsing->outgoing_window_update) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_global, outgoing_window,
- transport_parsing->outgoing_window_update);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_parsing, outgoing_window_update,
- -(gpr_int64)transport_parsing->outgoing_window_update);
- transport_global->outgoing_window +=
- transport_parsing->outgoing_window_update;
- transport_parsing->outgoing_window_update = 0;
- }
-
- if (transport_parsing->incoming_window_delta) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_global, incoming_window,
- -(gpr_int64)transport_parsing->incoming_window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_parsing, incoming_window_delta,
- -(gpr_int64)transport_parsing->incoming_window_delta);
- transport_global->incoming_window -=
- transport_parsing->incoming_window_delta;
- transport_parsing->incoming_window_delta = 0;
+ was_zero = transport_global->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("parsed", transport_global, outgoing_window,
+ transport_parsing, outgoing_window);
+ is_zero = transport_global->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
+ &stream_global)) {
+ grpc_chttp2_become_writable(transport_global, stream_global);
+ }
+ }
+
+ if (transport_parsing->incoming_window <
+ transport_global->connection_window_target * 3 / 4) {
+ int64_t announce_bytes = transport_global->connection_window_target -
+ transport_parsing->incoming_window;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_global,
+ announce_incoming_window, announce_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
+ incoming_window, announce_bytes);
}
/* for each stream that saw an update, fixup global state */
while (grpc_chttp2_list_pop_parsing_seen_stream(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- /* update incoming flow control window */
- if (stream_parsing->incoming_window_delta) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_global, incoming_window,
- -(gpr_int64)stream_parsing->incoming_window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_parsing, incoming_window_delta,
- -(gpr_int64)stream_parsing->incoming_window_delta);
- stream_global->incoming_window -= stream_parsing->incoming_window_delta;
- stream_parsing->incoming_window_delta = 0;
- grpc_chttp2_list_add_writable_window_update_stream(transport_global,
- stream_global);
+ if (stream_parsing->seen_error) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
/* update outgoing flow control window */
- if (stream_parsing->outgoing_window_update) {
- int was_zero = stream_global->outgoing_window <= 0;
- int is_zero;
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("parsed", transport_parsing,
- stream_global, outgoing_window,
- stream_parsing->outgoing_window_update);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_parsing, outgoing_window_update,
- -(gpr_int64)stream_parsing->outgoing_window_update);
- stream_global->outgoing_window += stream_parsing->outgoing_window_update;
- stream_parsing->outgoing_window_update = 0;
- is_zero = stream_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
- }
+ was_zero = stream_global->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("parsed", transport_global, stream_global,
+ outgoing_window, stream_parsing,
+ outgoing_window);
+ is_zero = stream_global->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(transport_global, stream_global);
}
- /* updating closed status */
- if (stream_parsing->received_close) {
- stream_global->read_closed = 1;
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ stream_global->max_recv_bytes -= (uint32_t)GPR_MIN(
+ stream_global->max_recv_bytes, stream_parsing->received_bytes);
+ stream_parsing->received_bytes = 0;
+
+ /* publish incoming stream ops */
+ if (stream_global->incoming_frames.tail != NULL) {
+ stream_global->incoming_frames.tail->is_tail = 0;
+ }
+ if (stream_parsing->data_parser.incoming_frames.head != NULL) {
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ grpc_chttp2_incoming_frame_queue_merge(
+ &stream_global->incoming_frames,
+ &stream_parsing->data_parser.incoming_frames);
+ if (stream_global->incoming_frames.tail != NULL) {
+ stream_global->incoming_frames.tail->is_tail = 1;
}
+
+ if (!stream_global->published_initial_metadata &&
+ stream_parsing->got_metadata_on_parse[0]) {
+ stream_parsing->got_metadata_on_parse[0] = 0;
+ stream_global->published_initial_metadata = 1;
+ GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
+ stream_parsing->metadata_buffer[0],
+ stream_global->received_initial_metadata);
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (!stream_global->published_trailing_metadata &&
+ stream_parsing->got_metadata_on_parse[1]) {
+ stream_parsing->got_metadata_on_parse[1] = 0;
+ stream_global->published_trailing_metadata = 1;
+ GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
+ stream_parsing->metadata_buffer[1],
+ stream_global->received_trailing_metadata);
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+
if (stream_parsing->saw_rst_stream) {
- stream_global->cancelled = 1;
- stream_global->cancelled_status = grpc_chttp2_http2_error_to_grpc_status(
-     stream_parsing->rst_stream_reason);
- if (stream_parsing->rst_stream_reason == GRPC_CHTTP2_NO_ERROR) {
- stream_global->published_cancelled = 1;
+ if (stream_parsing->rst_stream_reason != GRPC_CHTTP2_NO_ERROR) {
+ grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)stream_parsing->rst_stream_reason);
+ char *status_details;
+ gpr_slice slice_details;
+ gpr_asprintf(&status_details, "Received RST_STREAM err=%d",
+ stream_parsing->rst_stream_reason);
+ slice_details = gpr_slice_from_copied_string(status_details);
+ gpr_free(status_details);
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
+ status_code, &slice_details);
}
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ 1, 1);
}
- /* publish incoming stream ops */
- if (stream_parsing->data_parser.incoming_sopb.nops > 0) {
- grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- &stream_parsing->incoming_metadata, &stream_global->incoming_metadata,
- &stream_parsing->data_parser.incoming_sopb);
- grpc_sopb_move_to(&stream_parsing->data_parser.incoming_sopb,
- &stream_global->incoming_sopb);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ if (stream_parsing->received_close) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ 1, 0);
}
}
}
-int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
+int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice) {
- gpr_uint8 *beg = GPR_SLICE_START_PTR(slice);
- gpr_uint8 *end = GPR_SLICE_END_PTR(slice);
- gpr_uint8 *cur = beg;
+ uint8_t *beg = GPR_SLICE_START_PTR(slice);
+ uint8_t *end = GPR_SLICE_END_PTR(slice);
+ uint8_t *cur = beg;
if (cur == end) return 1;
@@ -267,7 +284,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
"at byte %d",
GRPC_CHTTP2_CLIENT_CONNECT_STRING[transport_parsing
->deframe_state],
- (int)(gpr_uint8)GRPC_CHTTP2_CLIENT_CONNECT_STRING
+ (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING
[transport_parsing->deframe_state],
*cur, (int)*cur, transport_parsing->deframe_state);
return 0;
@@ -282,7 +299,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
dts_fh_0:
case GRPC_DTS_FH_0:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size = ((gpr_uint32)*cur) << 16;
+ transport_parsing->incoming_frame_size = ((uint32_t)*cur) << 16;
if (++cur == end) {
transport_parsing->deframe_state = GRPC_DTS_FH_1;
return 1;
@@ -290,7 +307,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FH_1:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size |= ((gpr_uint32)*cur) << 8;
+ transport_parsing->incoming_frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
transport_parsing->deframe_state = GRPC_DTS_FH_2;
return 1;
@@ -322,7 +339,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FH_5:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id = (((gpr_uint32)*cur) & 0x7f) << 24;
+ transport_parsing->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24;
if (++cur == end) {
transport_parsing->deframe_state = GRPC_DTS_FH_6;
return 1;
@@ -330,7 +347,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FH_6:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((gpr_uint32)*cur) << 16;
+ transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 16;
if (++cur == end) {
transport_parsing->deframe_state = GRPC_DTS_FH_7;
return 1;
@@ -338,7 +355,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FH_7:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((gpr_uint32)*cur) << 8;
+ transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 8;
if (++cur == end) {
transport_parsing->deframe_state = GRPC_DTS_FH_8;
return 1;
@@ -346,9 +363,9 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FH_8:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((gpr_uint32)*cur);
+ transport_parsing->incoming_stream_id |= ((uint32_t)*cur);
transport_parsing->deframe_state = GRPC_DTS_FRAME;
- if (!init_frame_parser(transport_parsing)) {
+ if (!init_frame_parser(exec_ctx, transport_parsing)) {
return 0;
}
if (transport_parsing->incoming_stream_id) {
@@ -356,7 +373,8 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
transport_parsing->incoming_stream_id;
}
if (transport_parsing->incoming_frame_size == 0) {
- if (!parse_frame_slice(transport_parsing, gpr_empty_slice(), 1)) {
+ if (!parse_frame_slice(exec_ctx, transport_parsing, gpr_empty_slice(),
+ 1)) {
return 0;
}
transport_parsing->incoming_stream = NULL;
@@ -372,22 +390,24 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
/* fallthrough */
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
- if ((gpr_uint32)(end - cur) == transport_parsing->incoming_frame_size) {
- if (!parse_frame_slice(
- transport_parsing,
- gpr_slice_sub_no_ref(slice, cur - beg, end - beg), 1)) {
+ if ((uint32_t)(end - cur) == transport_parsing->incoming_frame_size) {
+ if (!parse_frame_slice(exec_ctx, transport_parsing,
+ gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 1)) {
return 0;
}
transport_parsing->deframe_state = GRPC_DTS_FH_0;
transport_parsing->incoming_stream = NULL;
return 1;
- } else if ((gpr_uint32)(end - cur) >
+ } else if ((uint32_t)(end - cur) >
transport_parsing->incoming_frame_size) {
+ size_t cur_offset = (size_t)(cur - beg);
if (!parse_frame_slice(
- transport_parsing,
+ exec_ctx, transport_parsing,
gpr_slice_sub_no_ref(
- slice, cur - beg,
- cur + transport_parsing->incoming_frame_size - beg),
+ slice, cur_offset,
+ cur_offset + transport_parsing->incoming_frame_size),
1)) {
return 0;
}
@@ -395,25 +415,23 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
transport_parsing->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- if (!parse_frame_slice(
- transport_parsing,
- gpr_slice_sub_no_ref(slice, cur - beg, end - beg), 0)) {
+ if (!parse_frame_slice(exec_ctx, transport_parsing,
+ gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 0)) {
return 0;
}
- transport_parsing->incoming_frame_size -= (end - cur);
+ transport_parsing->incoming_frame_size -= (uint32_t)(end - cur);
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
-
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
-static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
+static int init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing) {
if (transport_parsing->expect_continuation_stream_id != 0) {
if (transport_parsing->incoming_frame_type !=
GRPC_CHTTP2_FRAME_CONTINUATION) {
@@ -430,52 +448,54 @@ static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
transport_parsing->incoming_stream_id);
return 0;
}
- return init_header_frame_parser(transport_parsing, 1);
+ return init_header_frame_parser(exec_ctx, transport_parsing, 1);
}
switch (transport_parsing->incoming_frame_type) {
case GRPC_CHTTP2_FRAME_DATA:
- return init_data_frame_parser(transport_parsing);
+ return init_data_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_HEADER:
- return init_header_frame_parser(transport_parsing, 0);
+ return init_header_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
gpr_log(GPR_ERROR, "Unexpected CONTINUATION frame");
return 0;
case GRPC_CHTTP2_FRAME_RST_STREAM:
- return init_rst_stream_parser(transport_parsing);
+ return init_rst_stream_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_SETTINGS:
- return init_settings_frame_parser(transport_parsing);
+ return init_settings_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
- return init_window_update_frame_parser(transport_parsing);
+ return init_window_update_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_PING:
- return init_ping_parser(transport_parsing);
+ return init_ping_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_GOAWAY:
- return init_goaway_parser(transport_parsing);
+ return init_goaway_parser(exec_ctx, transport_parsing);
default:
gpr_log(GPR_ERROR, "Unknown frame type %02x",
transport_parsing->incoming_frame_type);
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
}
static grpc_chttp2_parse_error skip_parser(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
return GRPC_CHTTP2_PARSE_OK;
}
-static void skip_header(void *tp, grpc_mdelem *md) { grpc_mdelem_unref(md); }
+static void skip_header(void *tp, grpc_mdelem *md) { GRPC_MDELEM_UNREF(md); }
static int init_skip_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_header) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_header) {
if (is_header) {
- int is_eoh = transport_parsing->expect_continuation_stream_id != 0;
+ uint8_t is_eoh = transport_parsing->expect_continuation_stream_id != 0;
transport_parsing->parser = grpc_chttp2_header_parser_parse;
transport_parsing->parser_data = &transport_parsing->hpack_parser;
transport_parsing->hpack_parser.on_header = skip_header;
transport_parsing->hpack_parser.on_header_user_data = NULL;
transport_parsing->hpack_parser.is_boundary = is_eoh;
transport_parsing->hpack_parser.is_eof =
- is_eoh ? transport_parsing->header_eof : 0;
+ (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
} else {
transport_parsing->parser = skip_parser;
}
@@ -483,65 +503,51 @@ static int init_skip_frame_parser(
}
void grpc_chttp2_parsing_become_skip_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
init_skip_frame_parser(
- transport_parsing,
+ exec_ctx, transport_parsing,
transport_parsing->parser == grpc_chttp2_header_parser_parse);
}
static grpc_chttp2_parse_error update_incoming_window(
- grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
- if (transport_parsing->incoming_frame_size >
- transport_parsing->incoming_window) {
+ uint32_t incoming_frame_size = transport_parsing->incoming_frame_size;
+ if (incoming_frame_size > transport_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
transport_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- if (transport_parsing->incoming_frame_size >
- stream_parsing->incoming_window) {
+ if (incoming_frame_size > stream_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
stream_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "data", transport_parsing, incoming_window,
- -(gpr_int64)transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("data", transport_parsing,
- incoming_window_delta,
- transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "data", transport_parsing, stream_parsing, incoming_window,
- -(gpr_int64)transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("data", transport_parsing, stream_parsing,
- incoming_window_delta,
- transport_parsing->incoming_frame_size);
-
- transport_parsing->incoming_window -= transport_parsing->incoming_frame_size;
- transport_parsing->incoming_window_delta +=
- transport_parsing->incoming_frame_size;
- stream_parsing->incoming_window -= transport_parsing->incoming_frame_size;
- stream_parsing->incoming_window_delta +=
- transport_parsing->incoming_frame_size;
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", transport_parsing, incoming_window,
+ incoming_frame_size);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
+ incoming_window, incoming_frame_size);
+ stream_parsing->received_bytes += incoming_frame_size;
+
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
return GRPC_CHTTP2_PARSE_OK;
}
static int init_data_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_parsing *stream_parsing =
grpc_chttp2_parsing_lookup_stream(transport_parsing,
transport_parsing->incoming_stream_id);
grpc_chttp2_parse_error err = GRPC_CHTTP2_PARSE_OK;
if (!stream_parsing || stream_parsing->received_close)
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
if (err == GRPC_CHTTP2_PARSE_OK) {
- err = update_incoming_window(transport_parsing, stream_parsing);
+ err = update_incoming_window(exec_ctx, transport_parsing, stream_parsing);
}
if (err == GRPC_CHTTP2_PARSE_OK) {
err = grpc_chttp2_data_parser_begin_frame(
@@ -561,30 +567,35 @@ static int init_data_frame_parser(
&transport_parsing->qbuf,
grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
GRPC_CHTTP2_PROTOCOL_ERROR));
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
static void free_timeout(void *p) { gpr_free(p); }
-static void on_header(void *tp, grpc_mdelem *md) {
+static void on_initial_header(void *tp, grpc_mdelem *md) {
grpc_chttp2_transport_parsing *transport_parsing = tp;
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
+ GPR_TIMER_BEGIN("on_initial_header", 0);
+
GPR_ASSERT(stream_parsing);
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:HDR: %s: %s", stream_parsing->id,
+ GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", stream_parsing->id,
transport_parsing->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
- if (md->key == transport_parsing->str_grpc_timeout) {
+ if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ /* TODO(ctiller): check for a status like " 0" */
+ stream_parsing->seen_error = 1;
+ }
+
+ if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
if (!cached_timeout) {
/* not already parsed: parse it now, and store the result away */
@@ -593,29 +604,61 @@ static void on_header(void *tp, grpc_mdelem *md) {
cached_timeout)) {
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
grpc_mdstr_as_c_string(md->value));
- *cached_timeout = gpr_inf_future;
+ *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &stream_parsing->incoming_metadata,
- gpr_time_add(gpr_now(), *cached_timeout));
- grpc_mdelem_unref(md);
+ &stream_parsing->metadata_buffer[0],
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
+ GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->incoming_metadata,
- md);
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_parsing->metadata_buffer[0], md);
}
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+
+ GPR_TIMER_END("on_initial_header", 0);
+}
+
+static void on_trailing_header(void *tp, grpc_mdelem *md) {
+ grpc_chttp2_transport_parsing *transport_parsing = tp;
+ grpc_chttp2_stream_parsing *stream_parsing =
+ transport_parsing->incoming_stream;
+
+ GPR_TIMER_BEGIN("on_trailing_header", 0);
+
+ GPR_ASSERT(stream_parsing);
+
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", stream_parsing->id,
+ transport_parsing->is_client ? "CLI" : "SVR",
+ grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
+
+ if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ /* TODO(ctiller): check for a status like " 0" */
+ stream_parsing->seen_error = 1;
+ }
+
+ grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->metadata_buffer[1],
+ md);
+
+ grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+
+ GPR_TIMER_END("on_trailing_header", 0);
}
static int init_header_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_continuation) {
- int is_eoh = (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_continuation) {
+ uint8_t is_eoh = (transport_parsing->incoming_frame_flags &
+ GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
int via_accept = 0;
grpc_chttp2_stream_parsing *stream_parsing;
+ /* TODO(ctiller): when to increment header_frames_received? */
+
if (is_eoh) {
transport_parsing->expect_continuation_stream_id = 0;
} else {
@@ -635,7 +678,7 @@ static int init_header_frame_parser(
if (is_continuation) {
gpr_log(GPR_ERROR,
"grpc_chttp2_stream disbanded before CONTINUATION received");
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
if (transport_parsing->is_client) {
if ((transport_parsing->incoming_stream_id & 1) &&
@@ -646,7 +689,7 @@ static int init_header_frame_parser(
gpr_log(GPR_ERROR,
"ignoring new grpc_chttp2_stream creation on client");
}
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if (transport_parsing->last_incoming_stream_id >
transport_parsing->incoming_stream_id) {
gpr_log(GPR_ERROR,
@@ -655,19 +698,19 @@ static int init_header_frame_parser(
"id=%d, new grpc_chttp2_stream id=%d",
transport_parsing->last_incoming_stream_id,
transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if ((transport_parsing->incoming_stream_id & 1) == 0) {
gpr_log(GPR_ERROR,
"ignoring grpc_chttp2_stream with non-client generated index %d",
transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
stream_parsing = transport_parsing->incoming_stream =
grpc_chttp2_parsing_accept_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
+ exec_ctx, transport_parsing, transport_parsing->incoming_stream_id);
if (stream_parsing == NULL) {
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted");
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
via_accept = 1;
} else {
@@ -677,15 +720,25 @@ static int init_header_frame_parser(
if (stream_parsing->received_close) {
gpr_log(GPR_ERROR, "skipping already closed grpc_chttp2_stream header");
transport_parsing->incoming_stream = NULL;
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
transport_parsing->parser = grpc_chttp2_header_parser_parse;
transport_parsing->parser_data = &transport_parsing->hpack_parser;
- transport_parsing->hpack_parser.on_header = on_header;
+ switch (stream_parsing->header_frames_received) {
+ case 0:
+ transport_parsing->hpack_parser.on_header = on_initial_header;
+ break;
+ case 1:
+ transport_parsing->hpack_parser.on_header = on_trailing_header;
+ break;
+ case 2:
+ gpr_log(GPR_ERROR, "too many header frames received");
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ }
transport_parsing->hpack_parser.on_header_user_data = transport_parsing;
transport_parsing->hpack_parser.is_boundary = is_eoh;
transport_parsing->hpack_parser.is_eof =
- is_eoh ? transport_parsing->header_eof : 0;
+ (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
if (!is_continuation && (transport_parsing->incoming_frame_flags &
GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
grpc_chttp2_hpack_parser_set_has_priority(&transport_parsing->hpack_parser);
@@ -694,7 +747,7 @@ static int init_header_frame_parser(
}
static int init_window_update_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_window_update_parser_begin_frame(
&transport_parsing->simple.window_update,
transport_parsing->incoming_frame_size,
@@ -708,7 +761,8 @@ static int init_window_update_frame_parser(
return ok;
}
-static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
+static int init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_ping_parser_begin_frame(
&transport_parsing->simple.ping,
transport_parsing->incoming_frame_size,
@@ -719,7 +773,7 @@ static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
}
static int init_rst_stream_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_rst_stream_parser_begin_frame(
&transport_parsing->simple.rst_stream,
transport_parsing->incoming_frame_size,
@@ -727,7 +781,7 @@ static int init_rst_stream_parser(
transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
transport_parsing, transport_parsing->incoming_stream_id);
if (!transport_parsing->incoming_stream) {
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
transport_parsing->parser = grpc_chttp2_rst_stream_parser_parse;
transport_parsing->parser_data = &transport_parsing->simple.rst_stream;
@@ -735,7 +789,7 @@ static int init_rst_stream_parser(
}
static int init_goaway_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_goaway_parser_begin_frame(
&transport_parsing->goaway_parser,
transport_parsing->incoming_frame_size,
@@ -746,7 +800,7 @@ static int init_goaway_parser(
}
static int init_settings_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok;
if (transport_parsing->incoming_stream_id != 0) {
@@ -765,6 +819,9 @@ static int init_settings_frame_parser(
}
if (transport_parsing->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
transport_parsing->settings_ack_received = 1;
+ grpc_chttp2_hptbl_set_max_bytes(
+ &transport_parsing->hpack_parser.table,
+ transport_parsing->last_sent_max_table_size);
}
transport_parsing->parser = grpc_chttp2_settings_parser_parse;
transport_parsing->parser_data = &transport_parsing->simple.settings;
@@ -772,16 +829,17 @@ static int init_settings_frame_parser(
}
/*
-static int is_window_update_legal(gpr_int64 window_update, gpr_int64 window) {
+static int is_window_update_legal(int64_t window_update, int64_t window) {
return window + window_update < MAX_WINDOW;
}
*/
-static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
+static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice, int is_last) {
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
- switch (transport_parsing->parser(transport_parsing->parser_data,
+ switch (transport_parsing->parser(exec_ctx, transport_parsing->parser_data,
transport_parsing, stream_parsing, slice,
is_last)) {
case GRPC_CHTTP2_PARSE_OK:
@@ -791,7 +849,7 @@ static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
}
return 1;
case GRPC_CHTTP2_STREAM_ERROR:
- grpc_chttp2_parsing_become_skip_parser(transport_parsing);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, transport_parsing);
if (stream_parsing) {
stream_parsing->saw_rst_stream = 1;
stream_parsing->rst_stream_reason = GRPC_CHTTP2_PROTOCOL_ERROR;
@@ -804,7 +862,5 @@ static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
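
For reference, the GRPC_DTS_FH_0..FH_8 states in grpc_chttp2_perform_read above accumulate the standard 9-byte HTTP/2 frame header one byte at a time: a 24-bit payload length, a type byte, a flags byte, then the 31-bit stream id with the reserved high bit masked off (the & 0x7f in GRPC_DTS_FH_5). A minimal stand-alone sketch of the same decomposition, assuming the whole header is available at once; frame_header and parse_frame_header are invented names, not gRPC APIs.

/* Illustrative sketch (editor's example, not part of this commit). */
#include <stdint.h>

typedef struct {
  uint32_t frame_size; /* 24-bit payload length */
  uint8_t frame_type;
  uint8_t frame_flags;
  uint32_t stream_id; /* 31-bit stream id; high (reserved) bit masked off */
} frame_header;

static frame_header parse_frame_header(const uint8_t hdr[9]) {
  frame_header h;
  h.frame_size =
      ((uint32_t)hdr[0] << 16) | ((uint32_t)hdr[1] << 8) | (uint32_t)hdr[2];
  h.frame_type = hdr[3];
  h.frame_flags = hdr[4];
  h.stream_id = (((uint32_t)hdr[5] & 0x7f) << 24) | ((uint32_t)hdr[6] << 16) |
                ((uint32_t)hdr[7] << 8) | (uint32_t)hdr[8];
  return h;
}
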
diff --git a/src/core/transport/chttp2/status_conversion.h b/src/core/transport/chttp2/status_conversion.h
index 0ec5b560b8..c6e066bb5d 100644
--- a/src/core/transport/chttp2/status_conversion.h
+++ b/src/core/transport/chttp2/status_conversion.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H
#include <grpc/grpc.h>
#include "src/core/transport/chttp2/http2_errors.h"
@@ -47,4 +47,4 @@ grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status);
int grpc_chttp2_grpc_status_to_http2_status(grpc_status_code status);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_STATUS_CONVERSION_H */
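
status_conversion.h (header guard renamed above) is what parsing.c relies on when an RST_STREAM arrives: grpc_chttp2_http2_error_to_grpc_status maps the HTTP/2 error code to a grpc status before grpc_chttp2_fake_status is called. A rough stand-alone sketch of that shape follows; the enum values and the partial mapping are illustrative only, not the table implemented in status_conversion.c.

/* Illustrative sketch (editor's example, not part of this commit). */
typedef enum {
  EX_STATUS_OK,
  EX_STATUS_CANCELLED,
  EX_STATUS_UNAVAILABLE,
  EX_STATUS_INTERNAL
} example_status;

/* HTTP/2 error codes per RFC 7540: NO_ERROR=0x0, REFUSED_STREAM=0x7,
   CANCEL=0x8; anything unrecognized falls back to an internal error. */
static example_status example_http2_error_to_status(int http2_error) {
  switch (http2_error) {
    case 0x0:
      return EX_STATUS_OK;
    case 0x7:
      return EX_STATUS_UNAVAILABLE;
    case 0x8:
      return EX_STATUS_CANCELLED;
    default:
      return EX_STATUS_INTERNAL;
  }
}
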
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c
deleted file mode 100644
index cf78ac50cc..0000000000
--- a/src/core/transport/chttp2/stream_encoder.c
+++ /dev/null
@@ -1,631 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/transport/chttp2/stream_encoder.h"
-
-#include <assert.h>
-#include <string.h>
-
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-#include "src/core/transport/chttp2/bin_encoder.h"
-#include "src/core/transport/chttp2/hpack_table.h"
-#include "src/core/transport/chttp2/timeout_encoding.h"
-#include "src/core/transport/chttp2/varint.h"
-
-#define HASH_FRAGMENT_1(x) ((x)&255)
-#define HASH_FRAGMENT_2(x) ((x >> 8) & 255)
-#define HASH_FRAGMENT_3(x) ((x >> 16) & 255)
-#define HASH_FRAGMENT_4(x) ((x >> 24) & 255)
-
-/* if the probability of this item being seen again is < 1/x then don't add
- it to the table */
-#define ONE_ON_ADD_PROBABILITY 128
-/* don't consider adding anything bigger than this to the hpack table */
-#define MAX_DECODER_SPACE_USAGE 512
-
-/* what kind of frame are we encoding? */
-typedef enum { HEADER, DATA, NONE } frame_type;
-
-typedef struct {
- frame_type cur_frame_type;
- /* number of bytes in 'output' when we started the frame - used to calculate
- frame length */
- size_t output_length_at_start_of_frame;
- /* index (in output) of the header for the current frame */
- size_t header_idx;
- /* was the last frame emitted a header? (if yes, we'll need a CONTINUATION) */
- gpr_uint8 last_was_header;
- /* output stream id */
- gpr_uint32 stream_id;
- gpr_slice_buffer *output;
-} framer_state;
-
-/* fills p (which is expected to be 9 bytes long) with a data frame header */
-static void fill_header(gpr_uint8 *p, gpr_uint8 type, gpr_uint32 id,
- gpr_uint32 len, gpr_uint8 flags) {
- *p++ = len >> 16;
- *p++ = len >> 8;
- *p++ = len;
- *p++ = type;
- *p++ = flags;
- *p++ = id >> 24;
- *p++ = id >> 16;
- *p++ = id >> 8;
- *p++ = id;
-}
-
-/* finish a frame - fill in the previously reserved header */
-static void finish_frame(framer_state *st, int is_header_boundary,
- int is_last_in_stream) {
- gpr_uint8 type = 0xff;
- switch (st->cur_frame_type) {
- case HEADER:
- type = st->last_was_header ? GRPC_CHTTP2_FRAME_CONTINUATION
- : GRPC_CHTTP2_FRAME_HEADER;
- st->last_was_header = 1;
- break;
- case DATA:
- type = GRPC_CHTTP2_FRAME_DATA;
- st->last_was_header = 0;
- is_header_boundary = 0;
- break;
- case NONE:
- return;
- }
- fill_header(GPR_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
- st->stream_id,
- st->output->length - st->output_length_at_start_of_frame,
- (is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
- (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0));
- st->cur_frame_type = NONE;
-}
-
-/* begin a new frame: reserve off header space, remember how many bytes we'd
- output before beginning */
-static void begin_frame(framer_state *st, frame_type type) {
- GPR_ASSERT(type != NONE);
- GPR_ASSERT(st->cur_frame_type == NONE);
- st->cur_frame_type = type;
- st->header_idx =
- gpr_slice_buffer_add_indexed(st->output, gpr_slice_malloc(9));
- st->output_length_at_start_of_frame = st->output->length;
-}
-
-static void begin_new_frame(framer_state *st, frame_type type) {
- finish_frame(st, 1, 0);
- st->last_was_header = 0;
- begin_frame(st, type);
-}
-
-/* make sure that the current frame is of the type desired, and has sufficient
- space to add at least about_to_add bytes -- finishes the current frame if
- needed */
-static void ensure_frame_type(framer_state *st, frame_type type,
- int need_bytes) {
- if (st->cur_frame_type == type &&
- st->output->length - st->output_length_at_start_of_frame + need_bytes <=
- GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
- return;
- }
- finish_frame(st, type != HEADER, 0);
- begin_frame(st, type);
-}
-
-/* increment a filter count, halve all counts if one element reaches max */
-static void inc_filter(gpr_uint8 idx, gpr_uint32 *sum, gpr_uint8 *elems) {
- elems[idx]++;
- if (elems[idx] < 255) {
- (*sum)++;
- } else {
- int i;
- *sum = 0;
- for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_FILTERS; i++) {
- elems[i] /= 2;
- (*sum) += elems[i];
- }
- }
-}
-
-static void add_header_data(framer_state *st, gpr_slice slice) {
- size_t len = GPR_SLICE_LENGTH(slice);
- size_t remaining;
- if (len == 0) return;
- ensure_frame_type(st, HEADER, 1);
- remaining = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
- st->output_length_at_start_of_frame - st->output->length;
- if (len <= remaining) {
- gpr_slice_buffer_add(st->output, slice);
- } else {
- gpr_slice_buffer_add(st->output, gpr_slice_split_head(&slice, remaining));
- add_header_data(st, slice);
- }
-}
-
-static gpr_uint8 *add_tiny_header_data(framer_state *st, int len) {
- ensure_frame_type(st, HEADER, len);
- return gpr_slice_buffer_tiny_add(st->output, len);
-}
-
-/* add an element to the decoder table: returns metadata element to unref */
-static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem) {
- gpr_uint32 key_hash = elem->key->hash;
- gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
- gpr_uint32 new_index = c->tail_remote_index + c->table_elems + 1;
- gpr_uint32 elem_size = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
- GPR_SLICE_LENGTH(elem->value->slice);
- grpc_mdelem *elem_to_unref;
-
- /* Reserve space for this element in the remote table: if this overflows
- the current table, drop elements until it fits, matching the decompressor
- algorithm */
- /* TODO(ctiller): constant */
- while (c->table_size + elem_size > 4096) {
- c->tail_remote_index++;
- GPR_ASSERT(c->tail_remote_index > 0);
- GPR_ASSERT(c->table_size >=
- c->table_elem_size[c->tail_remote_index %
- GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS]);
- GPR_ASSERT(c->table_elems > 0);
- c->table_size -= c->table_elem_size[c->tail_remote_index %
- GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS];
- c->table_elems--;
- }
- GPR_ASSERT(c->table_elems < GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS);
- c->table_elem_size[new_index % GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS] =
- elem_size;
- c->table_size += elem_size;
- c->table_elems++;
-
- /* Store this element into {entries,indices}_elem */
- if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem) {
- /* already there: update with new index */
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- elem_to_unref = elem;
- } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem) {
- /* already there (cuckoo): update with new index */
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- elem_to_unref = elem;
- } else if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == NULL) {
- /* not there, but a free element: add */
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- elem_to_unref = NULL;
- } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == NULL) {
- /* not there (cuckoo), but a free element: add */
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- elem_to_unref = NULL;
- } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
- /* not there: replace oldest */
- elem_to_unref = c->entries_elems[HASH_FRAGMENT_2(elem_hash)];
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else {
- /* not there: replace oldest */
- elem_to_unref = c->entries_elems[HASH_FRAGMENT_3(elem_hash)];
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- }
-
- /* do exactly the same for the key (so we can find by that again too) */
-
- if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key) {
- c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key) {
- c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == NULL) {
- c->entries_keys[HASH_FRAGMENT_2(key_hash)] = grpc_mdstr_ref(elem->key);
- c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == NULL) {
- c->entries_keys[HASH_FRAGMENT_3(key_hash)] = grpc_mdstr_ref(elem->key);
- c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
- } else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] <
- c->indices_keys[HASH_FRAGMENT_3(key_hash)]) {
- grpc_mdstr_unref(c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
- c->entries_keys[HASH_FRAGMENT_2(key_hash)] = grpc_mdstr_ref(elem->key);
- c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
- } else {
- grpc_mdstr_unref(c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
- c->entries_keys[HASH_FRAGMENT_3(key_hash)] = grpc_mdstr_ref(elem->key);
- c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
- }
-
- return elem_to_unref;
-}
-
-static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 index,
- framer_state *st) {
- int len = GRPC_CHTTP2_VARINT_LENGTH(index, 1);
- GRPC_CHTTP2_WRITE_VARINT(index, 1, 0x80, add_tiny_header_data(st, len), len);
-}
-
-static gpr_slice get_wire_value(grpc_mdelem *elem, gpr_uint8 *huffman_prefix) {
- if (grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(elem->key->slice),
- GPR_SLICE_LENGTH(elem->key->slice))) {
- *huffman_prefix = 0x80;
- return grpc_mdstr_as_base64_encoded_and_huffman_compressed(elem->value);
- }
- /* TODO(ctiller): opportunistically compress non-binary headers */
- *huffman_prefix = 0x00;
- return elem->value->slice;
-}
-
-static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
- gpr_uint32 key_index, grpc_mdelem *elem,
- framer_state *st) {
- int len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
- gpr_uint8 huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- int len_val = GPR_SLICE_LENGTH(value_slice);
- int len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
- GRPC_CHTTP2_WRITE_VARINT(key_index, 2, 0x40,
- add_tiny_header_data(st, len_pfx), len_pfx);
- GRPC_CHTTP2_WRITE_VARINT(len_val, 1, 0x00,
- add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
-}
-
-static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
- gpr_uint32 key_index, grpc_mdelem *elem,
- framer_state *st) {
- int len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
- gpr_uint8 huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- int len_val = GPR_SLICE_LENGTH(value_slice);
- int len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
- GRPC_CHTTP2_WRITE_VARINT(key_index, 4, 0x00,
- add_tiny_header_data(st, len_pfx), len_pfx);
- GRPC_CHTTP2_WRITE_VARINT(len_val, 1, 0x00,
- add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
-}
-
-static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- int len_key = GPR_SLICE_LENGTH(elem->key->slice);
- gpr_uint8 huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- int len_val = GPR_SLICE_LENGTH(value_slice);
- int len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
- int len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
- *add_tiny_header_data(st, 1) = 0x40;
- GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
- add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, gpr_slice_ref(elem->key->slice));
- GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
- add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
-}
-
-static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- int len_key = GPR_SLICE_LENGTH(elem->key->slice);
- gpr_uint8 huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- int len_val = GPR_SLICE_LENGTH(value_slice);
- int len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
- int len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
- *add_tiny_header_data(st, 1) = 0x00;
- GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
- add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, gpr_slice_ref(elem->key->slice));
- GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
- add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
-}
-
-static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c, gpr_uint32 index) {
- return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
- c->table_elems - index;
-}
-
-/* encode an mdelem; returns metadata element to unref */
-static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- gpr_uint32 key_hash = elem->key->hash;
- gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
- size_t decoder_space_usage;
- gpr_uint32 indices_key;
- int should_add_elem;
-
- inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
-
- /* is this elem currently in the decoders table? */
-
- if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem &&
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
- /* HIT: complete element (first cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
- st);
- return elem;
- }
-
- if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem &&
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
- /* HIT: complete element (second cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
- st);
- return elem;
- }
-
- /* should this elem be in the table? */
- decoder_space_usage = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
- GPR_SLICE_LENGTH(elem->value->slice);
- should_add_elem = decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
- c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
- c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
-
- /* no hits for the elem... maybe there's a key? */
-
- indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
- if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key &&
- indices_key > c->tail_remote_index) {
- /* HIT: key (first cuckoo hash) */
- if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- return add_elem(c, elem);
- } else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return elem;
- }
- abort();
- }
-
- indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
- if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key &&
- indices_key > c->tail_remote_index) {
- /* HIT: key (second cuckoo hash) */
- if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- return add_elem(c, elem);
- } else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return elem;
- }
- abort();
- }
-
- /* no elem, no key in the table... fall back to literal emission */
-
- if (should_add_elem) {
- emit_lithdr_incidx_v(c, elem, st);
- return add_elem(c, elem);
- } else {
- emit_lithdr_noidx_v(c, elem, st);
- return elem;
- }
- abort();
-}
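Editorial note (not part of the diff): hpack_enc above only promotes an element into the dynamic HPACK table when it is small enough and has been seen often enough, using the filter_elems counters as a rough frequency filter. A toy sketch of that admission test follows; the constants and names are placeholders, not the values defined elsewhere in the removed file, and the decay of the counters is omitted.

#include <stddef.h>
#include <stdint.h>

#define MAX_DECODER_SPACE_USAGE 512 /* placeholder cap on per-entry size */
#define ONE_ON_ADD_PROBABILITY 4    /* placeholder admission threshold */
#define FILTER_SLOTS 64

typedef struct {
  uint32_t counts[FILTER_SLOTS];
  uint32_t sum;
} admit_filter;

/* Count a sighting of this (hashed) header element. */
static void filter_inc(admit_filter *f, uint32_t hash) {
  f->counts[hash % FILTER_SLOTS]++;
  f->sum++;
}

/* Admit only small entries that account for a large enough share of
   recently seen headers (32 bytes is the HPACK per-entry overhead). */
static int should_add(const admit_filter *f, uint32_t hash,
                      size_t key_len, size_t value_len) {
  size_t usage = 32 + key_len + value_len;
  return usage < MAX_DECODER_SPACE_USAGE &&
         f->counts[hash % FILTER_SLOTS] >= f->sum / ONE_ON_ADD_PROBABILITY;
}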
-
-#define STRLEN_LIT(x) (sizeof(x) - 1)
-#define TIMEOUT_KEY "grpc-timeout"
-
-static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
- framer_state *st) {
- char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
- grpc_mdelem *mdelem;
- grpc_chttp2_encode_timeout(gpr_time_sub(deadline, gpr_now()), timeout_str);
- mdelem = grpc_mdelem_from_metadata_strings(
- c->mdctx, grpc_mdstr_ref(c->timeout_key_str),
- grpc_mdstr_from_string(c->mdctx, timeout_str));
- mdelem = hpack_enc(c, mdelem, st);
- if (mdelem) grpc_mdelem_unref(mdelem);
-}
-
-gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id) {
- gpr_slice slice = gpr_slice_malloc(9);
- fill_header(GPR_SLICE_START_PTR(slice), GRPC_CHTTP2_FRAME_DATA, id, 0, 1);
- return slice;
-}
-
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
- grpc_mdctx *ctx) {
- memset(c, 0, sizeof(*c));
- c->mdctx = ctx;
- c->timeout_key_str = grpc_mdstr_from_string(ctx, "grpc-timeout");
-}
-
-void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {
- int i;
- for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
- if (c->entries_keys[i]) grpc_mdstr_unref(c->entries_keys[i]);
- if (c->entries_elems[i]) grpc_mdelem_unref(c->entries_elems[i]);
- }
- grpc_mdstr_unref(c->timeout_key_str);
-}
-
-gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
- gpr_uint32 max_flow_controlled_bytes,
- grpc_stream_op_buffer *outops) {
- gpr_slice slice;
- grpc_stream_op *op;
- gpr_uint32 max_take_size;
- gpr_uint32 flow_controlled_bytes_taken = 0;
- gpr_uint32 curop = 0;
- gpr_uint8 *p;
-
- while (curop < *inops_count) {
- GPR_ASSERT(flow_controlled_bytes_taken <= max_flow_controlled_bytes);
- op = &inops[curop];
- switch (op->type) {
- case GRPC_NO_OP:
- /* skip */
- curop++;
- break;
- case GRPC_OP_METADATA:
- grpc_metadata_batch_assert_ok(&op->data.metadata);
- /* these just get copied as they don't impact the number of flow
- controlled bytes */
- grpc_sopb_append(outops, op, 1);
- curop++;
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- /* begin op: for now we just convert the op to a slice and fall
- through - this lets us reuse the slice framing code below */
- slice = gpr_slice_malloc(5);
- p = GPR_SLICE_START_PTR(slice);
- p[0] = 0;
- p[1] = op->data.begin_message.length >> 24;
- p[2] = op->data.begin_message.length >> 16;
- p[3] = op->data.begin_message.length >> 8;
- p[4] = op->data.begin_message.length;
- op->type = GRPC_OP_SLICE;
- op->data.slice = slice;
- /* fallthrough */
- case GRPC_OP_SLICE:
- slice = op->data.slice;
- if (!GPR_SLICE_LENGTH(slice)) {
- /* skip zero length slices */
- gpr_slice_unref(slice);
- curop++;
- break;
- }
- max_take_size = max_flow_controlled_bytes - flow_controlled_bytes_taken;
- if (max_take_size == 0) {
- goto exit_loop;
- }
- if (GPR_SLICE_LENGTH(slice) > max_take_size) {
- slice = gpr_slice_split_head(&op->data.slice, max_take_size);
- grpc_sopb_add_slice(outops, slice);
- } else {
- /* consume this op immediately */
- grpc_sopb_append(outops, op, 1);
- curop++;
- }
- flow_controlled_bytes_taken += GPR_SLICE_LENGTH(slice);
- break;
- }
- }
-exit_loop:
- *inops_count -= curop;
- memmove(inops, inops + curop, *inops_count * sizeof(grpc_stream_op));
-
- for (curop = 0; curop < *inops_count; curop++) {
- if (inops[curop].type == GRPC_OP_METADATA) {
- grpc_metadata_batch_assert_ok(&inops[curop].data.metadata);
- }
- }
-
- return flow_controlled_bytes_taken;
-}
-
-void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
- gpr_uint32 stream_id,
- grpc_chttp2_hpack_compressor *compressor,
- gpr_slice_buffer *output) {
- framer_state st;
- gpr_slice slice;
- grpc_stream_op *op;
- gpr_uint32 max_take_size;
- gpr_uint32 curop = 0;
- gpr_uint32 unref_op;
- grpc_mdctx *mdctx = compressor->mdctx;
- grpc_linked_mdelem *l;
- int need_unref = 0;
-
- GPR_ASSERT(stream_id != 0);
-
- st.cur_frame_type = NONE;
- st.last_was_header = 0;
- st.stream_id = stream_id;
- st.output = output;
-
- while (curop < ops_count) {
- op = &ops[curop];
- switch (op->type) {
- case GRPC_NO_OP:
- case GRPC_OP_BEGIN_MESSAGE:
- gpr_log(
- GPR_ERROR,
- "These stream ops should be filtered out by grpc_chttp2_preencode");
- abort();
- case GRPC_OP_METADATA:
- /* Encode a metadata batch; store the returned values, representing
- a metadata element that needs to be unreffed back into the metadata
- slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
- updated). After this loop, we'll do a batch unref of elements. */
- begin_new_frame(&st, HEADER);
- need_unref |= op->data.metadata.garbage.head != NULL;
- grpc_metadata_batch_assert_ok(&op->data.metadata);
- for (l = op->data.metadata.list.head; l; l = l->next) {
- l->md = hpack_enc(compressor, l->md, &st);
- need_unref |= l->md != NULL;
- }
- if (gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future) != 0) {
- deadline_enc(compressor, op->data.metadata.deadline, &st);
- }
- curop++;
- break;
- case GRPC_OP_SLICE:
- slice = op->data.slice;
- if (st.cur_frame_type == DATA &&
- st.output->length - st.output_length_at_start_of_frame ==
- GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
- finish_frame(&st, 0, 0);
- }
- ensure_frame_type(&st, DATA, 1);
- max_take_size = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
- st.output_length_at_start_of_frame - st.output->length;
- if (GPR_SLICE_LENGTH(slice) > max_take_size) {
- slice = gpr_slice_split_head(&op->data.slice, max_take_size);
- } else {
- /* consume this op immediately */
- curop++;
- }
- gpr_slice_buffer_add(output, slice);
- break;
- }
- }
- if (eof && st.cur_frame_type == NONE) {
- begin_frame(&st, DATA);
- }
- finish_frame(&st, 1, eof);
-
- if (need_unref) {
- grpc_mdctx_lock(mdctx);
- for (unref_op = 0; unref_op < curop; unref_op++) {
- op = &ops[unref_op];
- if (op->type != GRPC_OP_METADATA) continue;
- for (l = op->data.metadata.list.head; l; l = l->next) {
- if (l->md) grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
- }
- for (l = op->data.metadata.garbage.head; l; l = l->next) {
- grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
- }
- }
- grpc_mdctx_unlock(mdctx);
- }
-}
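Editorial note (not part of the diff): the removed GRPC_OP_BEGIN_MESSAGE branch above shows the gRPC message prefix the old encoder emitted: a flags byte followed by the payload length in big-endian order, five bytes in all. A minimal standalone sketch of that framing; the function name and plain malloc are illustrative only.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Build [flags][len>>24][len>>16][len>>8][len] followed by the payload. */
static unsigned char *frame_message(const unsigned char *msg, uint32_t len,
                                    size_t *out_len) {
  unsigned char *buf = malloc(5 + (size_t)len);
  if (buf == NULL) return NULL;
  buf[0] = 0; /* flags: compression not considered in this sketch */
  buf[1] = (unsigned char)(len >> 24);
  buf[2] = (unsigned char)(len >> 16);
  buf[3] = (unsigned char)(len >> 8);
  buf[4] = (unsigned char)(len);
  memcpy(buf + 5, msg, len);
  *out_len = 5 + (size_t)len;
  return buf;
}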
diff --git a/src/core/transport/chttp2/stream_lists.c b/src/core/transport/chttp2/stream_lists.c
index c6ba12fca8..60fe735cfc 100644
--- a/src/core/transport/chttp2/stream_lists.c
+++ b/src/core/transport/chttp2/stream_lists.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,11 +100,14 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
}
}
-static void stream_list_maybe_remove(grpc_chttp2_transport *t,
+static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
stream_list_remove(t, s, id);
+ return true;
+ } else {
+ return false;
}
}
@@ -119,28 +122,30 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
if (old_tail) {
old_tail->links[id].next = s;
} else {
- s->links[id].prev = NULL;
t->lists[id].head = s;
}
t->lists[id].tail = s;
s->included[id] = 1;
}
-static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
- return;
+ return false;
}
stream_list_add_tail(t, s, id);
+ return true;
}
/* wrappers for specializations */
-void grpc_chttp2_list_add_writable_stream(
+bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE);
+ GPR_ASSERT(stream_global->id != 0);
+ return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_writable_stream(
@@ -151,17 +156,27 @@ int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE);
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
+ if (r != 0) {
+ *stream_global = &stream->global;
+ *stream_writing = &stream->writing;
+ }
return r;
}
+bool grpc_chttp2_list_remove_writable_stream(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global) {
+ return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_WRITABLE);
+}
+
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITING);
+ GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
+ STREAM_FROM_WRITING(stream_writing),
+ GRPC_CHTTP2_LIST_WRITING));
}
int grpc_chttp2_list_have_writing_streams(
@@ -176,7 +191,9 @@ int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITING);
- *stream_writing = &stream->writing;
+ if (r != 0) {
+ *stream_writing = &stream->writing;
+ }
return r;
}
@@ -196,33 +213,45 @@ int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITTEN);
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
+ if (r != 0) {
+ *stream_global = &stream->global;
+ *stream_writing = &stream->writing;
+ }
return r;
}
-void grpc_chttp2_list_add_writable_window_update_stream(
+void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
+ GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
-int grpc_chttp2_list_pop_writable_window_update_stream(
+void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
- *stream_global = &stream->global;
- return r;
+ grpc_chttp2_stream_global *stream_global) {
+ stream_list_maybe_remove(
+ TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
-void grpc_chttp2_list_remove_writable_window_update_stream(
+int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global), STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
+ grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_global **stream_global,
+ grpc_chttp2_stream_parsing **stream_parsing) {
+ grpc_chttp2_stream *stream;
+ int r =
+ stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ *stream_parsing = &stream->parsing;
+ }
+ return r;
}
void grpc_chttp2_list_add_parsing_seen_stream(
@@ -241,8 +270,10 @@ int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
GRPC_CHTTP2_LIST_PARSING_SEEN);
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
+ if (r != 0) {
+ *stream_global = &stream->global;
+ *stream_parsing = &stream->parsing;
+ }
return r;
}
@@ -260,72 +291,126 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
- *stream_global = &stream->global;
+ if (r != 0) {
+ *stream_global = &stream->global;
+ }
return r;
}
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
+void grpc_chttp2_list_add_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
+int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
- *stream_global = &stream->global;
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ }
return r;
}
-void grpc_chttp2_list_add_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
+void grpc_chttp2_list_add_writing_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing) {
+ grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
+ if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
+ GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
+ }
+ stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
+ GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
}
-int grpc_chttp2_list_pop_incoming_window_updated(
+void grpc_chttp2_list_flush_writing_stalled_by_transport(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ bool is_window_available) {
+ grpc_chttp2_stream *stream;
+ grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
+ while (stream_list_pop(transport, &stream,
+ GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
+ if (is_window_available) {
+ grpc_chttp2_become_writable(&transport->global, &stream->global);
+ } else {
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ &stream->writing);
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
+ "chttp2_writing_stalled");
+ }
+}
+
+void grpc_chttp2_list_add_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing) {
+ stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
+ STREAM_FROM_WRITING(stream_writing),
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+}
+
+int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
+ grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ }
return r;
}
-void grpc_chttp2_list_remove_incoming_window_updated(
+void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_list_add_read_write_state_changed(
+void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
}
-int grpc_chttp2_list_pop_read_write_state_changed(
+int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
- *stream_global = &stream->global;
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ }
+ return r;
+}
+
+void grpc_chttp2_list_add_closed_waiting_for_writing(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global) {
+ stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
+}
+
+int grpc_chttp2_list_pop_closed_waiting_for_writing(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global **stream_global) {
+ grpc_chttp2_stream *stream;
+ int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ }
return r;
}
@@ -334,9 +419,14 @@ void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
-void grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- stream_list_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
+int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
+ return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
+}
+
+int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
+ return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
void grpc_chttp2_for_all_streams(
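Editorial note (not part of the diff): the stream lists track membership with a per-stream included[] flag, which is what lets stream_list_add and stream_list_maybe_remove now report whether they changed anything. A reduced, singly-linked sketch of that pattern, with hypothetical types standing in for grpc_chttp2_stream:

#include <stdbool.h>
#include <stddef.h>

#define NUM_LISTS 4 /* stand-in for the GRPC_CHTTP2_LIST_... count */

typedef struct node {
  struct node *next[NUM_LISTS];
  bool included[NUM_LISTS];
} node;

typedef struct { node *head, *tail; } list;

/* Append only if the node is not already on list id; report whether we did. */
static bool list_add(list *l, node *n, int id) {
  if (n->included[id]) return false; /* already queued: nothing to do */
  n->next[id] = NULL;
  if (l->tail) l->tail->next[id] = n; else l->head = n;
  l->tail = n;
  n->included[id] = true;
  return true;
}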
diff --git a/src/core/transport/chttp2/stream_map.c b/src/core/transport/chttp2/stream_map.c
index 0ec2f27291..555a16fb72 100644
--- a/src/core/transport/chttp2/stream_map.c
+++ b/src/core/transport/chttp2/stream_map.c
@@ -42,7 +42,7 @@
void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
- map->keys = gpr_malloc(sizeof(gpr_uint32) * initial_capacity);
+ map->keys = gpr_malloc(sizeof(uint32_t) * initial_capacity);
map->values = gpr_malloc(sizeof(void *) * initial_capacity);
map->count = 0;
map->free = 0;
@@ -54,7 +54,7 @@ void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map) {
gpr_free(map->values);
}
-static size_t compact(gpr_uint32 *keys, void **values, size_t count) {
+static size_t compact(uint32_t *keys, void **values, size_t count) {
size_t i, out;
for (i = 0, out = 0; i < count; i++) {
@@ -68,11 +68,11 @@ static size_t compact(gpr_uint32 *keys, void **values, size_t count) {
return out;
}
-void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, gpr_uint32 key,
+void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
void *value) {
size_t count = map->count;
size_t capacity = map->capacity;
- gpr_uint32 *keys = map->keys;
+ uint32_t *keys = map->keys;
void **values = map->values;
GPR_ASSERT(count == 0 || keys[count - 1] < key);
@@ -86,7 +86,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, gpr_uint32 key,
/* resize when less than 25% of the table is free, because compaction
won't help much */
map->capacity = capacity = 3 * capacity / 2;
- map->keys = keys = gpr_realloc(keys, capacity * sizeof(gpr_uint32));
+ map->keys = keys = gpr_realloc(keys, capacity * sizeof(uint32_t));
map->values = values = gpr_realloc(values, capacity * sizeof(void *));
}
}
@@ -119,25 +119,24 @@ void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
/* if dst doesn't have capacity, resize */
if (dst->count + src->count > dst->capacity) {
dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
- dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(gpr_uint32));
+ dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
}
- memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(gpr_uint32));
- memcpy(dst->values + dst->count, src->values,
- src->count * sizeof(void*));
+ memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
+ memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
dst->count += src->count;
dst->free += src->free;
src->count = 0;
src->free = 0;
}
-static void **find(grpc_chttp2_stream_map *map, gpr_uint32 key) {
+static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
size_t mid_idx;
- gpr_uint32 *keys = map->keys;
+ uint32_t *keys = map->keys;
void **values = map->values;
- gpr_uint32 mid_key;
+ uint32_t mid_key;
if (max_idx == 0) return NULL;
@@ -150,7 +149,8 @@ static void **find(grpc_chttp2_stream_map *map, gpr_uint32 key) {
min_idx = mid_idx + 1;
} else if (mid_key > key) {
max_idx = mid_idx;
- } else /* mid_key == key */ {
+ } else /* mid_key == key */
+ {
return &values[mid_idx];
}
}
@@ -158,8 +158,7 @@ static void **find(grpc_chttp2_stream_map *map, gpr_uint32 key) {
return NULL;
}
-void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map,
- gpr_uint32 key) {
+void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
void **pvalue = find(map, key);
void *out = NULL;
if (pvalue != NULL) {
@@ -175,7 +174,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map,
return out;
}
-void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, gpr_uint32 key) {
+void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key) {
void **pvalue = find(map, key);
return pvalue != NULL ? *pvalue : NULL;
}
@@ -185,7 +184,7 @@ size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
}
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
- void (*f)(void *user_data, gpr_uint32 key,
+ void (*f)(void *user_data, uint32_t key,
void *value),
void *user_data) {
size_t i;
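Editorial note (not part of the diff): deletions in the stream map just NULL the value slot and bump a free counter; compact() later squeezes those holes out of the parallel key/value arrays, as partially visible above. A reduced sketch of that pass:

#include <stddef.h>
#include <stdint.h>

/* Drop entries whose value is NULL, keeping keys/values parallel.
   Returns the new element count. */
static size_t compact_entries(uint32_t *keys, void **values, size_t count) {
  size_t i, out = 0;
  for (i = 0; i < count; i++) {
    if (values[i] != NULL) {
      keys[out] = keys[i];
      values[out] = values[i];
      out++;
    }
  }
  return out;
}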
diff --git a/src/core/transport/chttp2/stream_map.h b/src/core/transport/chttp2/stream_map.h
index 71b0582054..957a58a4f2 100644
--- a/src/core/transport/chttp2/stream_map.h
+++ b/src/core/transport/chttp2/stream_map.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,21 +31,21 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
-/* Data structure to map a gpr_uint32 to a data object (represented by a void*)
+/* Data structure to map a uint32_t to a data object (represented by a void*)
Represented as a sorted array of keys, and a corresponding array of values.
Lookups are performed with binary search.
Adds are restricted to strictly higher keys than previously seen (this is
guaranteed by http2). */
typedef struct {
- gpr_uint32 *keys;
+ uint32_t *keys;
void **values;
size_t count;
size_t free;
@@ -58,28 +58,27 @@ void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map);
/* Add a new key: given http2 semantics, new keys must always be greater than
existing keys - this is asserted */
-void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, gpr_uint32 key,
+void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
void *value);
/* Delete an existing key - returns the previous value of the key if it existed,
or NULL otherwise */
-void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map,
- gpr_uint32 key);
+void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Move all elements of src into dst */
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst);
/* Return an existing key, or NULL if it does not exist */
-void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, gpr_uint32 key);
+void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
/* How many (populated) entries are in the stream map? */
size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);
/* Callback on each stream */
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
- void (*f)(void *user_data, gpr_uint32 key,
+ void (*f)(void *user_data, uint32_t key,
void *value),
void *user_data);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_STREAM_MAP_H */
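Editorial note (not part of the diff): grpc_chttp2_stream_map keeps its keys in a sorted array and looks them up with binary search, relying on HTTP/2's guarantee that new stream ids are strictly increasing. A minimal sketch of the lookup; names are illustrative.

#include <stddef.h>
#include <stdint.h>

/* Binary search over a sorted key array; returns the index of key or -1. */
static ptrdiff_t sorted_find(const uint32_t *keys, size_t count, uint32_t key) {
  size_t lo = 0, hi = count;
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (keys[mid] < key) {
      lo = mid + 1;
    } else if (keys[mid] > key) {
      hi = mid;
    } else {
      return (ptrdiff_t)mid;
    }
  }
  return -1;
}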
diff --git a/src/core/transport/chttp2/timeout_encoding.c b/src/core/transport/chttp2/timeout_encoding.c
index 33915c4039..a6f7081d21 100644
--- a/src/core/transport/chttp2/timeout_encoding.c
+++ b/src/core/transport/chttp2/timeout_encoding.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,14 +36,15 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/support/port_platform.h>
#include "src/core/support/string.h"
-static int round_up(int x, int divisor) {
+static int64_t round_up(int64_t x, int64_t divisor) {
return (x / divisor + (x % divisor != 0)) * divisor;
}
/* round an integer up to the next value with three significant figures */
-static int round_up_to_three_sig_figs(int x) {
+static int64_t round_up_to_three_sig_figs(int64_t x) {
if (x < 1000) return x;
if (x < 10000) return round_up(x, 10);
if (x < 100000) return round_up(x, 100);
@@ -57,13 +58,13 @@ static int round_up_to_three_sig_figs(int x) {
/* encode our minimum viable timeout value */
static void enc_tiny(char *buffer) { memcpy(buffer, "1n", 3); }
-static void enc_ext(char *buffer, long value, char ext) {
- int n = gpr_ltoa(value, buffer);
+static void enc_ext(char *buffer, int64_t value, char ext) {
+ int n = int64_ttoa(value, buffer);
buffer[n] = ext;
buffer[n + 1] = 0;
}
-static void enc_seconds(char *buffer, long sec) {
+static void enc_seconds(char *buffer, int64_t sec) {
if (sec % 3600 == 0) {
enc_ext(buffer, sec / 3600, 'H');
} else if (sec % 60 == 0) {
@@ -73,7 +74,7 @@ static void enc_seconds(char *buffer, long sec) {
}
}
-static void enc_nanos(char *buffer, int x) {
+static void enc_nanos(char *buffer, int64_t x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
@@ -97,7 +98,7 @@ static void enc_nanos(char *buffer, int x) {
}
}
-static void enc_micros(char *buffer, int x) {
+static void enc_micros(char *buffer, int64_t x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
@@ -123,7 +124,7 @@ void grpc_chttp2_encode_timeout(gpr_timespec timeout, char *buffer) {
enc_nanos(buffer, timeout.tv_nsec);
} else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) {
enc_micros(buffer,
- timeout.tv_sec * 1000000 +
+ (int64_t)(timeout.tv_sec * 1000000) +
(timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0)));
} else {
enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0));
@@ -136,21 +137,24 @@ static int is_all_whitespace(const char *p) {
}
int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
- gpr_uint32 x = 0;
- const char *p = buffer;
+ int32_t x = 0;
+ const uint8_t *p = (const uint8_t *)buffer;
int have_digit = 0;
/* skip whitespace */
for (; *p == ' '; p++)
;
/* decode numeric part */
for (; *p >= '0' && *p <= '9'; p++) {
- gpr_uint32 xp = x * 10 + *p - '0';
+ int32_t digit = (int32_t)(*p - (uint8_t)'0');
have_digit = 1;
- if (xp < x) {
- *timeout = gpr_inf_future;
- return 1;
+ /* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */
+ if (x >= (100 * 1000 * 1000)) {
+ if (x != (100 * 1000 * 1000) || digit != 0) {
+ *timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
+ return 1;
+ }
}
- x = xp;
+ x = x * 10 + digit;
}
if (!have_digit) return 0;
/* skip whitespace */
@@ -159,26 +163,26 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
/* decode unit specifier */
switch (*p) {
case 'n':
- *timeout = gpr_time_from_nanos(x);
+ *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN);
break;
case 'u':
- *timeout = gpr_time_from_micros(x);
+ *timeout = gpr_time_from_micros(x, GPR_TIMESPAN);
break;
case 'm':
- *timeout = gpr_time_from_millis(x);
+ *timeout = gpr_time_from_millis(x, GPR_TIMESPAN);
break;
case 'S':
- *timeout = gpr_time_from_seconds(x);
+ *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN);
break;
case 'M':
- *timeout = gpr_time_from_minutes(x);
+ *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN);
break;
case 'H':
- *timeout = gpr_time_from_hours(x);
+ *timeout = gpr_time_from_hours(x, GPR_TIMESPAN);
break;
default:
return 0;
}
p++;
- return is_all_whitespace(p);
+ return is_all_whitespace((const char *)p);
}
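Editorial note (not part of the diff): the grpc-timeout header value parsed above is an integer followed by a single unit character: 'n' nanoseconds, 'u' microseconds, 'm' milliseconds, 'S' seconds, 'M' minutes, 'H' hours. A toy decoder sketch that returns nanoseconds; it does not reproduce the capping of the numeric part done by the real decoder.

#include <stdint.h>

/* Parse e.g. "250m" -> 250 * 1,000,000 ns. Returns -1 on malformed input. */
static int64_t parse_grpc_timeout_ns(const char *s) {
  int64_t x = 0;
  int have_digit = 0;
  while (*s >= '0' && *s <= '9') {
    x = x * 10 + (*s - '0');
    have_digit = 1;
    s++;
  }
  if (!have_digit) return -1;
  switch (*s) {
    case 'n': return x;
    case 'u': return x * 1000;
    case 'm': return x * 1000 * 1000;
    case 'S': return x * 1000 * 1000 * 1000;
    case 'M': return x * 60 * (int64_t)1000 * 1000 * 1000;
    case 'H': return x * 3600 * (int64_t)1000 * 1000 * 1000;
    default:  return -1;
  }
}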
diff --git a/src/core/transport/chttp2/timeout_encoding.h b/src/core/transport/chttp2/timeout_encoding.h
index 9d8756e799..81bae8e936 100644
--- a/src/core/transport/chttp2/timeout_encoding.h
+++ b/src/core/transport/chttp2/timeout_encoding.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H
#include "src/core/support/string.h"
#include <grpc/support/time.h>
@@ -44,4 +44,4 @@
void grpc_chttp2_encode_timeout(gpr_timespec timeout, char *buffer);
int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H */
diff --git a/src/core/transport/chttp2/varint.c b/src/core/transport/chttp2/varint.c
index 0722c9ada9..1cc235e989 100644
--- a/src/core/transport/chttp2/varint.c
+++ b/src/core/transport/chttp2/varint.c
@@ -33,7 +33,7 @@
#include "src/core/transport/chttp2/varint.h"
-int grpc_chttp2_hpack_varint_length(gpr_uint32 tail_value) {
+uint32_t grpc_chttp2_hpack_varint_length(uint32_t tail_value) {
if (tail_value < (1 << 7)) {
return 2;
} else if (tail_value < (1 << 14)) {
@@ -47,19 +47,19 @@ int grpc_chttp2_hpack_varint_length(gpr_uint32 tail_value) {
}
}
-void grpc_chttp2_hpack_write_varint_tail(gpr_uint32 tail_value,
- gpr_uint8* target, int tail_length) {
+void grpc_chttp2_hpack_write_varint_tail(uint32_t tail_value, uint8_t* target,
+ uint32_t tail_length) {
switch (tail_length) {
case 5:
- target[4] = (gpr_uint8)((tail_value >> 28) | 0x80);
+ target[4] = (uint8_t)((tail_value >> 28) | 0x80);
case 4:
- target[3] = (gpr_uint8)((tail_value >> 21) | 0x80);
+ target[3] = (uint8_t)((tail_value >> 21) | 0x80);
case 3:
- target[2] = (gpr_uint8)((tail_value >> 14) | 0x80);
+ target[2] = (uint8_t)((tail_value >> 14) | 0x80);
case 2:
- target[1] = (gpr_uint8)((tail_value >> 7) | 0x80);
+ target[1] = (uint8_t)((tail_value >> 7) | 0x80);
case 1:
- target[0] = (gpr_uint8)((tail_value) | 0x80);
+ target[0] = (uint8_t)((tail_value) | 0x80);
}
target[tail_length - 1] &= 0x7f;
}
diff --git a/src/core/transport/chttp2/varint.h b/src/core/transport/chttp2/varint.h
index 0a6fb55248..7ab9d22ab5 100644
--- a/src/core/transport/chttp2/varint.h
+++ b/src/core/transport/chttp2/varint.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_VARINT_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_VARINT_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_VARINT_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_VARINT_H
#include <grpc/support/port_platform.h>
@@ -41,33 +41,35 @@
/* length of a value that needs varint tail encoding (it's bigger than can be
bitpacked into the opcode byte) - returned value includes the length of the
opcode byte */
-int grpc_chttp2_hpack_varint_length(gpr_uint32 tail_value);
+uint32_t grpc_chttp2_hpack_varint_length(uint32_t tail_value);
-void grpc_chttp2_hpack_write_varint_tail(gpr_uint32 tail_value,
- gpr_uint8* target, int tail_length);
+void grpc_chttp2_hpack_write_varint_tail(uint32_t tail_value, uint8_t* target,
+ uint32_t tail_length);
/* maximum value that can be bitpacked with the opcode if the opcode has a
prefix
of length prefix_bits */
-#define GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) ((1 << (8 - (prefix_bits))) - 1)
+#define GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) \
+ ((uint32_t)((1 << (8 - (prefix_bits))) - 1))
/* length required to bitpack a value */
#define GRPC_CHTTP2_VARINT_LENGTH(n, prefix_bits) \
((n) < GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) \
- ? 1 \
+ ? 1u \
: grpc_chttp2_hpack_varint_length( \
(n)-GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits)))
#define GRPC_CHTTP2_WRITE_VARINT(n, prefix_bits, prefix_or, target, length) \
do { \
- gpr_uint8* tgt = target; \
- if ((length) == 1) { \
- (tgt)[0] = (prefix_or) | (n); \
+ uint8_t* tgt = target; \
+ if ((length) == 1u) { \
+ (tgt)[0] = (uint8_t)((prefix_or) | (n)); \
} else { \
- (tgt)[0] = (prefix_or) | GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits); \
+ (tgt)[0] = \
+ (prefix_or) | (uint8_t)GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits); \
grpc_chttp2_hpack_write_varint_tail( \
(n)-GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits), (tgt) + 1, (length)-1); \
} \
} while (0)
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_VARINT_H */
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_VARINT_H */
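Editorial note (not part of the diff): GRPC_CHTTP2_VARINT_LENGTH and GRPC_CHTTP2_WRITE_VARINT implement HPACK's prefixed-integer encoding: if the value fits in the (8 - prefix_bits) low bits of the opcode byte it is packed there, otherwise those bits are set to all ones and the remainder follows as 7-bit continuation bytes. A freestanding sketch of the same scheme, not the macros themselves:

#include <stddef.h>
#include <stdint.h>

/* Encode value with an HPACK n-bit prefix into out; returns bytes written.
   prefix_or carries the opcode bits above the prefix. */
static size_t hpack_encode_int(uint8_t *out, uint8_t prefix_or,
                               unsigned prefix_bits, uint32_t value) {
  uint32_t max_in_prefix = (uint32_t)((1u << (8 - prefix_bits)) - 1);
  size_t n = 0;
  if (value < max_in_prefix) {
    out[n++] = (uint8_t)(prefix_or | value);
    return n;
  }
  out[n++] = (uint8_t)(prefix_or | max_in_prefix);
  value -= max_in_prefix;
  while (value >= 0x80) {
    out[n++] = (uint8_t)((value & 0x7f) | 0x80); /* continuation byte */
    value >>= 7;
  }
  out[n++] = (uint8_t)value; /* final byte has the high bit clear */
  return n;
}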
diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c
index fdcc300099..107725cbc7 100644
--- a/src/core/transport/chttp2/writing.c
+++ b/src/core/transport/chttp2/writing.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,26 +32,36 @@
*/
#include "src/core/transport/chttp2/internal.h"
-#include "src/core/transport/chttp2/http2_errors.h"
+
+#include <limits.h>
#include <grpc/support/log.h>
-static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
-static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status);
+#include "src/core/profiling/timers.h"
+#include "src/core/transport/chttp2/http2_errors.h"
+
+static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_unlocking_check_writes(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport_writing *transport_writing, int is_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
- gpr_uint32 window_delta;
+
+ GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_swap(&transport_global->qbuf, &transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
+ grpc_chttp2_hpack_compressor_set_max_table_size(
+ &transport_writing->hpack_compressor,
+ transport_global->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+
if (transport_global->dirtied_local_settings &&
- !transport_global->sent_local_settings) {
+ !transport_global->sent_local_settings && !is_parsing) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_settings_create(
@@ -63,153 +73,278 @@ int grpc_chttp2_unlocking_check_writes(
transport_global->sent_local_settings = 1;
}
+ GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
+ transport_global, outgoing_window);
+ bool is_window_available = transport_writing->outgoing_window > 0;
+ grpc_chttp2_list_flush_writing_stalled_by_transport(
+ exec_ctx, transport_writing, is_window_available);
+
/* for each grpc_chttp2_stream that's become writable, frame it's data
- (according to
- available window sizes) and add to the output buffer */
- while (transport_global->outgoing_window &&
- grpc_chttp2_list_pop_writable_stream(transport_global,
- transport_writing, &stream_global,
- &stream_writing) &&
- stream_global->outgoing_window > 0) {
+ (according to available window sizes) and add to the output buffer */
+ while (grpc_chttp2_list_pop_writable_stream(
+ transport_global, transport_writing, &stream_global, &stream_writing)) {
+ bool sent_initial_metadata = stream_writing->sent_initial_metadata;
+ bool become_writable = false;
+
stream_writing->id = stream_global->id;
- window_delta = grpc_chttp2_preencode(
- stream_global->outgoing_sopb->ops, &stream_global->outgoing_sopb->nops,
- GPR_MIN(transport_global->outgoing_window,
- stream_global->outgoing_window),
- &stream_writing->sopb);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "write", transport_global, outgoing_window, -(gpr_int64)window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
- outgoing_window, -(gpr_int64)window_delta);
- transport_global->outgoing_window -= window_delta;
- stream_global->outgoing_window -= window_delta;
-
- if (stream_global->write_state == GRPC_WRITE_STATE_QUEUED_CLOSE &&
- stream_global->outgoing_sopb->nops == 0) {
- if (!transport_global->is_client && !stream_global->read_closed) {
- stream_writing->send_closed = GRPC_SEND_CLOSED_WITH_RST_STREAM;
- } else {
- stream_writing->send_closed = GRPC_SEND_CLOSED;
- }
+ stream_writing->read_closed = stream_global->read_closed;
+
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
+ outgoing_window, stream_global,
+ outgoing_window);
+
+ if (!sent_initial_metadata && stream_global->send_initial_metadata) {
+ stream_writing->send_initial_metadata =
+ stream_global->send_initial_metadata;
+ stream_global->send_initial_metadata = NULL;
+ become_writable = true;
+ sent_initial_metadata = true;
}
- if (stream_writing->sopb.nops > 0 ||
- stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ if (sent_initial_metadata) {
+ if (stream_global->send_message != NULL) {
+ gpr_slice hdr = gpr_slice_malloc(5);
+ uint8_t *p = GPR_SLICE_START_PTR(hdr);
+ uint32_t len = stream_global->send_message->length;
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ p[0] = (stream_global->send_message->flags &
+ GRPC_WRITE_INTERNAL_COMPRESS) != 0;
+ p[1] = (uint8_t)(len >> 24);
+ p[2] = (uint8_t)(len >> 16);
+ p[3] = (uint8_t)(len >> 8);
+ p[4] = (uint8_t)(len);
+ gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
+ if (stream_global->send_message->length > 0) {
+ stream_writing->send_message = stream_global->send_message;
+ } else {
+ stream_writing->send_message = NULL;
+ }
+ stream_writing->stream_fetched = 0;
+ stream_global->send_message = NULL;
+ }
+ if ((stream_writing->send_message != NULL ||
+ stream_writing->flow_controlled_buffer.length > 0) &&
+ stream_writing->outgoing_window > 0) {
+ if (transport_writing->outgoing_window > 0) {
+ become_writable = true;
+ } else {
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ stream_writing);
+ }
+ }
+ if (stream_global->send_trailing_metadata) {
+ stream_writing->send_trailing_metadata =
+ stream_global->send_trailing_metadata;
+ stream_global->send_trailing_metadata = NULL;
+ become_writable = true;
+ }
}
- /* we should either exhaust window or have no ops left, but not both */
- if (stream_global->outgoing_sopb->nops == 0) {
- stream_global->outgoing_sopb = NULL;
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->send_done_closure, 1);
- } else if (stream_global->outgoing_window > 0) {
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ if (!stream_global->read_closed &&
+ stream_global->unannounced_incoming_window_for_writing > 1024) {
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
+ announce_window, stream_global,
+ unannounced_incoming_window_for_writing);
+ become_writable = true;
}
- }
- /* for each grpc_chttp2_stream that wants to update its window, add that
- * window here */
- while (grpc_chttp2_list_pop_writable_window_update_stream(transport_global,
- &stream_global)) {
- window_delta =
- transport_global->settings[GRPC_LOCAL_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
- stream_global->incoming_window;
- if (!stream_global->read_closed && window_delta > 0) {
- gpr_slice_buffer_add(
- &transport_writing->outbuf,
- grpc_chttp2_window_update_create(stream_global->id, window_delta));
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
- incoming_window, window_delta);
- stream_global->incoming_window += window_delta;
- grpc_chttp2_list_add_incoming_window_updated(transport_global,
- stream_global);
+ if (become_writable) {
+ grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ } else {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
- if (transport_global->incoming_window <
- transport_global->connection_window_target * 3 / 4) {
- window_delta = transport_global->connection_window_target -
- transport_global->incoming_window;
+ if (transport_global->announce_incoming_window > 0) {
+ uint32_t announced = (uint32_t)GPR_MIN(
+ transport_global->announce_incoming_window, UINT32_MAX);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
+ announce_incoming_window, announced);
gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_window_update_create(0, window_delta));
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("write", transport_global,
- incoming_window, window_delta);
- transport_global->incoming_window += window_delta;
+ grpc_chttp2_window_update_create(0, announced));
}
+ GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
+
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
void grpc_chttp2_perform_writes(
- grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
- finalize_outbuf(transport_writing);
+ finalize_outbuf(exec_ctx, transport_writing);
- GPR_ASSERT(transport_writing->outbuf.count > 0);
GPR_ASSERT(endpoint);
- switch (grpc_endpoint_write(endpoint, transport_writing->outbuf.slices,
- transport_writing->outbuf.count, finish_write_cb,
- transport_writing)) {
- case GRPC_ENDPOINT_WRITE_DONE:
- grpc_chttp2_terminate_writing(transport_writing, 1);
- break;
- case GRPC_ENDPOINT_WRITE_ERROR:
- grpc_chttp2_terminate_writing(transport_writing, 0);
- break;
- case GRPC_ENDPOINT_WRITE_PENDING:
- break;
+ if (transport_writing->outbuf.count > 0) {
+ grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
+ &transport_writing->done_cb);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, &transport_writing->done_cb, true, NULL);
}
}
-static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
+static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
+ GPR_TIMER_BEGIN("finalize_outbuf", 0);
+
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
- grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
- stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
- stream_writing->id, &transport_writing->hpack_compressor,
- &transport_writing->outbuf);
- stream_writing->sopb.nops = 0;
- if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
- gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_rst_stream_create(stream_writing->id,
- GRPC_CHTTP2_NO_ERROR));
+ uint32_t max_outgoing =
+ (uint32_t)GPR_MIN(GRPC_CHTTP2_MAX_PAYLOAD_LENGTH,
+ GPR_MIN(stream_writing->outgoing_window,
+ transport_writing->outgoing_window));
+ /* send initial metadata if it's available */
+ if (stream_writing->send_initial_metadata != NULL) {
+ grpc_chttp2_encode_header(
+ &transport_writing->hpack_compressor, stream_writing->id,
+ stream_writing->send_initial_metadata, 0, &transport_writing->outbuf);
+ stream_writing->send_initial_metadata = NULL;
+ stream_writing->sent_initial_metadata = 1;
+ }
+ /* send any window updates */
+ if (stream_writing->announce_window > 0 &&
+ stream_writing->send_initial_metadata == NULL) {
+ uint32_t announce = stream_writing->announce_window;
+ gpr_slice_buffer_add(
+ &transport_writing->outbuf,
+ grpc_chttp2_window_update_create(stream_writing->id,
+ stream_writing->announce_window));
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
+ announce_window, announce);
+ stream_writing->announce_window = 0;
+ }
+ /* fetch any body bytes */
+ while (!stream_writing->fetching && stream_writing->send_message &&
+ stream_writing->flow_controlled_buffer.length < max_outgoing &&
+ stream_writing->stream_fetched <
+ stream_writing->send_message->length) {
+ if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
+ &stream_writing->fetching_slice, max_outgoing,
+ &stream_writing->finished_fetch)) {
+ stream_writing->stream_fetched +=
+ GPR_SLICE_LENGTH(stream_writing->fetching_slice);
+ if (stream_writing->stream_fetched ==
+ stream_writing->send_message->length) {
+ stream_writing->send_message = NULL;
+ }
+ gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
+ stream_writing->fetching_slice);
+ } else {
+ stream_writing->fetching = 1;
+ }
+ }
+ /* send any body bytes */
+ if (stream_writing->flow_controlled_buffer.length > 0) {
+ if (max_outgoing > 0) {
+ uint32_t send_bytes = (uint32_t)GPR_MIN(
+ max_outgoing, stream_writing->flow_controlled_buffer.length);
+ int is_last_data_frame =
+ stream_writing->send_message == NULL &&
+ send_bytes == stream_writing->flow_controlled_buffer.length;
+ int is_last_frame = is_last_data_frame &&
+ stream_writing->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(
+ stream_writing->send_trailing_metadata);
+ grpc_chttp2_encode_data(
+ stream_writing->id, &stream_writing->flow_controlled_buffer,
+ send_bytes, is_last_frame, &transport_writing->outbuf);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
+ stream_writing, outgoing_window,
+ send_bytes);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
+ outgoing_window, send_bytes);
+ if (is_last_frame) {
+ stream_writing->send_trailing_metadata = NULL;
+ stream_writing->sent_trailing_metadata = 1;
+ }
+ if (is_last_data_frame) {
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ stream_writing->sent_message = 1;
+ }
+ } else if (transport_writing->outgoing_window == 0) {
+ grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
+ stream_writing);
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
+ }
+ }
+ /* send trailing metadata if it's available and we're ready for it */
+ if (stream_writing->send_message == NULL &&
+ stream_writing->flow_controlled_buffer.length == 0 &&
+ stream_writing->send_trailing_metadata != NULL) {
+ if (grpc_metadata_batch_is_empty(
+ stream_writing->send_trailing_metadata)) {
+ grpc_chttp2_encode_data(stream_writing->id,
+ &stream_writing->flow_controlled_buffer, 0, 1,
+ &transport_writing->outbuf);
+ } else {
+ grpc_chttp2_encode_header(&transport_writing->hpack_compressor,
+ stream_writing->id,
+ stream_writing->send_trailing_metadata, 1,
+ &transport_writing->outbuf);
+ }
+ if (!transport_writing->is_client && !stream_writing->read_closed) {
+ gpr_slice_buffer_add(&transport_writing->outbuf,
+ grpc_chttp2_rst_stream_create(
+ stream_writing->id, GRPC_CHTTP2_NO_ERROR));
+ }
+ stream_writing->send_trailing_metadata = NULL;
+ stream_writing->sent_trailing_metadata = 1;
+ }
+ /* if there's more to write, then loop, otherwise prepare to finish the
+ * write */
+ if ((stream_writing->flow_controlled_buffer.length > 0 ||
+ (stream_writing->send_message && !stream_writing->fetching)) &&
+ stream_writing->outgoing_window > 0) {
+ if (transport_writing->outgoing_window > 0) {
+ grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ } else {
+ grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
+ stream_writing);
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
+ }
+ } else {
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
-}
-static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status) {
- grpc_chttp2_transport_writing *transport_writing = tw;
- grpc_chttp2_terminate_writing(transport_writing,
- write_status == GRPC_ENDPOINT_CB_OK);
+ GPR_TIMER_END("finalize_outbuf", 0);
}
void grpc_chttp2_cleanup_writing(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
- if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
- stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
- if (!transport_global->is_client) {
- stream_global->read_closed = 1;
- }
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ if (stream_writing->sent_initial_metadata) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 1);
+ }
+ if (stream_writing->sent_message) {
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_message_finished, 1);
+ stream_writing->sent_message = 0;
+ }
+ if (stream_writing->sent_trailing_metadata) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished, 1);
+ }
+ if (stream_writing->sent_trailing_metadata) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ !transport_global->is_client, 1);
}
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
}
- transport_writing->outbuf.count = 0;
- transport_writing->outbuf.length = 0;
+ gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
}
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index 4dca5ddaa6..a7844ea8e5 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,18 +37,20 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/chttp2/http2_errors.h"
+#include "src/core/transport/chttp2/internal.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
-#include "src/core/transport/chttp2/internal.h"
+#include "src/core/transport/static_metadata.h"
#include "src/core/transport/transport_impl.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/useful.h>
#define DEFAULT_WINDOW 65535
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
@@ -74,59 +76,85 @@ int grpc_flowctl_trace = 0;
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
-static const grpc_transport_vtable vtable;
+#define STREAM_FROM_PARSING(sg) \
+ ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, parsing)))
-#if 0
-static void lock(grpc_chttp2_transport *t);
-static void unlock(grpc_chttp2_transport *t);
-#endif
+static const grpc_transport_vtable vtable;
static void unlock_check_channel_callbacks(grpc_chttp2_transport *t);
static void unlock_check_read_write_state(grpc_chttp2_transport *t);
/* forward declarations of various callbacks that we'll build closures around */
-static void writing_action(void *t, int iomgr_success_ignored);
-static void reading_action(void *t, int iomgr_success_ignored);
-static void parsing_action(void *t, int iomgr_success_ignored);
-static void notify_closed(void *t, int iomgr_success_ignored);
+static void writing_action(grpc_exec_ctx *exec_ctx, void *t,
+ bool iomgr_success_ignored);
+static void reading_action(grpc_exec_ctx *exec_ctx, void *t,
+ bool iomgr_success_ignored);
+static void parsing_action(grpc_exec_ctx *exec_ctx, void *t,
+ bool iomgr_success_ignored);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
- gpr_uint32 value);
+ uint32_t value);
/** Endpoint callback to process incoming data */
-static void recv_data(void *tp, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status error);
+static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, bool success);
/** Start disconnection chain */
-static void drop_connection(grpc_chttp2_transport *t);
+static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
/** Perform a transport_op */
-static void perform_op_locked(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- void *transport_op);
+static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, void *transport_op);
/** Cancel a stream: coming from the transport API */
-static void cancel_from_api(grpc_chttp2_transport_global *transport_global,
+static void cancel_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_status_code status);
+static void close_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ grpc_status_code status,
+ gpr_slice *optional_message);
+
/** Add endpoint from this transport to pollset */
-static void add_to_pollset_locked(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s_ignored,
- void *pollset);
+static void add_to_pollset_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored, void *pollset);
+static void add_to_pollset_set_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored,
+ void *pollset_set);
/** Start new streams that have been created if we can */
static void maybe_start_some_streams(
- grpc_chttp2_transport_global *transport_global);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global);
static void finish_global_actions(grpc_chttp2_transport *t);
-/*
+static void connectivity_state_set(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_connectivity_state state, const char *reason);
+
+static void check_read_ops(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global);
+
+static void incoming_byte_stream_update_flow_control(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
+ size_t have_already);
+
+static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global);
+
+/*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
-static void destruct_transport(grpc_chttp2_transport *t) {
+static void destruct_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
size_t i;
gpr_mu_lock(&t->executor.mu);
@@ -139,11 +167,10 @@ static void destruct_transport(grpc_chttp2_transport *t) {
grpc_chttp2_hpack_compressor_destroy(&t->writing.hpack_compressor);
gpr_slice_buffer_destroy(&t->parsing.qbuf);
+ gpr_slice_buffer_destroy(&t->read_buffer);
grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser);
grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser);
- grpc_mdstr_unref(t->parsing.str_grpc_timeout);
-
for (i = 0; i < STREAM_LIST_COUNT; i++) {
GPR_ASSERT(t->lists[i].head == NULL);
GPR_ASSERT(t->lists[i].tail == NULL);
@@ -154,6 +181,7 @@ static void destruct_transport(grpc_chttp2_transport *t) {
grpc_chttp2_stream_map_destroy(&t->parsing_stream_map);
grpc_chttp2_stream_map_destroy(&t->new_stream_map);
+ grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker);
gpr_mu_unlock(&t->executor.mu);
gpr_mu_destroy(&t->executor.mu);
@@ -162,26 +190,25 @@ static void destruct_transport(grpc_chttp2_transport *t) {
and maybe they hold resources that need to be freed */
while (t->global.pings.next != &t->global.pings) {
grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
- grpc_iomgr_add_delayed_callback(ping->on_recv, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, false, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
}
- grpc_mdctx_unref(t->metadata_context);
-
+ gpr_free(t->peer_string);
gpr_free(t);
}
#ifdef REFCOUNTING_DEBUG
#define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__)
-#define UNREF_TRANSPORT(t, r) unref_transport(t, r, __FILE__, __LINE__)
-static void unref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
+#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__)
+static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ const char *reason, const char *file, int line) {
gpr_log(GPR_DEBUG, "chttp2:unref:%p %d->%d %s [%s:%d]", t, t->refs.count,
t->refs.count - 1, reason, file, line);
if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
+ destruct_transport(exec_ctx, t);
}
static void ref_transport(grpc_chttp2_transport *t, const char *reason,
@@ -192,23 +219,20 @@ static void ref_transport(grpc_chttp2_transport *t, const char *reason,
}
#else
#define REF_TRANSPORT(t, r) ref_transport(t)
-#define UNREF_TRANSPORT(t, r) unref_transport(t)
-static void unref_transport(grpc_chttp2_transport *t) {
+#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t)
+static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
+ destruct_transport(exec_ctx, t);
}
static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
-static void init_transport(grpc_chttp2_transport *t,
- grpc_transport_setup_callback setup, void *arg,
+static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
- grpc_endpoint *ep, gpr_slice *slices, size_t nslices,
- grpc_mdctx *mdctx, int is_client) {
+ grpc_endpoint *ep, uint8_t is_client) {
size_t i;
int j;
- grpc_transport_setup_result sr;
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
@@ -219,38 +243,46 @@ static void init_transport(grpc_chttp2_transport *t,
t->ep = ep;
/* one ref is for destroy, the other for when ep becomes NULL */
gpr_ref_init(&t->refs, 2);
+ /* ref is dropped at transport close() */
+ gpr_ref_init(&t->shutdown_ep_refs, 1);
gpr_mu_init(&t->executor.mu);
grpc_mdctx_ref(mdctx);
+ t->peer_string = grpc_endpoint_get_peer(ep);
t->metadata_context = mdctx;
t->endpoint_reading = 1;
- t->global.error_state = GRPC_CHTTP2_ERROR_STATE_NONE;
t->global.next_stream_id = is_client ? 1 : 2;
t->global.is_client = is_client;
- t->global.outgoing_window = DEFAULT_WINDOW;
- t->global.incoming_window = DEFAULT_WINDOW;
+ t->writing.outgoing_window = DEFAULT_WINDOW;
+ t->parsing.incoming_window = DEFAULT_WINDOW;
+ t->global.stream_lookahead = DEFAULT_WINDOW;
t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
t->global.ping_counter = 1;
t->global.pings.next = t->global.pings.prev = &t->global.pings;
t->parsing.is_client = is_client;
- t->parsing.str_grpc_timeout =
- grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
t->parsing.deframe_state =
is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->writing.is_client = is_client;
+ grpc_connectivity_state_init(
+ &t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
+ is_client ? "client_transport" : "server_transport");
gpr_slice_buffer_init(&t->global.qbuf);
gpr_slice_buffer_init(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx);
- grpc_iomgr_closure_init(&t->writing_action, writing_action, t);
- grpc_iomgr_closure_init(&t->reading_action, reading_action, t);
- grpc_iomgr_closure_init(&t->parsing_action, parsing_action, t);
+ grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor);
+ grpc_closure_init(&t->writing_action, writing_action, t);
+ grpc_closure_init(&t->reading_action, reading_action, t);
+ grpc_closure_init(&t->parsing_action, parsing_action, t);
gpr_slice_buffer_init(&t->parsing.qbuf);
grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
- grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context);
+ grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser);
+
+ grpc_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
+ &t->writing);
+ grpc_closure_init(&t->recv_data, recv_data, t);
+ gpr_slice_buffer_init(&t->read_buffer);
- grpc_iomgr_closure_init(&t->channel_callback.notify_closed, notify_closed, t);
if (is_client) {
gpr_slice_buffer_add(
&t->global.qbuf,
@@ -297,7 +329,7 @@ static void init_transport(grpc_chttp2_transport *t,
GRPC_ARG_MAX_CONCURRENT_STREAMS);
} else {
push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
- channel_args->args[i].value.integer);
+ (uint32_t)channel_args->args[i].value.integer);
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
@@ -311,33 +343,61 @@ static void init_transport(grpc_chttp2_transport *t,
t->global.next_stream_id & 1,
is_client ? "client" : "server");
} else {
- t->global.next_stream_id = channel_args->args[i].value.integer;
+ t->global.next_stream_id =
+ (uint32_t)channel_args->args[i].value.integer;
+ }
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES);
+ } else if (channel_args->args[i].value.integer <= 5) {
+        gpr_log(GPR_ERROR, "%s: must be greater than 5",
+                GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES);
+ } else {
+ t->global.stream_lookahead =
+ (uint32_t)channel_args->args[i].value.integer;
+ }
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER);
+ } else if (channel_args->args[i].value.integer < 0) {
+ gpr_log(GPR_ERROR, "%s: must be non-negative",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER);
+ } else {
+ push_setting(t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+ (uint32_t)channel_args->args[i].value.integer);
+ }
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER);
+ } else if (channel_args->args[i].value.integer < 0) {
+ gpr_log(GPR_ERROR, "%s: must be non-negative",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER);
+ } else {
+ grpc_chttp2_hpack_compressor_set_max_usable_size(
+ &t->writing.hpack_compressor,
+ (uint32_t)channel_args->args[i].value.integer);
}
}
}
}
+}
- gpr_mu_lock(&t->executor.mu);
- t->executor.channel_callback_active = 1;
- t->executor.global_active = 1;
- REF_TRANSPORT(t, "init"); /* matches unref at end of this function */
- gpr_mu_unlock(&t->executor.mu);
-
- sr = setup(arg, &t->base, t->metadata_context);
-
- t->channel_callback.cb = sr.callbacks;
- t->channel_callback.cb_user_data = sr.user_data;
- t->executor.channel_callback_active = 0;
-
- finish_global_actions(t);
-
- REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
- recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK);
-
- UNREF_TRANSPORT(t, "init");
+/** Block grpc_endpoint_shutdown from being called until a paired
+    allow_endpoint_shutdown call has been made */
+static void prevent_endpoint_shutdown(grpc_chttp2_transport *t) {
+ GPR_ASSERT(t->ep);
+ gpr_ref(&t->shutdown_ep_refs);
}
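
The shutdown_ep_refs counter above gates endpoint shutdown: init_transport seeds it at 1, each prevent_endpoint_shutdown adds a reference while a write is in flight, and the paired allow_endpoint_shutdown_{locked,unlocked} further down only call grpc_endpoint_shutdown once the count reaches zero. A minimal standalone sketch of the same pattern, using hypothetical fake_transport/fake_endpoint_shutdown names and a plain int in place of gpr_refcount, might look like this:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the transport/endpoint types used above. */
typedef struct {
  int shutdown_gate;   /* seeded at 1: the base ref dropped at close time */
  int endpoint_alive;
} fake_transport;

static void fake_endpoint_shutdown(fake_transport *t) {
  t->endpoint_alive = 0;
  printf("endpoint shut down\n");
}

/* prevent/allow pair: shutdown runs only when every prevent has been matched
   by an allow AND the base reference has been released. */
static void prevent_shutdown(fake_transport *t) { t->shutdown_gate++; }

static void allow_shutdown(fake_transport *t) {
  assert(t->shutdown_gate > 0);
  if (--t->shutdown_gate == 0) fake_endpoint_shutdown(t);
}

int main(void) {
  fake_transport t = {1, 1};
  prevent_shutdown(&t); /* a write is in flight: gate = 2 */
  allow_shutdown(&t);   /* close_transport drops the base ref: gate = 1 */
  allow_shutdown(&t);   /* write completes: gate = 0, shutdown runs */
  assert(!t.endpoint_alive);
  return 0;
}
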
-static void destroy_transport_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *arg_ignored) {
+static void destroy_transport_locked(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored,
+ void *arg_ignored) {
t->destroying = 1;
drop_connection(t);
}
@@ -348,15 +408,77 @@ static void destroy_transport(grpc_transport *gt) {
UNREF_TRANSPORT(t, "destroy");
}
-static void close_transport_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *arg_ignored) {
+static void allow_endpoint_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (gpr_unref(&t->shutdown_ep_refs)) {
+ if (t->ep) {
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
+ }
+ }
+}
+
+static void allow_endpoint_shutdown_unlocked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (gpr_unref(&t->shutdown_ep_refs)) {
+ gpr_mu_lock(&t->mu);
+ if (t->ep) {
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
+ }
+ gpr_mu_unlock(&t->mu);
+ }
+}
+
+static void destroy_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_endpoint_destroy(exec_ctx, t->ep);
+ t->ep = NULL;
+ /* safe because we'll still have the ref for write */
+ UNREF_TRANSPORT(exec_ctx, t, "disconnect");
+}
+
+static void close_transport_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored,
+ void *arg_ignored) {
if (!t->closed) {
t->closed = 1;
+ connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_FATAL_FAILURE,
+ "close_transport");
if (t->ep) {
- grpc_endpoint_shutdown(t->ep);
+ allow_endpoint_shutdown_locked(exec_ctx, t);
+ }
+
+ /* flush writable stream list to avoid dangling references */
+ grpc_chttp2_stream_global *stream_global;
+ grpc_chttp2_stream_writing *stream_writing;
+ while (grpc_chttp2_list_pop_writable_stream(
+ &t->global, &t->writing, &stream_global, &stream_writing)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
}
}
}
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
+ const char *reason) {
+ grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount, reason);
+}
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global,
+ const char *reason) {
+ grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount,
+ reason);
+}
+#else
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global) {
+ grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount);
+}
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global) {
+ grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount);
+}
+#endif
+
static void close_transport(grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_run_with_global_lock(t, NULL, close_transport_locked, NULL, 0);
@@ -367,7 +489,8 @@ typedef struct {
gpr_slice debug_data;
} goaway_arg;
-static void goaway_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *a) {
+static void goaway_locked(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored, void *a) {
goaway_arg *arg = a;
grpc_chttp2_goaway_append(t->global.last_incoming_stream_id,
grpc_chttp2_grpc_status_to_http2_error(arg->status),
@@ -383,32 +506,42 @@ static void goaway(grpc_transport *gt, grpc_status_code status,
grpc_chttp2_run_with_global_lock(t, NULL, goaway_locked, &arg, sizeof(arg));
}
-static void finish_init_stream_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *arg_ignored) {
+static void finish_init_stream_locked(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ void *arg_ignored) {
grpc_chttp2_register_stream(t, s);
}
-static int init_stream(grpc_transport *gt, grpc_stream *gs,
- const void *server_data, grpc_transport_op *initial_op) {
+static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_stream_refcount *refcount,
+ const void *server_data) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
memset(s, 0, sizeof(*s));
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.incoming_metadata);
- grpc_chttp2_incoming_metadata_buffer_init(&s->global.incoming_metadata);
- grpc_sopb_init(&s->writing.sopb);
- grpc_sopb_init(&s->global.incoming_sopb);
+ s->refcount = refcount;
+ GRPC_CHTTP2_STREAM_REF(&s->global, "chttp2");
+
+ grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_init(
+ &s->global.received_initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(
+ &s->global.received_trailing_metadata);
grpc_chttp2_data_parser_init(&s->parsing.data_parser);
+ gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
REF_TRANSPORT(t, "stream");
if (server_data) {
GPR_ASSERT(t->executor.parsing_active);
- s->global.id = (gpr_uint32)(gpr_uintptr)server_data;
+ s->global.id = (uint32_t)(uintptr_t)server_data;
+ s->parsing.id = s->global.id;
s->global.outgoing_window =
t->global.settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- s->parsing.incoming_window = s->global.incoming_window =
+ s->parsing.incoming_window = s->global.max_recv_bytes =
t->global.settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
@@ -417,36 +550,76 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
}
grpc_chttp2_run_with_global_lock(t, s, finish_init_stream_locked, NULL, 0);
- if (initial_op) grpc_chttp2_run_with_global_lock(t, s, perform_op_locked, initial_op, sizeof(*initial_op));
return 0;
}
-static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
+static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+#if 0
+ int i;
+ grpc_byte_stream *bs;
+
+ GPR_TIMER_BEGIN("destroy_stream", 0);
+
+ gpr_mu_lock(&t->mu);
+
+ GPR_ASSERT((s->global.write_closed && s->global.read_closed) ||
+ s->global.id == 0);
+ GPR_ASSERT(!s->global.in_stream_map);
+ if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
+ close_transport_locked(exec_ctx, t);
+ }
+ if (!t->parsing_active && s->global.id) {
+ GPR_ASSERT(grpc_chttp2_stream_map_find(&t->parsing_stream_map,
+ s->global.id) == NULL);
+ }
+
+ grpc_chttp2_list_remove_unannounced_incoming_window_available(&t->global,
+ &s->global);
+ grpc_chttp2_list_remove_stalled_by_transport(&t->global, &s->global);
+#endif
+
  int i;
+  grpc_byte_stream *bs;
for (i = 0; i < STREAM_LIST_COUNT; i++) {
- GPR_ASSERT(!s->included[i]);
+ if (s->included[i]) {
+ gpr_log(GPR_ERROR, "%s stream %d still included in list %d",
+ t->global.is_client ? "client" : "server", s->global.id, i);
+ abort();
+ }
}
- GPR_ASSERT(s->global.outgoing_sopb == NULL);
- GPR_ASSERT(s->global.publish_sopb == NULL);
- grpc_sopb_destroy(&s->writing.sopb);
- grpc_sopb_destroy(&s->global.incoming_sopb);
- grpc_chttp2_data_parser_destroy(&s->parsing.data_parser);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.incoming_metadata);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->global.incoming_metadata);
- grpc_chttp2_incoming_metadata_live_op_buffer_end(
- &s->global.outstanding_metadata);
+ while (
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&s->global.incoming_frames))) {
+ grpc_byte_stream_destroy(exec_ctx, bs);
+ }
- UNREF_TRANSPORT(t, "stream");
+ GPR_ASSERT(s->global.send_initial_metadata_finished == NULL);
+ GPR_ASSERT(s->global.send_message_finished == NULL);
+ GPR_ASSERT(s->global.send_trailing_metadata_finished == NULL);
+ GPR_ASSERT(s->global.recv_initial_metadata_ready == NULL);
+ GPR_ASSERT(s->global.recv_message_ready == NULL);
+ GPR_ASSERT(s->global.recv_trailing_metadata_finished == NULL);
+ grpc_chttp2_data_parser_destroy(exec_ctx, &s->parsing.data_parser);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(
+ &s->global.received_initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_destroy(
+ &s->global.received_trailing_metadata);
+ gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
+
+ UNREF_TRANSPORT(exec_ctx, t, "stream");
+
+ GPR_TIMER_END("destroy_stream", 0);
}
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id) {
+ grpc_chttp2_transport_parsing *transport_parsing, uint32_t id) {
grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
grpc_chttp2_stream *s =
grpc_chttp2_stream_map_find(&t->parsing_stream_map, id);
@@ -454,18 +627,20 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
}
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ uint32_t id) {
grpc_chttp2_stream *accepting;
grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
GPR_ASSERT(t->accepting_stream == NULL);
t->accepting_stream = &accepting;
- t->channel_callback.cb->accept_stream(t->channel_callback.cb_user_data,
- &t->base, (void *)(gpr_uintptr)id);
+ t->channel_callback.accept_stream(exec_ctx,
+ t->channel_callback.accept_stream_user_data,
+ &t->base, (void *)(uintptr_t)id);
t->accepting_stream = NULL;
return &accepting->parsing;
}
-/*
+/*******************************************************************************
* LOCK MANAGEMENT
*/
@@ -476,12 +651,14 @@ static void finish_global_actions(grpc_chttp2_transport *t) {
for (;;) {
unlock_check_read_write_state(t);
- if (!t->executor.writing_active && t->global.error_state == GRPC_CHTTP2_ERROR_STATE_NONE &&
+ if (!t->executor.writing_active && !t->closed &&
grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
t->executor.writing_active = 1;
REF_TRANSPORT(t, "writing");
+ prevent_endpoint_shutdown(t);
grpc_chttp2_schedule_closure(&t->global, &t->writing_action, 1);
}
+ check_read_ops(exec_ctx, &t->global);
unlock_check_channel_callbacks(t);
run_closures = t->global.pending_closures;
@@ -517,9 +694,10 @@ static void finish_global_actions(grpc_chttp2_transport *t) {
}
}
-void grpc_chttp2_run_with_global_lock(grpc_chttp2_transport *t, grpc_chttp2_stream *optional_stream,
- void (*action)(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *arg),
- void *arg, size_t sizeof_arg) {
+void grpc_chttp2_run_with_global_lock(
+ grpc_chttp2_transport *t, grpc_chttp2_stream *optional_stream,
+ void (*action)(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *arg),
+ void *arg, size_t sizeof_arg) {
grpc_chttp2_executor_action_header *hdr;
REF_TRANSPORT(t, "run_global");
@@ -562,15 +740,23 @@ void grpc_chttp2_run_with_global_lock(grpc_chttp2_transport *t, grpc_chttp2_stre
UNREF_TRANSPORT(t, "run_global");
}
-/*
+/*******************************************************************************
* OUTPUT PROCESSING
*/
+void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global) {
+ if (!TRANSPORT_FROM_GLOBAL(transport_global)->closed &&
+ grpc_chttp2_list_add_writable_stream(transport_global, stream_global)) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
+ }
+}
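
grpc_chttp2_become_writable above takes a "chttp2_writing" stream ref only when the stream is newly added to the writable list (and never once the transport is closed); the matching unrefs are visible in close_transport_locked, which drains the writable list, and in remove_stream, so the writing path is never left holding a dangling stream pointer.
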
+
static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
- gpr_uint32 value) {
+ uint32_t value) {
const grpc_chttp2_setting_parameters *sp =
&grpc_chttp2_settings_parameters[id];
- gpr_uint32 use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
+ uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
if (use_value != value) {
gpr_log(GPR_INFO, "Requested parameter %s clamped from %d to %d", sp->name,
value, use_value);
@@ -581,24 +767,31 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
}
}
-static void terminate_writing_with_lock(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *a) {
- int success = *(int*)a;
+static void terminate_writing_with_lock(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored,
+ void *a) {
+  int success = *(int *)a;
+  grpc_chttp2_stream_global *stream_global;
+
+ allow_endpoint_shutdown_locked(exec_ctx, t);
if (!success) {
- drop_connection(t);
+ drop_connection(exec_ctx, t);
}
- /* cleanup writing related jazz */
- grpc_chttp2_cleanup_writing(&t->global, &t->writing);
+ grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
- /* leave the writing flag up on shutdown to prevent further writes in unlock()
+ while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global,
+ &stream_global)) {
+ fail_pending_writes(exec_ctx, stream_global);
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
+ }
+
+ /* leave the writing flag up on shutdown to prevent further writes in
+ unlock()
from starting */
t->executor.writing_active = 0;
if (t->ep && !t->endpoint_reading) {
- grpc_endpoint_destroy(t->ep);
- t->ep = NULL;
- UNREF_TRANSPORT(
- t, "disconnect"); /* safe because we'll still have the ref for write */
+ destroy_endpoint(exec_ctx, t);
}
UNREF_TRANSPORT(t, "writing");
@@ -607,31 +800,34 @@ static void terminate_writing_with_lock(grpc_chttp2_transport *t, grpc_chttp2_st
void grpc_chttp2_terminate_writing(
grpc_chttp2_transport_writing *transport_writing, int success) {
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
- grpc_chttp2_run_with_global_lock(t, NULL, terminate_writing_with_lock, &success, sizeof(success));
+ grpc_chttp2_run_with_global_lock(t, NULL, terminate_writing_with_lock,
+ &success, sizeof(success));
}
-static void writing_action(void *gt, int iomgr_success_ignored) {
+static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
+ bool iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
- grpc_chttp2_perform_writes(&t->writing, t->ep);
+ GPR_TIMER_BEGIN("writing_action", 0);
+ grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
+ GPR_TIMER_END("writing_action", 0);
}
void grpc_chttp2_add_incoming_goaway(
- grpc_chttp2_transport_global *transport_global, gpr_uint32 goaway_error,
- gpr_slice goaway_text) {
- char *msg = gpr_hexdump((char*)GPR_SLICE_START_PTR(goaway_text), GPR_SLICE_LENGTH(goaway_text), GPR_HEXDUMP_PLAINTEXT);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ uint32_t goaway_error, gpr_slice goaway_text) {
+ char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg);
gpr_free(msg);
- if (transport_global->goaway_state == GRPC_CHTTP2_ERROR_STATE_NONE) {
- transport_global->goaway_state = GRPC_CHTTP2_ERROR_STATE_SEEN;
- transport_global->goaway_text = goaway_text;
- transport_global->goaway_error = goaway_error;
- } else {
- gpr_slice_unref(goaway_text);
- }
+ gpr_slice_unref(goaway_text);
+ transport_global->seen_goaway = 1;
+ connectivity_state_set(exec_ctx, transport_global, GRPC_CHANNEL_FATAL_FAILURE,
+ "got_goaway");
}
static void maybe_start_some_streams(
- grpc_chttp2_transport_global *transport_global) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
grpc_chttp2_stream_global *stream_global;
+ uint32_t stream_incoming_window;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
while (transport_global->next_stream_id <= MAX_CLIENT_STREAM_ID &&
@@ -641,115 +837,214 @@ static void maybe_start_some_streams(
[GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
&stream_global)) {
+ /* safe since we can't (legally) be parsing this stream yet */
+ grpc_chttp2_stream_parsing *stream_parsing =
+ &STREAM_FROM_GLOBAL(stream_global)->parsing;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
transport_global->is_client ? "CLI" : "SVR", stream_global,
transport_global->next_stream_id));
GPR_ASSERT(stream_global->id == 0);
- stream_global->id = transport_global->next_stream_id;
+ stream_global->id = stream_parsing->id = transport_global->next_stream_id;
transport_global->next_stream_id += 2;
if (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- grpc_chttp2_add_incoming_goaway(
- transport_global, GRPC_CHTTP2_NO_ERROR,
- gpr_slice_from_copied_string("Exceeded sequence number limit"));
+ connectivity_state_set(exec_ctx, transport_global,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "no_more_stream_ids");
}
stream_global->outgoing_window =
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_global->incoming_window =
+ stream_parsing->incoming_window = stream_incoming_window =
transport_global->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ stream_global->max_recv_bytes =
+ GPR_MAX(stream_incoming_window, stream_global->max_recv_bytes);
grpc_chttp2_stream_map_add(
&TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
stream_global->id, STREAM_FROM_GLOBAL(stream_global));
stream_global->in_stream_map = 1;
transport_global->concurrent_stream_count++;
- grpc_chttp2_list_add_incoming_window_updated(transport_global,
- stream_global);
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ grpc_chttp2_become_writable(transport_global, stream_global);
}
/* cancel out streams that will never be started */
while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
&stream_global)) {
- cancel_from_api(transport_global, stream_global, GRPC_STATUS_UNAVAILABLE);
+ cancel_from_api(exec_ctx, transport_global, stream_global,
+ GRPC_STATUS_UNAVAILABLE);
}
}
-static void perform_op_locked(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- void *transport_op) {
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global = &s->global;
- grpc_transport_op *op = transport_op;
+static grpc_closure *add_closure_barrier(grpc_closure *closure) {
+ closure->final_data += 2;
+ return closure;
+}
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_closure **pclosure, int success) {
+ grpc_closure *closure = *pclosure;
+ if (closure == NULL) {
+ return;
+ }
+ closure->final_data -= 2;
+ if (!success) {
+ closure->final_data |= 1;
+ }
+ if (closure->final_data < 2) {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, closure->final_data == 0, NULL);
+ }
+ *pclosure = NULL;
+}
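
final_data here packs two things into one integer: bit 0 is a sticky failure flag and the remaining bits count outstanding sub-operations in units of two, so the wrapped closure fires once the count drops below 2, and reports success only if no step failed. A standalone miniature of the same arithmetic, using a hypothetical mini_closure type with a plain function pointer instead of grpc_closure, might be:

#include <stdio.h>

/* Hypothetical miniature of the final_data barrier: bit 0 records "a step
   failed", the higher bits count outstanding steps in units of two. */
typedef struct {
  int final_data;
  void (*cb)(int success);
} mini_closure;

static void on_complete_cb(int success) {
  printf("on_complete(success=%d)\n", success);
}

static mini_closure *add_barrier(mini_closure *c) {
  c->final_data += 2; /* one more step must finish before cb may run */
  return c;
}

static void complete_step(mini_closure **pc, int success) {
  mini_closure *c = *pc;
  if (c == NULL) return;
  c->final_data -= 2;
  if (!success) c->final_data |= 1; /* sticky failure bit */
  if (c->final_data < 2) c->cb(c->final_data == 0);
  *pc = NULL;
}

int main(void) {
  mini_closure on_complete = {2, on_complete_cb}; /* initial count: 2 */
  mini_closure *send_md = add_barrier(&on_complete);  /* final_data = 4 */
  mini_closure *send_msg = add_barrier(&on_complete); /* final_data = 6 */
  complete_step(&send_md, 1);  /* 4: still waiting on send_msg + initial */
  complete_step(&send_msg, 0); /* 2, then the failure bit makes it 3 */
  mini_closure *self = &on_complete;
  complete_step(&self, 1); /* 1 < 2: cb runs with success=0 (a step failed) */
  return 0;
}
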
+
+static int contains_non_ok_status(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_metadata_batch *batch) {
+ grpc_linked_mdelem *l;
+ for (l = batch->list.head; l; l = l->next) {
+ if (l->md->key == GRPC_MDSTR_GRPC_STATUS &&
+ l->md != GRPC_MDELEM_GRPC_STATUS_0) {
+ return 1;
+ }
+ }
+ return 0;
+}
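
contains_non_ok_status is what drives the seen_error flag used below: any outgoing initial or trailing metadata batch carrying a grpc-status other than 0 marks the stream, and check_read_ops later drops any queued incoming frames for streams flagged this way before publishing metadata.
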
+
+static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, bool success) {}
+
+static void perform_stream_op_locked(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
+ grpc_closure *on_complete;
+
+ GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
+
+ on_complete = op->on_complete;
+ if (on_complete == NULL) {
+ on_complete = grpc_closure_create(do_nothing, NULL);
+ }
+  /* use final_data as a barrier until enqueue time; the initial count is
+ dropped at the end of this function */
+ on_complete->final_data = 2;
if (op->cancel_with_status != GRPC_STATUS_OK) {
- cancel_from_api(transport_global, stream_global, op->cancel_with_status);
+ cancel_from_api(exec_ctx, transport_global, stream_global,
+ op->cancel_with_status);
}
- if (op->send_ops) {
- GPR_ASSERT(stream_global->outgoing_sopb == NULL);
- stream_global->send_done_closure = op->on_done_send;
- if (!stream_global->cancelled) {
- stream_global->outgoing_sopb = op->send_ops;
- if (op->is_last_send &&
- stream_global->write_state == GRPC_WRITE_STATE_OPEN) {
- stream_global->write_state = GRPC_WRITE_STATE_QUEUED_CLOSE;
- }
- if (stream_global->id == 0) {
- GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_DEBUG,
- "HTTP:%s: New grpc_chttp2_stream %p waiting for concurrency",
- transport_global->is_client ? "CLI" : "SVR", stream_global));
+ if (op->close_with_status != GRPC_STATUS_OK) {
+ close_from_api(exec_ctx, transport_global, stream_global,
+ op->close_with_status, op->optional_close_message);
+ }
+
+ if (op->send_initial_metadata != NULL) {
+ GPR_ASSERT(stream_global->send_initial_metadata_finished == NULL);
+ stream_global->send_initial_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->send_initial_metadata = op->send_initial_metadata;
+ if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (!stream_global->write_closed) {
+ if (transport_global->is_client) {
+ GPR_ASSERT(stream_global->id == 0);
grpc_chttp2_list_add_waiting_for_concurrency(transport_global,
stream_global);
- maybe_start_some_streams(transport_global);
- } else if (stream_global->outgoing_window > 0) {
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ maybe_start_some_streams(exec_ctx, transport_global);
+ } else {
+ GPR_ASSERT(stream_global->id != 0);
+ grpc_chttp2_become_writable(transport_global, stream_global);
}
} else {
- grpc_sopb_reset(op->send_ops);
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->send_done_closure, 0);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 0);
+ }
+ }
+
+ if (op->send_message != NULL) {
+ GPR_ASSERT(stream_global->send_message_finished == NULL);
+ GPR_ASSERT(stream_global->send_message == NULL);
+ stream_global->send_message_finished = add_closure_barrier(on_complete);
+ if (stream_global->write_closed) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_message_finished, 0);
+ } else {
+ stream_global->send_message = op->send_message;
+ if (stream_global->id != 0) {
+ grpc_chttp2_become_writable(transport_global, stream_global);
+ }
}
}
- if (op->recv_ops) {
- GPR_ASSERT(stream_global->publish_sopb == NULL);
- GPR_ASSERT(stream_global->published_state != GRPC_STREAM_CLOSED);
- stream_global->recv_done_closure = op->on_done_recv;
- stream_global->publish_sopb = op->recv_ops;
- stream_global->publish_sopb->nops = 0;
- stream_global->publish_state = op->recv_state;
- grpc_chttp2_incoming_metadata_live_op_buffer_end(
- &stream_global->outstanding_metadata);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
- grpc_chttp2_list_add_writable_window_update_stream(transport_global,
- stream_global);
+ if (op->send_trailing_metadata != NULL) {
+ GPR_ASSERT(stream_global->send_trailing_metadata_finished == NULL);
+ stream_global->send_trailing_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->send_trailing_metadata = op->send_trailing_metadata;
+ if (contains_non_ok_status(transport_global, op->send_trailing_metadata)) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (stream_global->write_closed) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished,
+ grpc_metadata_batch_is_empty(op->send_trailing_metadata));
+ } else if (stream_global->id != 0) {
+ /* TODO(ctiller): check if there's flow control for any outstanding
+ bytes before going writable */
+ grpc_chttp2_become_writable(transport_global, stream_global);
+ }
}
- if (op->bind_pollset) {
- add_to_pollset_locked(TRANSPORT_FROM_GLOBAL(transport_global), NULL,
- op->bind_pollset);
+ if (op->recv_initial_metadata != NULL) {
+ GPR_ASSERT(stream_global->recv_initial_metadata_ready == NULL);
+ stream_global->recv_initial_metadata_ready =
+ op->recv_initial_metadata_ready;
+ stream_global->recv_initial_metadata = op->recv_initial_metadata;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
- if (op->on_consumed) {
- grpc_chttp2_schedule_closure(transport_global, op->on_consumed, 1);
+ if (op->recv_message != NULL) {
+ GPR_ASSERT(stream_global->recv_message_ready == NULL);
+ stream_global->recv_message_ready = op->recv_message_ready;
+ stream_global->recv_message = op->recv_message;
+ if (stream_global->id != 0 &&
+ (stream_global->incoming_frames.head == NULL ||
+ stream_global->incoming_frames.head->is_tail)) {
+ incoming_byte_stream_update_flow_control(
+ transport_global, stream_global, transport_global->stream_lookahead,
+ 0);
+ }
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+
+ if (op->recv_trailing_metadata != NULL) {
+ GPR_ASSERT(stream_global->recv_trailing_metadata_finished == NULL);
+ stream_global->recv_trailing_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->recv_trailing_metadata = op->recv_trailing_metadata;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
+
+ grpc_chttp2_complete_closure_step(exec_ctx, &on_complete, 1);
+
+ GPR_TIMER_END("perform_stream_op_locked", 0);
}
-static void perform_op(grpc_transport *gt, grpc_stream *gs,
- grpc_transport_op *op) {
+static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_transport_stream_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
- grpc_chttp2_run_with_global_lock(t, s, perform_op_locked, op, sizeof(*op));
+ grpc_chttp2_run_with_global_lock(t, s, perform_stream_op_locked, op,
+ sizeof(*op));
}
-static void send_ping_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *a) {
+static void send_ping_locked(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_ignored, void *a) {
grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p));
p->next = &t->global.pings;
p->prev = p->next->prev;
@@ -762,28 +1057,163 @@ static void send_ping_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ign
p->id[5] = (t->global.ping_counter >> 16) & 0xff;
p->id[6] = (t->global.ping_counter >> 8) & 0xff;
p->id[7] = t->global.ping_counter & 0xff;
- p->on_recv = *(grpc_iomgr_closure**)a;
+ p->on_recv = *(grpc_iomgr_closure **)a;
gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
}
static void send_ping(grpc_transport *gt, grpc_iomgr_closure *on_recv) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_chttp2_run_with_global_lock(t, NULL, send_ping_locked, &on_recv, sizeof(on_recv));
+ grpc_chttp2_run_with_global_lock(t, NULL, send_ping_locked, &on_recv,
+ sizeof(on_recv));
}
-/*
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
+ const uint8_t *opaque_8bytes) {
+ grpc_chttp2_outstanding_ping *ping;
+ grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
+ grpc_chttp2_transport_global *transport_global = &t->global;
+ lock(t);
+ for (ping = transport_global->pings.next; ping != &transport_global->pings;
+ ping = ping->next) {
+ if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
+ grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, true, NULL);
+ ping->next->prev = ping->prev;
+ ping->prev->next = ping->next;
+ gpr_free(ping);
+ break;
+ }
+ }
+ unlock(exec_ctx, t);
+}
+
+static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_transport_op *op) {
+ bool close_transport = false;
+
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
+
+ if (op->on_connectivity_state_change != NULL) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
+ op->on_connectivity_state_change);
+ }
+
+ if (op->send_goaway) {
+ t->global.sent_goaway = 1;
+ grpc_chttp2_goaway_append(
+ t->global.last_incoming_stream_id,
+ (uint32_t)grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
+ gpr_slice_ref(*op->goaway_message), &t->global.qbuf);
+ close_transport = !grpc_chttp2_has_streams(t);
+ }
+
+ if (op->set_accept_stream) {
+ t->channel_callback.accept_stream = op->set_accept_stream_fn;
+ t->channel_callback.accept_stream_user_data =
+ op->set_accept_stream_user_data;
+ }
+
+ if (op->bind_pollset) {
+ add_to_pollset_locked(exec_ctx, t, op->bind_pollset);
+ }
+
+ if (op->bind_pollset_set) {
+ add_to_pollset_set_locked(exec_ctx, t, op->bind_pollset_set);
+ }
+
+ if (op->send_ping) {
+ send_ping_locked(t, op->send_ping);
+ }
+
+ if (op->disconnect) {
+ close_transport_locked(exec_ctx, t);
+ }
+
+ if (close_transport) {
+ close_transport_locked(exec_ctx, t);
+ }
+}
+
+static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_transport_op *op) {
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+
+ lock(t);
+
+ /* If there's a set_accept_stream ensure that we're not parsing
+ to avoid changing things out from underneath */
+ if (t->parsing_active && op->set_accept_stream) {
+ GPR_ASSERT(t->post_parsing_op == NULL);
+ t->post_parsing_op = gpr_malloc(sizeof(*op));
+ memcpy(t->post_parsing_op, op, sizeof(*op));
+ } else {
+ perform_transport_op_locked(exec_ctx, t, op);
+ }
+
+ unlock(exec_ctx, t);
+}
+
+/*******************************************************************************
* INPUT PROCESSING
*/
-static grpc_stream_state compute_state(gpr_uint8 write_closed,
- gpr_uint8 read_closed) {
- if (write_closed && read_closed) return GRPC_STREAM_CLOSED;
- if (write_closed) return GRPC_STREAM_SEND_CLOSED;
- if (read_closed) return GRPC_STREAM_RECV_CLOSED;
- return GRPC_STREAM_OPEN;
+static void check_read_ops(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global) {
+ grpc_chttp2_stream_global *stream_global;
+ grpc_byte_stream *bs;
+ while (
+ grpc_chttp2_list_pop_check_read_ops(transport_global, &stream_global)) {
+ if (stream_global->recv_initial_metadata_ready != NULL &&
+ stream_global->published_initial_metadata) {
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ &stream_global->received_initial_metadata,
+ stream_global->recv_initial_metadata);
+ grpc_exec_ctx_enqueue(
+ exec_ctx, stream_global->recv_initial_metadata_ready, true, NULL);
+ stream_global->recv_initial_metadata_ready = NULL;
+ }
+ if (stream_global->recv_message_ready != NULL) {
+ while (stream_global->seen_error &&
+ (bs = grpc_chttp2_incoming_frame_queue_pop(
+ &stream_global->incoming_frames)) != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, bs);
+ }
+ if (stream_global->incoming_frames.head != NULL) {
+ *stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
+ &stream_global->incoming_frames);
+ GPR_ASSERT(*stream_global->recv_message != NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, true,
+ NULL);
+ stream_global->recv_message_ready = NULL;
+ } else if (stream_global->published_trailing_metadata) {
+ *stream_global->recv_message = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, true,
+ NULL);
+ stream_global->recv_message_ready = NULL;
+ }
+ }
+ if (stream_global->recv_trailing_metadata_finished != NULL &&
+ stream_global->read_closed && stream_global->write_closed) {
+ while (stream_global->seen_error &&
+ (bs = grpc_chttp2_incoming_frame_queue_pop(
+ &stream_global->incoming_frames)) != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, bs);
+ }
+ if (stream_global->incoming_frames.head == NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ &stream_global->received_trailing_metadata,
+ stream_global->recv_trailing_metadata);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->recv_trailing_metadata_finished, 1);
+ }
+ }
+ }
}
-static void remove_stream(grpc_chttp2_transport *t, gpr_uint32 id) {
+static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ uint32_t id) {
size_t new_stream_count;
grpc_chttp2_stream *s =
grpc_chttp2_stream_map_delete(&t->parsing_stream_map, id);
@@ -794,130 +1224,256 @@ static void remove_stream(grpc_chttp2_transport *t, gpr_uint32 id) {
s->global.in_stream_map = 0;
if (t->parsing.incoming_stream == &s->parsing) {
t->parsing.incoming_stream = NULL;
- grpc_chttp2_parsing_become_skip_parser(&t->parsing);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, &t->parsing);
+ }
+ if (s->parsing.data_parser.parsing_frame != NULL) {
+ grpc_chttp2_incoming_byte_stream_finished(
+ exec_ctx, s->parsing.data_parser.parsing_frame, 0, 0);
+ s->parsing.data_parser.parsing_frame = NULL;
}
- new_stream_count =
- grpc_chttp2_stream_map_size(&t->parsing_stream_map) +
- grpc_chttp2_stream_map_size(&t->new_stream_map);
+ if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
+ close_transport_locked(exec_ctx, t);
+ }
+ if (grpc_chttp2_list_remove_writable_stream(&t->global, &s->global)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "chttp2_writing");
+ }
+
+ new_stream_count = grpc_chttp2_stream_map_size(&t->parsing_stream_map) +
+ grpc_chttp2_stream_map_size(&t->new_stream_map);
+ GPR_ASSERT(new_stream_count <= UINT32_MAX);
if (new_stream_count != t->global.concurrent_stream_count) {
- t->global.concurrent_stream_count = new_stream_count;
- maybe_start_some_streams(&t->global);
+ t->global.concurrent_stream_count = (uint32_t)new_stream_count;
+ maybe_start_some_streams(exec_ctx, &t->global);
}
}
-static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global;
- grpc_stream_state state;
-
- if (!t->executor.parsing_active) {
- /* if a stream is in the stream map, and gets cancelled, we need to ensure
- we are not parsing before continuing the cancellation to keep things in
- a sane state */
- while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
- &stream_global)) {
- GPR_ASSERT(stream_global->in_stream_map);
- GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_OPEN);
- GPR_ASSERT(stream_global->read_closed);
- remove_stream(t, stream_global->id);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+static void cancel_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ grpc_status_code status) {
+ if (stream_global->id != 0) {
+ gpr_slice_buffer_add(
+ &transport_global->qbuf,
+ grpc_chttp2_rst_stream_create(
+ stream_global->id,
+ (uint32_t)grpc_chttp2_grpc_status_to_http2_error(status)));
+ }
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
+ NULL);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
+ 1);
+}
+
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ grpc_status_code status, gpr_slice *slice) {
+ if (status != GRPC_STATUS_OK) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ /* stream_global->recv_trailing_metadata_finished gives us a
+ last chance replacement: we've received trailing metadata,
+ but something more important has become available to signal
+ to the upper layers - drop what we've got, and then publish
+ what we want - which is safe because we haven't told anyone
+ about the metadata yet */
+ if (!stream_global->published_trailing_metadata ||
+ stream_global->recv_trailing_metadata_finished != NULL) {
+ char status_string[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(status, status_string);
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_global->received_trailing_metadata,
+ grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_STATUS, grpc_mdstr_from_string(status_string)));
+ if (slice) {
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_global->received_trailing_metadata,
+ grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_mdstr_from_slice(gpr_slice_ref(*slice))));
}
+ stream_global->published_trailing_metadata = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (slice) {
+ gpr_slice_unref(*slice);
}
+}
- while (grpc_chttp2_list_pop_read_write_state_changed(transport_global,
- &stream_global)) {
- if (stream_global->cancelled) {
- stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
- stream_global->read_closed = 1;
- if (!stream_global->published_cancelled) {
- char buffer[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(stream_global->cancelled_status, buffer);
- grpc_chttp2_incoming_metadata_buffer_add(&stream_global->incoming_metadata,
- grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
- grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- &stream_global->incoming_metadata,
- &stream_global->incoming_sopb);
- stream_global->published_cancelled = 1;
- }
+static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 0);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished, 0);
+ grpc_chttp2_complete_closure_step(exec_ctx,
+ &stream_global->send_message_finished, 0);
+}
+
+void grpc_chttp2_mark_stream_closed(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, int close_reads,
+ int close_writes) {
+ if (stream_global->read_closed && stream_global->write_closed) {
+ /* already closed */
+ return;
+ }
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ if (close_reads && !stream_global->read_closed) {
+ stream_global->read_closed = 1;
+ stream_global->published_initial_metadata = 1;
+ stream_global->published_trailing_metadata = 1;
+ }
+ if (close_writes && !stream_global->write_closed) {
+ stream_global->write_closed = 1;
+ if (TRANSPORT_FROM_GLOBAL(transport_global)->writing_active) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes");
+ grpc_chttp2_list_add_closed_waiting_for_writing(transport_global,
+ stream_global);
+ } else {
+ fail_pending_writes(exec_ctx, stream_global);
}
- if (stream_global->write_state == GRPC_WRITE_STATE_SENT_CLOSE &&
- stream_global->read_closed && stream_global->in_stream_map) {
- if (t->executor.parsing_active) {
- grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
- stream_global);
- } else {
- remove_stream(t, stream_global->id);
+ }
+ if (stream_global->read_closed && stream_global->write_closed) {
+ if (stream_global->id != 0 &&
+ TRANSPORT_FROM_GLOBAL(transport_global)->parsing_active) {
+ grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
+ stream_global);
+ } else {
+ if (stream_global->id != 0) {
+ remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
+ stream_global->id);
}
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
- if (!stream_global->publish_sopb) {
- continue;
- }
- /* FIXME(ctiller): we include in_stream_map in our computation of
- whether the stream is write-closed. This is completely bogus,
- but has the effect of delaying stream-closed until the stream
- is indeed evicted from the stream map, making it safe to delete.
- To fix this will require having an edge after stream-closed
- indicating that the stream is closed AND safe to delete. */
- state = compute_state(
- stream_global->write_state == GRPC_WRITE_STATE_SENT_CLOSE &&
- !stream_global->in_stream_map,
- stream_global->read_closed);
- if (stream_global->incoming_sopb.nops == 0 &&
- state == stream_global->published_state) {
- continue;
- }
- grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- &stream_global->incoming_metadata, &stream_global->incoming_sopb,
- &stream_global->outstanding_metadata);
- if (state == GRPC_STREAM_CLOSED) {
- GPR_ASSERT(!stream_global->in_stream_map);
- grpc_chttp2_unregister_stream(TRANSPORT_FROM_GLOBAL(transport_global), STREAM_FROM_GLOBAL(stream_global));
- grpc_chttp2_list_remove_incoming_window_updated(transport_global, stream_global);
- grpc_chttp2_list_remove_writable_window_update_stream(transport_global, stream_global);
- }
- grpc_sopb_swap(stream_global->publish_sopb, &stream_global->incoming_sopb);
- stream_global->published_state = *stream_global->publish_state = state;
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->recv_done_closure, 1);
- stream_global->recv_done_closure = NULL;
- stream_global->publish_sopb = NULL;
- stream_global->publish_state = NULL;
}
}
-static void cancel_from_api(grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_status_code status) {
- stream_global->cancelled = 1;
- stream_global->cancelled_status = status;
- if (stream_global->id != 0) {
+static void close_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ grpc_status_code status,
+ gpr_slice *optional_message) {
+ gpr_slice hdr;
+ gpr_slice status_hdr;
+ gpr_slice message_pfx;
+ uint8_t *p;
+ uint32_t len = 0;
+
+ GPR_ASSERT(status >= 0 && (int)status < 100);
+
+ GPR_ASSERT(stream_global->id != 0);
+
+ /* Hand roll a header block.
+ This is unnecessarily ugly - at some point we should find a more elegant
+ solution.
+ It's complicated by the fact that our send machinery would be dead by the
+ time we got around to sending this, so instead we ignore HPACK compression
+ and just write the uncompressed bytes onto the wire. */
+ status_hdr = gpr_slice_malloc(15 + (status >= 10));
+ p = GPR_SLICE_START_PTR(status_hdr);
+ *p++ = 0x40; /* literal header */
+ *p++ = 11; /* len(grpc-status) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 's';
+ *p++ = 't';
+ *p++ = 'a';
+ *p++ = 't';
+ *p++ = 'u';
+ *p++ = 's';
+ if (status < 10) {
+ *p++ = 1;
+ *p++ = (uint8_t)('0' + status);
+ } else {
+ *p++ = 2;
+ *p++ = (uint8_t)('0' + (status / 10));
+ *p++ = (uint8_t)('0' + (status % 10));
+ }
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(status_hdr));
+ len += (uint32_t)GPR_SLICE_LENGTH(status_hdr);
+
+ if (optional_message) {
+ GPR_ASSERT(GPR_SLICE_LENGTH(*optional_message) < 127);
+ message_pfx = gpr_slice_malloc(15);
+ p = GPR_SLICE_START_PTR(message_pfx);
+ *p++ = 0x40;
+ *p++ = 12; /* len(grpc-message) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 'm';
+ *p++ = 'e';
+ *p++ = 's';
+ *p++ = 's';
+ *p++ = 'a';
+ *p++ = 'g';
+ *p++ = 'e';
+ *p++ = (uint8_t)GPR_SLICE_LENGTH(*optional_message);
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GPR_SLICE_LENGTH(message_pfx);
+ len += (uint32_t)GPR_SLICE_LENGTH(*optional_message);
+ }
+
+ hdr = gpr_slice_malloc(9);
+ p = GPR_SLICE_START_PTR(hdr);
+ *p++ = (uint8_t)(len >> 16);
+ *p++ = (uint8_t)(len >> 8);
+ *p++ = (uint8_t)(len);
+ *p++ = GRPC_CHTTP2_FRAME_HEADER;
+ *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
+ *p++ = (uint8_t)(stream_global->id >> 24);
+ *p++ = (uint8_t)(stream_global->id >> 16);
+ *p++ = (uint8_t)(stream_global->id >> 8);
+ *p++ = (uint8_t)(stream_global->id);
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
+
+ gpr_slice_buffer_add(&transport_global->qbuf, hdr);
+ gpr_slice_buffer_add(&transport_global->qbuf, status_hdr);
+ if (optional_message) {
+ gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
gpr_slice_buffer_add(&transport_global->qbuf,
- grpc_chttp2_rst_stream_create(
- stream_global->id,
- grpc_chttp2_grpc_status_to_http2_error(status)));
+ gpr_slice_ref(*optional_message));
+ }
+
+ gpr_slice_buffer_add(
+ &transport_global->qbuf,
+ grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR));
+
+ if (optional_message) {
+ gpr_slice_ref(*optional_message);
}
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
+ optional_message);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
+ 1);
}
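
The frame assembled above is a plain HTTP/2 HEADERS frame: a 9-byte header (24-bit payload length, 8-bit frame type, 8-bit flags, 4-byte stream id) followed by uncompressed HPACK literal header fields for grpc-status and, optionally, grpc-message. A small standalone sketch of the same header assembly, with hypothetical constants standing in for the GRPC_CHTTP2_* macros, is:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins; in the real code these come from the frame headers. */
#define FRAME_TYPE_HEADERS 1
#define FLAG_END_STREAM 1
#define FLAG_END_HEADERS 4

/* Write the 9-byte HTTP/2 frame header used by close_from_api above. */
static void write_frame_header(uint8_t *p, uint32_t len, uint32_t stream_id) {
  *p++ = (uint8_t)(len >> 16);
  *p++ = (uint8_t)(len >> 8);
  *p++ = (uint8_t)(len);
  *p++ = FRAME_TYPE_HEADERS;
  *p++ = FLAG_END_STREAM | FLAG_END_HEADERS;
  *p++ = (uint8_t)(stream_id >> 24);
  *p++ = (uint8_t)(stream_id >> 16);
  *p++ = (uint8_t)(stream_id >> 8);
  *p++ = (uint8_t)(stream_id);
}

int main(void) {
  /* e.g. grpc-status "5" with no message: 15 payload bytes on stream 1 */
  uint8_t hdr[9];
  write_frame_header(hdr, 15, 1);
  for (int i = 0; i < 9; i++) printf("%02x ", hdr[i]);
  printf("\n"); /* prints: 00 00 0f 01 05 00 00 00 01 */
  return 0;
}
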
static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
void *user_data,
grpc_chttp2_stream_global *stream_global) {
- cancel_from_api(transport_global, stream_global, GRPC_STATUS_UNAVAILABLE);
+ cancel_from_api(user_data, transport_global, stream_global,
+ GRPC_STATUS_UNAVAILABLE);
}
-static void end_all_the_calls(grpc_chttp2_transport *t) {
- grpc_chttp2_for_all_streams(&t->global, NULL, cancel_stream_cb);
+static void end_all_the_calls(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_for_all_streams(&t->global, exec_ctx, cancel_stream_cb);
}
-static void drop_connection(grpc_chttp2_transport *t) {
+static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
if (t->global.error_state == GRPC_CHTTP2_ERROR_STATE_NONE) {
t->global.error_state = GRPC_CHTTP2_ERROR_STATE_SEEN;
}
- close_transport_locked(t, NULL, NULL);
- end_all_the_calls(t);
+ close_transport_locked(exec_ctx, t, NULL, NULL);
+ end_all_the_calls(exec_ctx, t);
}
static void read_error_locked(grpc_chttp2_transport *t) {
@@ -930,255 +1486,464 @@ static void read_error_locked(grpc_chttp2_transport *t) {
}
}
-static void recv_data_error_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *a) {
+static void recv_data_error_locked(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, void *a) {
size_t i;
drop_connection(t);
read_error_locked(t);
- for (i = 0; i < t->executor_parsing.nslices; i++) gpr_slice_unref(t->executor_parsing.slices[i]);
+ for (i = 0; i < t->executor_parsing.nslices; i++)
+ gpr_slice_unref(t->executor_parsing.slices[i]);
memset(&t->executor_parsing, 0, sizeof(t->executor_parsing));
UNREF_TRANSPORT(t, "recv_data");
}
/** update window from a settings change */
-static void update_global_window(void *args, gpr_uint32 id, void *stream) {
+static void update_global_window(void *args, uint32_t id, void *stream) {
grpc_chttp2_transport *t = args;
grpc_chttp2_stream *s = stream;
grpc_chttp2_transport_global *transport_global = &t->global;
grpc_chttp2_stream_global *stream_global = &s->global;
+ int was_zero;
+ int is_zero;
+ int64_t initial_window_update = t->parsing.initial_window_update;
+
+ was_zero = stream_global->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", transport_global, stream_global,
+ outgoing_window, initial_window_update);
+ is_zero = stream_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("settings", transport_global, stream_global,
- outgoing_window,
- t->parsing.initial_window_update);
- stream_global->outgoing_window += t->parsing.initial_window_update;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(transport_global, stream_global);
+ }
}
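
The was_zero/is_zero pair above re-queues a stream for writing only when the INITIAL_WINDOW_SIZE credit moves its outgoing_window from non-positive to positive; streams whose window was already positive were never stalled on flow control and are left alone.
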
-static void finish_parsing_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *a) {
- size_t i = *(size_t *)a;
+static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, bool success) {
+  grpc_chttp2_transport *t = tp;
+  grpc_chttp2_run_with_global_lock(t, NULL, recv_data_locked,
+                                   (void *)(uintptr_t)success, 0);
+ /* Control flow:
+ recv_data_locked ->
+ (parse_unlocked -> post_parse_locked)? ->
+ post_recv_data_locked */
+}
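
recv_data hands its success flag to the locked phase by packing it into the generic void * argument as (void *)(uintptr_t)success; recv_data_locked unpacks it with the reverse cast. A minimal, standalone illustration of that round trip in plain C, with a hypothetical dispatcher standing in for grpc_chttp2_run_with_global_lock:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical dispatcher that only carries an opaque pointer argument */
    static void dispatch(void (*fn)(void *arg), void *arg) { fn(arg); }

    static void on_read_done(void *arg) {
      bool success = (bool)(uintptr_t)arg; /* unpack the flag */
      printf("read %s\n", success ? "succeeded" : "failed");
    }

    int main(void) {
      bool success = true;
      /* pack the flag into the pointer-sized argument: no allocation needed */
      dispatch(on_read_done, (void *)(uintptr_t)success);
      return 0;
    }
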
- if (i != t->executor_parsing.nslices) {
- drop_connection(t);
+static void recv_data_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_unused, void *arg) {
+ size_t i;
+ int keep_reading = 0;
+ grpc_chttp2_transport_global *transport_global = &t->global;
+ grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
+ grpc_chttp2_stream_global *stream_global;
+ bool success = (bool)(uintptr_t)arg;
+
+ i = 0;
+ GPR_ASSERT(!t->parsing_active);
+ if (!t->closed) {
+ t->executor.parsing_active = 1;
+ /* merge stream lists */
+ grpc_chttp2_stream_map_move_into(&t->new_stream_map,
+ &t->parsing_stream_map);
+ grpc_chttp2_prepare_to_read(transport_global, transport_parsing);
+ grpc_exec_ctx_enqueue(exec_ctx, parse_locked, t, NULL);
+ } else {
+ post_recv_data_locked(exec_ctx, t, s_unused, arg);
+ }
+}
+
+static void parse_locked(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ GPR_TIMER_BEGIN("recv_data.parse", 0);
+ for (; i < t->read_buffer.count &&
+ grpc_chttp2_perform_read(exec_ctx, transport_parsing,
+ t->read_buffer.slices[i]);
+ i++)
+ ;
+ GPR_TIMER_END("recv_data.parse", 0);
+  grpc_chttp2_run_with_global_lock(t, s_unused, post_parse_locked, arg, 0);
+}
+
+static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_unused, void *arg) {
+ /* copy parsing qbuf to global qbuf */
+ gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
+ if (i != t->read_buffer.count) {
+ unlock(exec_ctx, t);
+ lock(t);
+ drop_connection(exec_ctx, t);
}
/* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map,
- &t->parsing_stream_map);
- t->global.concurrent_stream_count = grpc_chttp2_stream_map_size(&t->parsing_stream_map);
- if (t->parsing.initial_window_update != 0) {
+ grpc_chttp2_stream_map_move_into(&t->new_stream_map, &t->parsing_stream_map);
+ transport_global->concurrent_stream_count =
+ (uint32_t)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
+ if (transport_parsing->initial_window_update != 0) {
grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
update_global_window, t);
- t->parsing.initial_window_update = 0;
+ transport_parsing->initial_window_update = 0;
}
/* handle higher level things */
- grpc_chttp2_publish_reads(&t->global, &t->parsing);
- t->executor.parsing_active = 0;
-
- for (; i < t->executor_parsing.nslices; i++) gpr_slice_unref(t->executor_parsing.slices[i]);
-
- if (i == t->executor_parsing.nslices) {
- grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1);
- } else {
- read_error_locked(t);
- UNREF_TRANSPORT(t, "recv_data");
+ grpc_chttp2_publish_reads(exec_ctx, transport_global, transport_parsing);
+ t->parsing_active = 0;
+ /* handle delayed transport ops (if there is one) */
+ if (t->post_parsing_op) {
+ grpc_transport_op *op = t->post_parsing_op;
+ t->post_parsing_op = NULL;
+ perform_transport_op_locked(exec_ctx, t, op);
+ gpr_free(op);
+ }
+  /* if a stream is in the stream map, and gets cancelled, we need to ensure
+   * we are not parsing before continuing the cancellation to keep things
+   * in a sane state */
+ while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
+ &stream_global)) {
+ GPR_ASSERT(stream_global->in_stream_map);
+ GPR_ASSERT(stream_global->write_closed);
+ GPR_ASSERT(stream_global->read_closed);
+ remove_stream(exec_ctx, t, stream_global->id);
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
- memset(&t->executor_parsing, 0, sizeof(t->executor_parsing));
+ post_recv_data_locked(exec_ctx, t, s_unused, arg);
}
-static void parsing_action(void *pt, int iomgr_success_ignored) {
- size_t i;
- grpc_chttp2_transport *t = pt;
- for (i = 0; i < t->executor_parsing.nslices && grpc_chttp2_perform_read(&t->parsing, t->executor_parsing.slices[i]);
- i++) {
- gpr_slice_unref(t->executor_parsing.slices[i]);
+static void post_recv_data_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_unused, void *arg) {
+ if (!success || i != t->read_buffer.count || t->closed) {
+ drop_connection(exec_ctx, t);
+ read_error_locked(exec_ctx, t);
+ } else if (!t->closed) {
+ keep_reading = 1;
+ REF_TRANSPORT(t, "keep_reading");
+ prevent_endpoint_shutdown(t);
}
- grpc_chttp2_run_with_global_lock(t, NULL, finish_parsing_locked, &i, sizeof(i));
-}
+ gpr_slice_buffer_reset_and_unref(&t->read_buffer);
-static void recv_data_ok_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void *a) {
- size_t i;
- GPR_ASSERT(!t->executor.parsing_active);
- if (t->global.error_state == GRPC_CHTTP2_ERROR_STATE_NONE) {
- t->executor.parsing_active = 1;
- /* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map,
- &t->parsing_stream_map);
- grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
- /* schedule more work to do unlocked */
- grpc_chttp2_schedule_closure(&t->global, &t->parsing_action, 1);
+ if (keep_reading) {
+ grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->recv_data);
+ allow_endpoint_shutdown_locked(exec_ctx, t);
+ UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
- for (i = 0; i < t->executor_parsing.nslices; i++) gpr_slice_unref(t->executor_parsing.slices[i]);
- memset(&t->executor_parsing, 0, sizeof(t->executor_parsing));
+ UNREF_TRANSPORT(exec_ctx, t, "recv_data");
}
}
-/* tcp read callback */
-static void recv_data(void *tp, gpr_slice *slices, size_t nslices,
- grpc_endpoint_cb_status error) {
- grpc_chttp2_transport *t = tp;
+/*******************************************************************************
+ * CALLBACK LOOP
+ */
- t->executor_parsing.slices = slices;
- t->executor_parsing.nslices = nslices;
+static void connectivity_state_set(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_connectivity_state state, const char *reason) {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
+ grpc_connectivity_state_set(
+ exec_ctx,
+ &TRANSPORT_FROM_GLOBAL(transport_global)->channel_callback.state_tracker,
+ state, reason);
+}
- switch (error) {
- case GRPC_ENDPOINT_CB_SHUTDOWN:
- case GRPC_ENDPOINT_CB_EOF:
- case GRPC_ENDPOINT_CB_ERROR:
- grpc_chttp2_run_with_global_lock(t, NULL, recv_data_error_locked, NULL, 0);
- break;
- case GRPC_ENDPOINT_CB_OK:
- grpc_chttp2_run_with_global_lock(t, NULL, recv_data_ok_locked, NULL, 0);
- break;
+/*******************************************************************************
+ * POLLSET STUFF
+ */
+
+static void add_to_pollset_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_pollset *pollset) {
+ if (t->ep) {
+ grpc_endpoint_add_to_pollset(exec_ctx, t->ep, pollset);
}
}
-static void reading_action(void *pt, int iomgr_success_ignored) {
- grpc_chttp2_transport *t = pt;
- grpc_endpoint_notify_on_read(t->ep, recv_data, t);
+static void add_to_pollset_set_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_pollset_set *pollset_set) {
+ if (t->ep) {
+ grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, pollset_set);
+ }
}
-/*
- * CALLBACK LOOP
- */
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset *pollset) {
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+ /* TODO(ctiller): keep pollset alive */
+ grpc_chttp2_run_with_global_lock(gt, gs, add_to_pollset_locked, pollset,
+ NULL);
+}
-typedef struct {
- grpc_chttp2_transport *t;
- gpr_uint32 error;
- gpr_slice text;
- grpc_iomgr_closure closure;
-} notify_goaways_args;
+/*******************************************************************************
+ * BYTE STREAM
+ */
-static void finished_channel_callbacks_locked(grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, void *arg_ignored) {
- t->executor.channel_callback_active = 0;
-}
+static void incoming_byte_stream_update_flow_control(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
+ size_t have_already) {
+ uint32_t max_recv_bytes;
-static void notify_goaways(void *p, int iomgr_success_ignored) {
- notify_goaways_args *a = p;
- grpc_chttp2_transport *t = a->t;
+ /* clamp max recv hint to an allowable size */
+ if (max_size_hint >= UINT32_MAX - transport_global->stream_lookahead) {
+ max_recv_bytes = UINT32_MAX - transport_global->stream_lookahead;
+ } else {
+ max_recv_bytes = (uint32_t)max_size_hint;
+ }
- t->channel_callback.cb->goaway(t->channel_callback.cb_user_data, &t->base,
- a->error, a->text);
+ /* account for bytes already received but unknown to higher layers */
+ if (max_recv_bytes >= have_already) {
+ max_recv_bytes -= (uint32_t)have_already;
+ } else {
+ max_recv_bytes = 0;
+ }
- gpr_free(a);
+ /* add some small lookahead to keep pipelines flowing */
+ GPR_ASSERT(max_recv_bytes <= UINT32_MAX - transport_global->stream_lookahead);
+ max_recv_bytes += transport_global->stream_lookahead;
+ if (stream_global->max_recv_bytes < max_recv_bytes) {
+ uint32_t add_max_recv_bytes =
+ max_recv_bytes - stream_global->max_recv_bytes;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ max_recv_bytes, add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ unannounced_incoming_window_for_parse,
+ add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ unannounced_incoming_window_for_writing,
+ add_max_recv_bytes);
+ grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global,
+ stream_global);
+ grpc_chttp2_become_writable(transport_global, stream_global);
+ }
+}
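
incoming_byte_stream_update_flow_control first clamps the caller's size hint so that adding the stream lookahead cannot overflow a 32-bit window, then subtracts bytes that are already buffered locally, and finally adds the lookahead back before crediting the stream. A self-contained sketch of just that arithmetic, with hypothetical names and a fixed lookahead:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical re-statement of the clamping arithmetic */
    static uint32_t recv_credit(size_t max_size_hint, size_t have_already,
                                uint32_t lookahead) {
      uint32_t max_recv_bytes;
      /* clamp so that max_recv_bytes + lookahead cannot overflow uint32_t */
      if (max_size_hint >= UINT32_MAX - lookahead) {
        max_recv_bytes = UINT32_MAX - lookahead;
      } else {
        max_recv_bytes = (uint32_t)max_size_hint;
      }
      /* bytes already buffered locally need no further window */
      if (max_recv_bytes >= have_already) {
        max_recv_bytes -= (uint32_t)have_already;
      } else {
        max_recv_bytes = 0;
      }
      /* keep a little extra window outstanding so the pipeline never stalls */
      return max_recv_bytes + lookahead;
    }

    int main(void) {
      printf("credit = %u\n", recv_credit(16384, 4096, 1024));
      return 0;
    }
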
- grpc_chttp2_run_with_global_lock(t, NULL, finished_channel_callbacks_locked, NULL, 0);
- UNREF_TRANSPORT(t, "notify_goaways");
+static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete) {
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)byte_stream;
+ grpc_chttp2_transport_global *transport_global = &bs->transport->global;
+ grpc_chttp2_stream_global *stream_global = &bs->stream->global;
+
+ lock(bs->transport);
+ if (bs->is_tail) {
+ incoming_byte_stream_update_flow_control(transport_global, stream_global,
+ max_size_hint, bs->slices.length);
+ }
+ if (bs->slices.count > 0) {
+ *slice = gpr_slice_buffer_take_first(&bs->slices);
+ unlock(exec_ctx, bs->transport);
+ return 1;
+ } else if (bs->failed) {
+ grpc_exec_ctx_enqueue(exec_ctx, on_complete, false, NULL);
+ unlock(exec_ctx, bs->transport);
+ return 0;
+ } else {
+ bs->on_next = on_complete;
+ bs->next = slice;
+ unlock(exec_ctx, bs->transport);
+ return 0;
+ }
}
-static void notify_closed(void *gt, int iomgr_success_ignored) {
- grpc_chttp2_transport *t = gt;
- t->channel_callback.cb->closed(t->channel_callback.cb_user_data, &t->base);
+static void incoming_byte_stream_unref(grpc_chttp2_incoming_byte_stream *bs) {
+ if (gpr_unref(&bs->refs)) {
+ gpr_slice_buffer_destroy(&bs->slices);
+ gpr_free(bs);
+ }
+}
- grpc_chttp2_run_with_global_lock(t, NULL, finished_channel_callbacks_locked, NULL, 0);
- UNREF_TRANSPORT(t, "notify_closed");
+static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream) {
+ incoming_byte_stream_unref((grpc_chttp2_incoming_byte_stream *)byte_stream);
}
-static void unlock_check_channel_callbacks(grpc_chttp2_transport *t) {
- if (t->executor.channel_callback_active) {
- return;
+void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_incoming_byte_stream *bs,
+ gpr_slice slice) {
+ gpr_mu_lock(&bs->transport->mu);
+ if (bs->on_next != NULL) {
+ *bs->next = slice;
+ grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, true, NULL);
+ bs->on_next = NULL;
+ } else {
+ gpr_slice_buffer_add(&bs->slices, slice);
}
- if (t->global.goaway_state != GRPC_CHTTP2_ERROR_STATE_NONE) {
- if (t->global.goaway_state == GRPC_CHTTP2_ERROR_STATE_SEEN &&
- t->global.error_state != GRPC_CHTTP2_ERROR_STATE_NOTIFIED) {
- notify_goaways_args *a = gpr_malloc(sizeof(*a));
- a->t = t;
- a->error = t->global.goaway_error;
- a->text = t->global.goaway_text;
- t->global.goaway_state = GRPC_CHTTP2_ERROR_STATE_NOTIFIED;
- t->executor.channel_callback_active = 1;
- grpc_iomgr_closure_init(&a->closure, notify_goaways, a);
- REF_TRANSPORT(t, "notify_goaways");
- grpc_chttp2_schedule_closure(&t->global, &a->closure, 1);
- return;
- } else if (t->global.goaway_state != GRPC_CHTTP2_ERROR_STATE_NOTIFIED) {
- return;
+ gpr_mu_unlock(&bs->transport->mu);
+}
+
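Taken together, incoming_byte_stream_next and grpc_chttp2_incoming_byte_stream_push form a handoff: the reader either takes a buffered slice immediately or parks its completion callback, and the writer either satisfies a parked callback directly or buffers the slice for later. A miniature, single-threaded sketch of that handoff with hypothetical types (ints instead of slices, and none of the real code's locking):

    #include <stdio.h>

    typedef void (*ready_cb)(int value);

    typedef struct {
      int buffered;      /* one-slot buffer for a produced value */
      int have_buffered; /* is the slot full? */
      ready_cb parked;   /* consumer callback waiting for data, if any */
    } handoff;

    /* consumer: returns 1 and fills *out if data is ready, else parks cb */
    static int handoff_next(handoff *h, int *out, ready_cb cb) {
      if (h->have_buffered) {
        *out = h->buffered;
        h->have_buffered = 0;
        return 1;
      }
      h->parked = cb;
      return 0;
    }

    /* producer: wakes a parked consumer directly, otherwise buffers */
    static void handoff_push(handoff *h, int value) {
      if (h->parked != NULL) {
        ready_cb cb = h->parked;
        h->parked = NULL;
        cb(value);
      } else {
        h->buffered = value;
        h->have_buffered = 1;
      }
    }

    static void on_ready(int value) { printf("woken with %d\n", value); }

    int main(void) {
      handoff h = {0, 0, NULL};
      int v;
      if (!handoff_next(&h, &v, on_ready)) printf("parked, waiting...\n");
      handoff_push(&h, 42); /* satisfies the parked callback */
      handoff_push(&h, 43); /* nobody waiting: buffered */
      if (handoff_next(&h, &v, on_ready)) printf("took buffered %d\n", v);
      return 0;
    }
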
+void grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, int success,
+ int from_parsing_thread) {
+ if (!success) {
+ if (from_parsing_thread) {
+ gpr_mu_lock(&bs->transport->mu);
}
+ grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, false, NULL);
+ bs->on_next = NULL;
+ bs->failed = 1;
+ if (from_parsing_thread) {
+ gpr_mu_unlock(&bs->transport->mu);
+ }
+ } else {
+#ifndef NDEBUG
+ if (from_parsing_thread) {
+ gpr_mu_lock(&bs->transport->mu);
+ }
+ GPR_ASSERT(bs->on_next == NULL);
+ if (from_parsing_thread) {
+ gpr_mu_unlock(&bs->transport->mu);
+ }
+#endif
}
- if (t->global.error_state == GRPC_CHTTP2_ERROR_STATE_SEEN) {
- t->global.error_state = GRPC_CHTTP2_ERROR_STATE_NOTIFIED;
- t->executor.channel_callback_active = 1;
- REF_TRANSPORT(t, "notify_closed");
- grpc_chttp2_schedule_closure(&t->global, &t->channel_callback.notify_closed,
- 1);
- }
+ incoming_byte_stream_unref(bs);
}
-void grpc_chttp2_schedule_closure(
- grpc_chttp2_transport_global *transport_global, grpc_iomgr_closure *closure,
- int success) {
- closure->success = success;
- closure->next = transport_global->pending_closures;
- transport_global->pending_closures = closure;
+grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
+ uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue) {
+ grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
+ gpr_malloc(sizeof(*incoming_byte_stream));
+ incoming_byte_stream->base.length = frame_size;
+ incoming_byte_stream->base.flags = flags;
+ incoming_byte_stream->base.next = incoming_byte_stream_next;
+ incoming_byte_stream->base.destroy = incoming_byte_stream_destroy;
+ gpr_ref_init(&incoming_byte_stream->refs, 2);
+ incoming_byte_stream->next_message = NULL;
+ incoming_byte_stream->transport = TRANSPORT_FROM_PARSING(transport_parsing);
+ incoming_byte_stream->stream = STREAM_FROM_PARSING(stream_parsing);
+ gpr_slice_buffer_init(&incoming_byte_stream->slices);
+ incoming_byte_stream->on_next = NULL;
+ incoming_byte_stream->is_tail = 1;
+ incoming_byte_stream->failed = 0;
+ if (add_to_queue->head == NULL) {
+ add_to_queue->head = incoming_byte_stream;
+ } else {
+ add_to_queue->tail->is_tail = 0;
+ add_to_queue->tail->next_message = incoming_byte_stream;
+ }
+ add_to_queue->tail = incoming_byte_stream;
+ return incoming_byte_stream;
}
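
grpc_chttp2_incoming_byte_stream_create also appends the new stream to a head/tail frame queue, clearing is_tail on the previous tail so that only the newest message keeps driving flow-control updates. A small sketch of that append step with hypothetical node and queue types:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node {
      int id;
      int is_tail;       /* set only on the newest element in the queue */
      struct node *next; /* next message in arrival order */
    } node;

    typedef struct {
      node *head;
      node *tail;
    } queue;

    static void queue_append(queue *q, int id) {
      node *n = malloc(sizeof(*n));
      n->id = id;
      n->is_tail = 1;
      n->next = NULL;
      if (q->head == NULL) {
        q->head = n;
      } else {
        q->tail->is_tail = 0; /* the old tail is no longer the newest */
        q->tail->next = n;
      }
      q->tail = n;
    }

    int main(void) {
      queue q = {NULL, NULL};
      node *n;
      queue_append(&q, 1);
      queue_append(&q, 2);
      n = q.head;
      while (n != NULL) {
        node *next = n->next;
        printf("msg %d is_tail=%d\n", n->id, n->is_tail);
        free(n);
        n = next;
      }
      return 0;
    }
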
-/*
- * POLLSET STUFF
+/*******************************************************************************
+ * TRACING
*/
-static void add_to_pollset_locked(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- void *pollset) {
- if (t->ep) {
- grpc_endpoint_add_to_pollset(t->ep, pollset);
+static char *format_flowctl_context_var(const char *context, const char *var,
+ int64_t val, uint32_t id,
+ char **scope) {
+ char *underscore_pos;
+ char *result;
+ if (context == NULL) {
+ *scope = NULL;
+ gpr_asprintf(&result, "%s(%lld)", var, val);
+ return result;
+ }
+ underscore_pos = strchr(context, '_');
+ *scope = gpr_strdup(context);
+ (*scope)[underscore_pos - context] = 0;
+ if (id != 0) {
+ char *tmp = *scope;
+ gpr_asprintf(scope, "%s[%d]", tmp, id);
+ gpr_free(tmp);
}
+ gpr_asprintf(&result, "%s.%s(%lld)", underscore_pos + 1, var, val);
+ return result;
}
-static void add_to_pollset(grpc_transport *gt, grpc_pollset *pollset) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_chttp2_run_with_global_lock(t, NULL, add_to_pollset_locked, pollset, 0);
+static int samestr(char *a, char *b) {
+ if (a == NULL) {
+ return b == NULL;
+ }
+ if (b == NULL) {
+ return 0;
+ }
+ return 0 == strcmp(a, b);
}
-/*
- * TRACING
- */
-
-void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
- const char *context, const char *var,
- int is_client, gpr_uint32 stream_id,
- gpr_int64 current_value, gpr_int64 delta) {
- char *identifier;
- char *context_scope;
- char *context_thread;
- char *underscore_pos = strchr(context, '_');
- GPR_ASSERT(underscore_pos);
- context_thread = gpr_strdup(underscore_pos + 1);
- context_scope = gpr_strdup(context);
- context_scope[underscore_pos - context] = 0;
- if (stream_id) {
- gpr_asprintf(&identifier, "%s[%d]", context_scope, stream_id);
- } else {
- identifier = gpr_strdup(context_scope);
+void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
+ grpc_chttp2_flowctl_op op, const char *context1,
+ const char *var1, const char *context2,
+ const char *var2, int is_client,
+ uint32_t stream_id, int64_t val1, int64_t val2) {
+ char *scope1;
+ char *scope2;
+ char *label1 =
+ format_flowctl_context_var(context1, var1, val1, stream_id, &scope1);
+ char *label2 =
+ format_flowctl_context_var(context2, var2, val2, stream_id, &scope2);
+ char *clisvr = is_client ? "client" : "server";
+ char *prefix;
+
+ gpr_asprintf(&prefix, "FLOW % 8s: %s % 11s ", phase, clisvr, scope1);
+
+ switch (op) {
+ case GRPC_CHTTP2_FLOWCTL_MOVE:
+ GPR_ASSERT(samestr(scope1, scope2));
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sMOVE % 40s <- % 40s giving %d", prefix, label1, label2,
+ val1 + val2);
+ }
+ break;
+ case GRPC_CHTTP2_FLOWCTL_CREDIT:
+ GPR_ASSERT(val2 >= 0);
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sCREDIT % 40s by % 40s giving %d", prefix, label1, label2,
+ val1 + val2);
+ }
+ break;
+ case GRPC_CHTTP2_FLOWCTL_DEBIT:
+ GPR_ASSERT(val2 >= 0);
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sDEBIT % 40s by % 40s giving %d", prefix, label1, label2,
+ val1 - val2);
+ }
+ break;
}
- gpr_log(GPR_INFO,
- "FLOWCTL: %s %-10s %8s %-23s %8lld %c %8lld = %8lld %-10s [%s:%d]",
- is_client ? "client" : "server", identifier, context_thread, var,
- current_value, delta < 0 ? '-' : '+', delta < 0 ? -delta : delta,
- current_value + delta, reason, file, line);
- gpr_free(identifier);
- gpr_free(context_thread);
- gpr_free(context_scope);
+
+ gpr_free(scope1);
+ gpr_free(scope2);
+ gpr_free(label1);
+ gpr_free(label2);
+ gpr_free(prefix);
}
-/*
+/*******************************************************************************
* INTEGRATION GLUE
*/
+static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+ return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
+}
+
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
+ "chttp2",
init_stream,
- perform_op,
- add_to_pollset,
+ set_pollset,
+ perform_stream_op,
+ perform_transport_op,
destroy_stream,
- goaway,
- close_transport,
- send_ping,
- destroy_transport};
-
-void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
- void *arg,
- const grpc_channel_args *channel_args,
- grpc_endpoint *ep, gpr_slice *slices,
- size_t nslices, grpc_mdctx *mdctx,
- int is_client) {
+ destroy_transport,
+ chttp2_get_peer};
+
+grpc_transport *grpc_create_chttp2_transport(
+ grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
+ grpc_endpoint *ep, int is_client) {
grpc_chttp2_transport *t = gpr_malloc(sizeof(grpc_chttp2_transport));
- init_transport(t, setup, arg, channel_args, ep, slices, nslices, mdctx,
- is_client);
+ init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
+ return &t->base;
+}
+
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ gpr_slice *slices, size_t nslices) {
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
+ REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
+ gpr_slice_buffer_addn(&t->read_buffer, slices, nslices);
+ recv_data(exec_ctx, t, 1);
}
diff --git a/src/core/transport/chttp2_transport.h b/src/core/transport/chttp2_transport.h
index 18e19f03af..9a6cf0ed35 100644
--- a/src/core/transport/chttp2_transport.h
+++ b/src/core/transport/chttp2_transport.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TRANSPORT_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TRANSPORT_H
+#ifndef GRPC_CORE_TRANSPORT_CHTTP2_TRANSPORT_H
+#define GRPC_CORE_TRANSPORT_CHTTP2_TRANSPORT_H
#include "src/core/iomgr/endpoint.h"
#include "src/core/transport/transport.h"
@@ -40,11 +40,12 @@
extern int grpc_http_trace;
extern int grpc_flowctl_trace;
-void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
- void *arg,
- const grpc_channel_args *channel_args,
- grpc_endpoint *ep, gpr_slice *slices,
- size_t nslices, grpc_mdctx *metadata_context,
- int is_client);
+grpc_transport *grpc_create_chttp2_transport(
+ grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
+ grpc_endpoint *ep, int is_client);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TRANSPORT_H */
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ gpr_slice *slices, size_t nslices);
+
+#endif /* GRPC_CORE_TRANSPORT_CHTTP2_TRANSPORT_H */
diff --git a/src/core/transport/connectivity_state.c b/src/core/transport/connectivity_state.c
new file mode 100644
index 0000000000..87765b9799
--- /dev/null
+++ b/src/core/transport/connectivity_state.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/transport/connectivity_state.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+int grpc_connectivity_state_trace = 0;
+
+const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
+ switch (state) {
+ case GRPC_CHANNEL_IDLE:
+ return "IDLE";
+ case GRPC_CHANNEL_CONNECTING:
+ return "CONNECTING";
+ case GRPC_CHANNEL_READY:
+ return "READY";
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ return "TRANSIENT_FAILURE";
+ case GRPC_CHANNEL_FATAL_FAILURE:
+ return "FATAL_FAILURE";
+ }
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
+void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state init_state,
+ const char *name) {
+ tracker->current_state = init_state;
+ tracker->watchers = NULL;
+ tracker->name = gpr_strdup(name);
+}
+
+void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker) {
+ int success;
+ grpc_connectivity_state_watcher *w;
+ while ((w = tracker->watchers)) {
+ tracker->watchers = w->next;
+
+ if (GRPC_CHANNEL_FATAL_FAILURE != *w->current) {
+ *w->current = GRPC_CHANNEL_FATAL_FAILURE;
+ success = 1;
+ } else {
+ success = 0;
+ }
+ grpc_exec_ctx_enqueue(exec_ctx, w->notify, success, NULL);
+ gpr_free(w);
+ }
+ gpr_free(tracker->name);
+}
+
+grpc_connectivity_state grpc_connectivity_state_check(
+ grpc_connectivity_state_tracker *tracker) {
+ if (grpc_connectivity_state_trace) {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
+ grpc_connectivity_state_name(tracker->current_state));
+ }
+ return tracker->current_state;
+}
+
+int grpc_connectivity_state_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state *current, grpc_closure *notify) {
+ if (grpc_connectivity_state_trace) {
+ if (current == NULL) {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
+ tracker->name, notify);
+ } else {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
+ tracker->name, grpc_connectivity_state_name(*current),
+ grpc_connectivity_state_name(tracker->current_state), notify);
+ }
+ }
+ if (current == NULL) {
+ grpc_connectivity_state_watcher *w = tracker->watchers;
+ if (w != NULL && w->notify == notify) {
+ grpc_exec_ctx_enqueue(exec_ctx, notify, false, NULL);
+ tracker->watchers = w->next;
+ gpr_free(w);
+ return 0;
+ }
+ while (w != NULL) {
+ grpc_connectivity_state_watcher *rm_candidate = w->next;
+ if (rm_candidate != NULL && rm_candidate->notify == notify) {
+ grpc_exec_ctx_enqueue(exec_ctx, notify, false, NULL);
+ w->next = w->next->next;
+ gpr_free(rm_candidate);
+ return 0;
+ }
+ w = w->next;
+ }
+ return 0;
+ } else {
+ if (tracker->current_state != *current) {
+ *current = tracker->current_state;
+ grpc_exec_ctx_enqueue(exec_ctx, notify, true, NULL);
+ } else {
+ grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
+ w->current = current;
+ w->notify = notify;
+ w->next = tracker->watchers;
+ tracker->watchers = w;
+ }
+ return tracker->current_state == GRPC_CHANNEL_IDLE;
+ }
+}
+
+void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state state,
+ const char *reason) {
+ grpc_connectivity_state_watcher *w;
+ if (grpc_connectivity_state_trace) {
+ gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s]", tracker, tracker->name,
+ grpc_connectivity_state_name(tracker->current_state),
+ grpc_connectivity_state_name(state), reason);
+ }
+ if (tracker->current_state == state) {
+ return;
+ }
+ GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_FATAL_FAILURE);
+ tracker->current_state = state;
+ while ((w = tracker->watchers) != NULL) {
+ *w->current = tracker->current_state;
+ tracker->watchers = w->next;
+ grpc_exec_ctx_enqueue(exec_ctx, w->notify, true, NULL);
+ gpr_free(w);
+ }
+}
diff --git a/src/core/transport/connectivity_state.h b/src/core/transport/connectivity_state.h
new file mode 100644
index 0000000000..b4a3ce924d
--- /dev/null
+++ b/src/core/transport/connectivity_state.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_TRANSPORT_CONNECTIVITY_STATE_H
+#define GRPC_CORE_TRANSPORT_CONNECTIVITY_STATE_H
+
+#include <grpc/grpc.h>
+#include "src/core/iomgr/exec_ctx.h"
+
+typedef struct grpc_connectivity_state_watcher {
+ /** we keep watchers in a linked list */
+ struct grpc_connectivity_state_watcher *next;
+ /** closure to notify on change */
+ grpc_closure *notify;
+ /** the current state as believed by the watcher */
+ grpc_connectivity_state *current;
+} grpc_connectivity_state_watcher;
+
+typedef struct {
+ /** current connectivity state */
+ grpc_connectivity_state current_state;
+ /** all our watchers */
+ grpc_connectivity_state_watcher *watchers;
+ /** a name to help debugging */
+ char *name;
+} grpc_connectivity_state_tracker;
+
+extern int grpc_connectivity_state_trace;
+
+const char *grpc_connectivity_state_name(grpc_connectivity_state state);
+
+void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state init_state,
+ const char *name);
+void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker);
+
+/** Set connectivity state; not thread safe; access must be serialized with an
+ * external lock */
+void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state state,
+ const char *reason);
+
+grpc_connectivity_state grpc_connectivity_state_check(
+ grpc_connectivity_state_tracker *tracker);
+
+/** Return 1 if the channel should start connecting, 0 otherwise.
+    If current==NULL, cancel notify if it is already queued (success==0 in
+    that case) */
+int grpc_connectivity_state_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state *current, grpc_closure *notify);
+
+#endif /* GRPC_CORE_TRANSPORT_CONNECTIVITY_STATE_H */
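
The tracker keeps its watchers in a singly linked list: grpc_connectivity_state_notify_on_state_change either fires immediately when the caller's view is already stale or enqueues a watcher, and grpc_connectivity_state_set drains the list on every transition. A standalone miniature of those mechanics, using plain function pointers in place of grpc_closure and hypothetical names throughout:

    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { IDLE, CONNECTING, READY } state_t;

    typedef struct watcher {
      state_t *current;           /* where the watcher last saw the state */
      void (*notify)(state_t st); /* stand-in for grpc_closure */
      struct watcher *next;
    } watcher;

    typedef struct {
      state_t current_state;
      watcher *watchers;
    } tracker;

    /* register interest: fire immediately if the watcher is already stale */
    static void notify_on_change(tracker *t, state_t *current,
                                 void (*notify)(state_t)) {
      if (t->current_state != *current) {
        *current = t->current_state;
        notify(t->current_state);
      } else {
        watcher *w = malloc(sizeof(*w));
        w->current = current;
        w->notify = notify;
        w->next = t->watchers;
        t->watchers = w;
      }
    }

    /* state change: drain the watcher list, notifying each one */
    static void set_state(tracker *t, state_t st) {
      watcher *w;
      if (t->current_state == st) return;
      t->current_state = st;
      while ((w = t->watchers) != NULL) {
        t->watchers = w->next;
        *w->current = st;
        w->notify(st);
        free(w);
      }
    }

    static void on_change(state_t st) { printf("state changed to %d\n", st); }

    int main(void) {
      tracker t = {IDLE, NULL};
      state_t seen = IDLE;
      notify_on_change(&t, &seen, on_change); /* queued: state unchanged */
      set_state(&t, READY);                   /* wakes the watcher */
      return 0;
    }
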
diff --git a/src/core/transport/metadata.c b/src/core/transport/metadata.c
index c80d67823f..807ae071a3 100644
--- a/src/core/transport/metadata.c
+++ b/src/core/transport/metadata.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,274 +31,353 @@
*
*/
-#include "src/core/iomgr/sockaddr.h"
#include "src/core/transport/metadata.h"
#include <assert.h>
#include <stddef.h>
#include <string.h>
+#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+
+#include "src/core/profiling/timers.h"
#include "src/core/support/murmur_hash.h"
+#include "src/core/support/string.h"
#include "src/core/transport/chttp2/bin_encoder.h"
-#include <grpc/support/time.h>
+#include "src/core/transport/static_metadata.h"
+#include "src/core/iomgr/iomgr_internal.h"
+
+/* There are two kinds of mdelem and mdstr instances.
+ * Static instances are declared in static_metadata.{h,c} and
+ * are initialized by grpc_mdctx_global_init().
+ * Dynamic instances are stored in hash tables on grpc_mdctx, and are backed
+ * by internal_string and internal_element structures.
+ * Internal helper functions herein (is_mdstr_static, is_mdelem_static) are
+ * used to determine which kind of element a pointer refers to.
+ */
#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#define DEBUG_ARGS , const char *file, int line
+#define FWD_DEBUG_ARGS , file, line
+#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
+#else
+#define DEBUG_ARGS
+#define FWD_DEBUG_ARGS
+#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s))
+#endif
+
+#define TABLE_IDX(hash, log2_shards, capacity) \
+ (((hash) >> (log2_shards)) % (capacity))
+#define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))
+
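SHARD_IDX selects a shard from the low log2_shards bits of the hash, while TABLE_IDX shifts those bits away before indexing within the shard, so the two indices are drawn from disjoint parts of the hash. A small numeric illustration using the same macro shapes with made-up inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_IDX(hash, log2_shards, capacity) \
      (((hash) >> (log2_shards)) % (capacity))
    #define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))

    int main(void) {
      uint32_t hash = 0xDEADBEEF;
      int log2_shards = 5; /* 32 shards, as for the string table */
      size_t capacity = 4; /* initial per-shard capacity */
      printf("shard %u, slot %u\n",
             (unsigned)SHARD_IDX(hash, log2_shards),
             (unsigned)TABLE_IDX(hash, log2_shards, capacity));
      return 0;
    }
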
+typedef void (*destroy_user_data_func)(void *user_data);
+
+/* Shadow structure for grpc_mdstr for non-static values */
typedef struct internal_string {
/* must be byte compatible with grpc_mdstr */
gpr_slice slice;
- gpr_uint32 hash;
+ uint32_t hash;
/* private only data */
- gpr_uint32 refs;
- gpr_uint8 has_base64_and_huffman_encoded;
+ gpr_atm refcnt;
+
+ uint8_t has_base64_and_huffman_encoded;
gpr_slice_refcount refcount;
gpr_slice base64_and_huffman;
- grpc_mdctx *context;
-
struct internal_string *bucket_next;
} internal_string;
+/* Shadow structure for grpc_mdelem for non-static elements */
typedef struct internal_metadata {
/* must be byte compatible with grpc_mdelem */
internal_string *key;
internal_string *value;
+ /* private only data */
gpr_atm refcnt;
- /* private only data */
- void *user_data;
- void (*destroy_user_data)(void *user_data);
+ gpr_mu mu_user_data;
+ gpr_atm destroy_user_data;
+ gpr_atm user_data;
- grpc_mdctx *context;
struct internal_metadata *bucket_next;
} internal_metadata;
-struct grpc_mdctx {
- gpr_uint32 hash_seed;
- int refs;
+typedef struct strtab_shard {
+ gpr_mu mu;
+ internal_string **strs;
+ size_t count;
+ size_t capacity;
+} strtab_shard;
+typedef struct mdtab_shard {
gpr_mu mu;
+ internal_metadata **elems;
+ size_t count;
+ size_t capacity;
+ size_t free;
+} mdtab_shard;
- internal_string **strtab;
- size_t strtab_count;
- size_t strtab_capacity;
+#define LOG2_STRTAB_SHARD_COUNT 5
+#define LOG2_MDTAB_SHARD_COUNT 4
+#define STRTAB_SHARD_COUNT ((size_t)(1 << LOG2_STRTAB_SHARD_COUNT))
+#define MDTAB_SHARD_COUNT ((size_t)(1 << LOG2_MDTAB_SHARD_COUNT))
- internal_metadata **mdtab;
- size_t mdtab_count;
- size_t mdtab_free;
- size_t mdtab_capacity;
-};
-
-static void internal_string_ref(internal_string *s);
-static void internal_string_unref(internal_string *s);
-static void discard_metadata(grpc_mdctx *ctx);
-static void gc_mdtab(grpc_mdctx *ctx);
-static void metadata_context_destroy_locked(grpc_mdctx *ctx);
-
-static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }
-
-static void unlock(grpc_mdctx *ctx) {
- /* If the context has been orphaned we'd like to delete it soon. We check
- conditions in unlock as it signals the end of mutations on a context.
-
- We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
- first. This is equivalent to saying that both tables have zero counts,
- which is equivalent to saying that strtab_count is zero (as mdelem's MUST
- reference an mdstr for their key and value slots).
-
- To encourage that to happen, we start discarding zero reference count
- mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
- case), since otherwise we can be stuck waiting for a garbage collection
- that will never happen. */
- if (ctx->refs == 0) {
- /* uncomment if you're having trouble diagnosing an mdelem leak to make
- things clearer (slows down destruction a lot, however) */
- gc_mdtab(ctx);
- if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
- discard_metadata(ctx);
- }
- if (ctx->strtab_count == 0) {
- metadata_context_destroy_locked(ctx);
- return;
- }
- }
- gpr_mu_unlock(&ctx->mu);
-}
+/* hash seed: decided at initialization time */
+static uint32_t g_hash_seed;
+static int g_forced_hash_seed = 0;
-static void ref_md_locked(internal_metadata *md) {
- if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
- md->context->mdtab_free--;
- }
-}
+/* linearly probed hash tables for static element lookup */
+static grpc_mdstr *g_static_strtab[GRPC_STATIC_MDSTR_COUNT * 2];
+static grpc_mdelem *g_static_mdtab[GRPC_STATIC_MDELEM_COUNT * 2];
+static size_t g_static_strtab_maxprobe;
+static size_t g_static_mdtab_maxprobe;
+
+static strtab_shard g_strtab_shard[STRTAB_SHARD_COUNT];
+static mdtab_shard g_mdtab_shard[MDTAB_SHARD_COUNT];
-grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
- grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));
-
- ctx->refs = 1;
- ctx->hash_seed = seed;
- gpr_mu_init(&ctx->mu);
- ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
- memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
- ctx->strtab_count = 0;
- ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
- ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
- memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
- ctx->mdtab_count = 0;
- ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
- ctx->mdtab_free = 0;
-
- return ctx;
+static void gc_mdtab(mdtab_shard *shard);
+
+void grpc_test_only_set_metadata_hash_seed(uint32_t seed) {
+ g_hash_seed = seed;
+ g_forced_hash_seed = 1;
}
-grpc_mdctx *grpc_mdctx_create(void) {
- /* This seed is used to prevent remote connections from controlling hash table
- * collisions. It needs to be somewhat unpredictable to a remote connection.
- */
- return grpc_mdctx_create_with_seed(gpr_now().tv_nsec);
+void grpc_mdctx_global_init(void) {
+ size_t i, j;
+ if (!g_forced_hash_seed) {
+ g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
+ }
+ g_static_strtab_maxprobe = 0;
+ g_static_mdtab_maxprobe = 0;
+ /* build static tables */
+ memset(g_static_mdtab, 0, sizeof(g_static_mdtab));
+ memset(g_static_strtab, 0, sizeof(g_static_strtab));
+ for (i = 0; i < GRPC_STATIC_MDSTR_COUNT; i++) {
+ grpc_mdstr *elem = &grpc_static_mdstr_table[i];
+ const char *str = grpc_static_metadata_strings[i];
+ uint32_t hash = gpr_murmur_hash3(str, strlen(str), g_hash_seed);
+ *(gpr_slice *)&elem->slice = gpr_slice_from_static_string(str);
+ *(uint32_t *)&elem->hash = hash;
+ for (j = 0;; j++) {
+ size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_strtab);
+ if (g_static_strtab[idx] == NULL) {
+ g_static_strtab[idx] = &grpc_static_mdstr_table[i];
+ break;
+ }
+ }
+ if (j > g_static_strtab_maxprobe) {
+ g_static_strtab_maxprobe = j;
+ }
+ }
+ for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
+ grpc_mdelem *elem = &grpc_static_mdelem_table[i];
+ grpc_mdstr *key =
+ &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 0]];
+ grpc_mdstr *value =
+ &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 1]];
+ uint32_t hash = GRPC_MDSTR_KV_HASH(key->hash, value->hash);
+ *(grpc_mdstr **)&elem->key = key;
+ *(grpc_mdstr **)&elem->value = value;
+ for (j = 0;; j++) {
+ size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_mdtab);
+ if (g_static_mdtab[idx] == NULL) {
+ g_static_mdtab[idx] = elem;
+ break;
+ }
+ }
+ if (j > g_static_mdtab_maxprobe) {
+ g_static_mdtab_maxprobe = j;
+ }
+ }
+ /* initialize shards */
+ for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
+ strtab_shard *shard = &g_strtab_shard[i];
+ gpr_mu_init(&shard->mu);
+ shard->count = 0;
+ shard->capacity = INITIAL_STRTAB_CAPACITY;
+ shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
+ memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
+ }
+ for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_mdtab_shard[i];
+ gpr_mu_init(&shard->mu);
+ shard->count = 0;
+ shard->free = 0;
+ shard->capacity = INITIAL_MDTAB_CAPACITY;
+ shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
+ memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
+ }
}
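
The static string and element tables above are populated by linear probing into arrays twice the element count, while tracking the longest probe sequence so later lookups know when to stop. A compact standalone sketch of that insert/lookup pairing over a hypothetical table of strings (a toy hash stands in for murmur3):

    #include <stdio.h>
    #include <string.h>

    #define NELEMS 4
    #define TABLE_SIZE (NELEMS * 2) /* half-empty table keeps probes short */

    static const char *table[TABLE_SIZE];
    static size_t maxprobe;

    /* toy hash: sum of bytes (the real code uses seeded murmur3) */
    static size_t hash_str(const char *s) {
      size_t h = 0;
      while (*s) h += (unsigned char)*s++;
      return h;
    }

    static void insert(const char *s) {
      size_t h = hash_str(s), j;
      for (j = 0;; j++) {
        size_t idx = (h + j) % TABLE_SIZE;
        if (table[idx] == NULL) {
          table[idx] = s;
          break;
        }
      }
      if (j > maxprobe) maxprobe = j; /* longest probe chain seen so far */
    }

    static const char *lookup(const char *s) {
      size_t h = hash_str(s), j;
      for (j = 0; j <= maxprobe; j++) {
        size_t idx = (h + j) % TABLE_SIZE;
        if (table[idx] == NULL) break; /* hole: cannot be present */
        if (strcmp(table[idx], s) == 0) return table[idx];
      }
      return NULL; /* fall back to the dynamic tables in the real code */
    }

    int main(void) {
      insert("te");
      insert("grpc-status");
      insert(":path");
      printf(":path present: %s\n", lookup(":path") ? "yes" : "no");
      printf("x-foo present: %s\n", lookup("x-foo") ? "yes" : "no");
      return 0;
    }
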
-static void discard_metadata(grpc_mdctx *ctx) {
+void grpc_mdctx_global_shutdown(void) {
size_t i;
- internal_metadata *next, *cur;
-
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- cur = ctx->mdtab[i];
- while (cur) {
- GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
- next = cur->bucket_next;
- internal_string_unref(cur->key);
- internal_string_unref(cur->value);
- if (cur->user_data) {
- cur->destroy_user_data(cur->user_data);
+ for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_mdtab_shard[i];
+ gpr_mu_destroy(&shard->mu);
+ gc_mdtab(shard);
+ /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
+ if (shard->count != 0) {
+ gpr_log(GPR_DEBUG, "WARNING: %d metadata elements were leaked",
+ shard->count);
+ if (grpc_iomgr_abort_on_leaks()) {
+ abort();
}
- gpr_free(cur);
- cur = next;
- ctx->mdtab_free--;
- ctx->mdtab_count--;
}
- ctx->mdtab[i] = NULL;
+ gpr_free(shard->elems);
+ }
+ for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
+ strtab_shard *shard = &g_strtab_shard[i];
+ gpr_mu_destroy(&shard->mu);
+ /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
+ if (shard->count != 0) {
+ gpr_log(GPR_DEBUG, "WARNING: %d metadata strings were leaked",
+ shard->count);
+ if (grpc_iomgr_abort_on_leaks()) {
+ abort();
+ }
+ }
+ gpr_free(shard->strs);
}
}
-static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
- GPR_ASSERT(ctx->strtab_count == 0);
- GPR_ASSERT(ctx->mdtab_count == 0);
- GPR_ASSERT(ctx->mdtab_free == 0);
- gpr_free(ctx->strtab);
- gpr_free(ctx->mdtab);
- gpr_mu_unlock(&ctx->mu);
- gpr_mu_destroy(&ctx->mu);
- gpr_free(ctx);
+static int is_mdstr_static(grpc_mdstr *s) {
+ return s >= &grpc_static_mdstr_table[0] &&
+ s < &grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
}
-void grpc_mdctx_ref(grpc_mdctx *ctx) {
- lock(ctx);
- GPR_ASSERT(ctx->refs > 0);
- ctx->refs++;
- unlock(ctx);
+static int is_mdelem_static(grpc_mdelem *e) {
+ return e >= &grpc_static_mdelem_table[0] &&
+ e < &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
}
-void grpc_mdctx_unref(grpc_mdctx *ctx) {
- lock(ctx);
- GPR_ASSERT(ctx->refs > 0);
- ctx->refs--;
- unlock(ctx);
+static void ref_md_locked(mdtab_shard *shard,
+ internal_metadata *md DEBUG_ARGS) {
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%d->%d: '%s' = '%s'", md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1,
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+#endif
+ if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 2)) {
+ shard->free--;
+ } else {
+ GPR_ASSERT(1 != gpr_atm_no_barrier_fetch_add(&md->refcnt, -1));
+ }
}
-static void grow_strtab(grpc_mdctx *ctx) {
- size_t capacity = ctx->strtab_capacity * 2;
+static void grow_strtab(strtab_shard *shard) {
+ size_t capacity = shard->capacity * 2;
size_t i;
- internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
+ internal_string **strtab;
internal_string *s, *next;
+
+ GPR_TIMER_BEGIN("grow_strtab", 0);
+
+ strtab = gpr_malloc(sizeof(internal_string *) * capacity);
memset(strtab, 0, sizeof(internal_string *) * capacity);
- for (i = 0; i < ctx->strtab_capacity; i++) {
- for (s = ctx->strtab[i]; s; s = next) {
+ for (i = 0; i < shard->capacity; i++) {
+ for (s = shard->strs[i]; s; s = next) {
+ size_t idx = TABLE_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT, capacity);
next = s->bucket_next;
- s->bucket_next = strtab[s->hash % capacity];
- strtab[s->hash % capacity] = s;
+ s->bucket_next = strtab[idx];
+ strtab[idx] = s;
}
}
- gpr_free(ctx->strtab);
- ctx->strtab = strtab;
- ctx->strtab_capacity = capacity;
+ gpr_free(shard->strs);
+ shard->strs = strtab;
+ shard->capacity = capacity;
+
+ GPR_TIMER_END("grow_strtab", 0);
}
-static void internal_destroy_string(internal_string *is) {
+static void internal_destroy_string(strtab_shard *shard, internal_string *is) {
internal_string **prev_next;
internal_string *cur;
- grpc_mdctx *ctx = is->context;
+ GPR_TIMER_BEGIN("internal_destroy_string", 0);
if (is->has_base64_and_huffman_encoded) {
gpr_slice_unref(is->base64_and_huffman);
}
- for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
+ for (prev_next = &shard->strs[TABLE_IDX(is->hash, LOG2_STRTAB_SHARD_COUNT,
+ shard->capacity)],
cur = *prev_next;
cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
;
*prev_next = cur->bucket_next;
- ctx->strtab_count--;
+ shard->count--;
gpr_free(is);
-}
-
-static void internal_string_ref(internal_string *s) { ++s->refs; }
-
-static void internal_string_unref(internal_string *s) {
- GPR_ASSERT(s->refs > 0);
- if (0 == --s->refs) {
- internal_destroy_string(s);
- }
+ GPR_TIMER_END("internal_destroy_string", 0);
}
static void slice_ref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
- grpc_mdctx *ctx = is->context;
- lock(ctx);
- internal_string_ref(is);
- unlock(ctx);
+ GRPC_MDSTR_REF((grpc_mdstr *)(is));
}
static void slice_unref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
- grpc_mdctx *ctx = is->context;
- lock(ctx);
- internal_string_unref(is);
- unlock(ctx);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)(is));
}
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
- return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
+grpc_mdstr *grpc_mdstr_from_string(const char *str) {
+ return grpc_mdstr_from_buffer((const uint8_t *)str, strlen(str));
}
-grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
- grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
+grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice) {
+ grpc_mdstr *result = grpc_mdstr_from_buffer(GPR_SLICE_START_PTR(slice),
GPR_SLICE_LENGTH(slice));
gpr_slice_unref(slice);
return result;
}
-grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
- size_t length) {
- gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
+grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
+ uint32_t hash = gpr_murmur_hash3(buf, length, g_hash_seed);
internal_string *s;
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(hash, LOG2_STRTAB_SHARD_COUNT)];
+ size_t i;
+ size_t idx;
+
+ GPR_TIMER_BEGIN("grpc_mdstr_from_buffer", 0);
+
+ /* search for a static string */
+ for (i = 0; i <= g_static_strtab_maxprobe; i++) {
+ grpc_mdstr *ss;
+ idx = (hash + i) % GPR_ARRAY_SIZE(g_static_strtab);
+ ss = g_static_strtab[idx];
+ if (ss == NULL) break;
+ if (ss->hash == hash && GPR_SLICE_LENGTH(ss->slice) == length &&
+ 0 == memcmp(buf, GPR_SLICE_START_PTR(ss->slice), length)) {
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
+ return ss;
+ }
+ }
- lock(ctx);
+ gpr_mu_lock(&shard->mu);
/* search for an existing string */
- for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
+ idx = TABLE_IDX(hash, LOG2_STRTAB_SHARD_COUNT, shard->capacity);
+ for (s = shard->strs[idx]; s; s = s->bucket_next) {
if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
- internal_string_ref(s);
- unlock(ctx);
+ GRPC_MDSTR_REF((grpc_mdstr *)s);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
}
@@ -307,20 +386,20 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
if (length + 1 < GPR_SLICE_INLINED_SIZE) {
/* string data goes directly into the slice */
s = gpr_malloc(sizeof(internal_string));
- s->refs = 1;
+ gpr_atm_rel_store(&s->refcnt, 2);
s->slice.refcount = NULL;
memcpy(s->slice.data.inlined.bytes, buf, length);
s->slice.data.inlined.bytes[length] = 0;
- s->slice.data.inlined.length = length;
+ s->slice.data.inlined.length = (uint8_t)length;
} else {
/* string data goes after the internal_string header, and we +1 for null
terminator */
s = gpr_malloc(sizeof(internal_string) + length + 1);
- s->refs = 1;
+ gpr_atm_rel_store(&s->refcnt, 2);
s->refcount.ref = slice_ref;
s->refcount.unref = slice_unref;
s->slice.refcount = &s->refcount;
- s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
+ s->slice.data.refcounted.bytes = (uint8_t *)(s + 1);
s->slice.data.refcounted.length = length;
memcpy(s->slice.data.refcounted.bytes, buf, length);
/* add a null terminator for cheap c string conversion when desired */
@@ -328,260 +407,292 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
}
s->has_base64_and_huffman_encoded = 0;
s->hash = hash;
- s->context = ctx;
- s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
- ctx->strtab[hash % ctx->strtab_capacity] = s;
+ s->bucket_next = shard->strs[idx];
+ shard->strs[idx] = s;
- ctx->strtab_count++;
+ shard->count++;
- if (ctx->strtab_count > ctx->strtab_capacity * 2) {
- grow_strtab(ctx);
+ if (shard->count > shard->capacity * 2) {
+ grow_strtab(shard);
}
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
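
grpc_mdstr_from_buffer keeps short strings inside the slice's inline storage and only allocates refcounted heap storage when the bytes plus a null terminator will not fit. A reduced sketch of that size check with a hypothetical slice type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define INLINED_SIZE 16 /* stand-in for GPR_SLICE_INLINED_SIZE */

    typedef struct {
      size_t length;
      char *bytes; /* points at inlined[] or at heap storage */
      char inlined[INLINED_SIZE];
      int heap_backed;
    } mini_slice;

    static void make_slice(mini_slice *s, const char *buf, size_t length) {
      s->length = length;
      if (length + 1 < INLINED_SIZE) {
        /* small: copy straight into the inline buffer, no allocation */
        memcpy(s->inlined, buf, length);
        s->inlined[length] = 0;
        s->bytes = s->inlined;
        s->heap_backed = 0;
      } else {
        /* large: allocate, +1 for a cheap c-string terminator */
        s->bytes = malloc(length + 1);
        memcpy(s->bytes, buf, length);
        s->bytes[length] = 0;
        s->heap_backed = 1;
      }
    }

    int main(void) {
      const char *small = "te";
      const char *big = "a-rather-long-metadata-value";
      mini_slice a, b;
      make_slice(&a, small, strlen(small));
      make_slice(&b, big, strlen(big));
      printf("'%s' heap_backed=%d\n", a.bytes, a.heap_backed);
      printf("'%s' heap_backed=%d\n", b.bytes, b.heap_backed);
      if (b.heap_backed) free(b.bytes);
      return 0;
    }
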
-static void gc_mdtab(grpc_mdctx *ctx) {
+static void gc_mdtab(mdtab_shard *shard) {
size_t i;
internal_metadata **prev_next;
internal_metadata *md, *next;
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- prev_next = &ctx->mdtab[i];
- for (md = ctx->mdtab[i]; md; md = next) {
+ GPR_TIMER_BEGIN("gc_mdtab", 0);
+ for (i = 0; i < shard->capacity; i++) {
+ prev_next = &shard->elems[i];
+ for (md = shard->elems[i]; md; md = next) {
+ void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
- internal_string_unref(md->key);
- internal_string_unref(md->value);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)md->key);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)md->value);
if (md->user_data) {
- md->destroy_user_data(md->user_data);
+ ((destroy_user_data_func)gpr_atm_no_barrier_load(
+ &md->destroy_user_data))(user_data);
}
gpr_free(md);
*prev_next = next;
- ctx->mdtab_free--;
- ctx->mdtab_count--;
+ shard->free--;
+ shard->count--;
} else {
prev_next = &md->bucket_next;
}
}
}
-
- GPR_ASSERT(ctx->mdtab_free == 0);
+ GPR_TIMER_END("gc_mdtab", 0);
}
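
gc_mdtab walks each bucket with a pointer-to-pointer (prev_next), so unlinking a dead element needs no special case for the bucket head. A standalone sketch of that unlink idiom over a toy list of refcounted nodes:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node {
      int refcnt;
      int id;
      struct node *next;
    } node;

    /* free every node whose refcount has dropped to zero, in place */
    static void sweep(node **head) {
      node **prev_next = head; /* address of the pointer that links to cur */
      node *cur = *head;
      while (cur != NULL) {
        node *next = cur->next;
        if (cur->refcnt == 0) {
          *prev_next = next; /* unlink without caring if cur was the head */
          free(cur);
        } else {
          prev_next = &cur->next;
        }
        cur = next;
      }
    }

    static node *push(node *head, int id, int refcnt) {
      node *n = malloc(sizeof(*n));
      n->id = id;
      n->refcnt = refcnt;
      n->next = head;
      return n;
    }

    int main(void) {
      node *head = NULL, *n;
      head = push(head, 1, 0); /* dead */
      head = push(head, 2, 1); /* live */
      head = push(head, 3, 0); /* dead */
      sweep(&head);
      for (n = head; n; n = n->next) printf("kept %d\n", n->id);
      while (head) { n = head->next; free(head); head = n; }
      return 0;
    }
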
-static void grow_mdtab(grpc_mdctx *ctx) {
- size_t capacity = ctx->mdtab_capacity * 2;
+static void grow_mdtab(mdtab_shard *shard) {
+ size_t capacity = shard->capacity * 2;
size_t i;
- internal_metadata **mdtab =
- gpr_malloc(sizeof(internal_metadata *) * capacity);
+ internal_metadata **mdtab;
internal_metadata *md, *next;
- gpr_uint32 hash;
+ uint32_t hash;
+
+ GPR_TIMER_BEGIN("grow_mdtab", 0);
+
+ mdtab = gpr_malloc(sizeof(internal_metadata *) * capacity);
memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- for (md = ctx->mdtab[i]; md; md = next) {
+ for (i = 0; i < shard->capacity; i++) {
+ for (md = shard->elems[i]; md; md = next) {
+ size_t idx;
hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
next = md->bucket_next;
- md->bucket_next = mdtab[hash % capacity];
- mdtab[hash % capacity] = md;
+ idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, capacity);
+ md->bucket_next = mdtab[idx];
+ mdtab[idx] = md;
}
}
- gpr_free(ctx->mdtab);
- ctx->mdtab = mdtab;
- ctx->mdtab_capacity = capacity;
+ gpr_free(shard->elems);
+ shard->elems = mdtab;
+ shard->capacity = capacity;
+
+ GPR_TIMER_END("grow_mdtab", 0);
}
-static void rehash_mdtab(grpc_mdctx *ctx) {
- if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
- gc_mdtab(ctx);
+static void rehash_mdtab(mdtab_shard *shard) {
+ if (shard->free > shard->capacity / 4) {
+ gc_mdtab(shard);
} else {
- grow_mdtab(ctx);
+ grow_mdtab(shard);
}
}
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
- grpc_mdstr *mkey,
+grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *mkey,
grpc_mdstr *mvalue) {
internal_string *key = (internal_string *)mkey;
internal_string *value = (internal_string *)mvalue;
- gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
+ uint32_t hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
internal_metadata *md;
+ mdtab_shard *shard = &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
+ size_t i;
+ size_t idx;
+
+ GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
+
+ if (is_mdstr_static(mkey) && is_mdstr_static(mvalue)) {
+ for (i = 0; i <= g_static_mdtab_maxprobe; i++) {
+ grpc_mdelem *smd;
+ idx = (hash + i) % GPR_ARRAY_SIZE(g_static_mdtab);
+ smd = g_static_mdtab[idx];
+ if (smd == NULL) break;
+ if (smd->key == mkey && smd->value == mvalue) {
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
+ return smd;
+ }
+ }
+ }
- GPR_ASSERT(key->context == ctx);
- GPR_ASSERT(value->context == ctx);
-
- lock(ctx);
+ gpr_mu_lock(&shard->mu);
+ idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, shard->capacity);
/* search for an existing pair */
- for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
+ for (md = shard->elems[idx]; md; md = md->bucket_next) {
if (md->key == key && md->value == value) {
- ref_md_locked(md);
- internal_string_unref(key);
- internal_string_unref(value);
- unlock(ctx);
+ REF_MD_LOCKED(shard, md);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)key);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)value);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
}
/* not found: create a new pair */
md = gpr_malloc(sizeof(internal_metadata));
- gpr_atm_rel_store(&md->refcnt, 1);
- md->context = ctx;
+ gpr_atm_rel_store(&md->refcnt, 2);
md->key = key;
md->value = value;
- md->user_data = NULL;
- md->destroy_user_data = NULL;
- md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
- ctx->mdtab[hash % ctx->mdtab_capacity] = md;
- ctx->mdtab_count++;
-
- if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
- rehash_mdtab(ctx);
+ md->user_data = 0;
+ md->destroy_user_data = 0;
+ md->bucket_next = shard->elems[idx];
+ shard->elems[idx] = md;
+ gpr_mu_init(&md->mu_user_data);
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(GPR_DEBUG, "ELM NEW:%p:%d: '%s' = '%s'", md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+#endif
+ shard->count++;
+
+ if (shard->count > shard->capacity * 2) {
+ rehash_mdtab(shard);
}
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
+
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
-grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
- const char *value) {
- return grpc_mdelem_from_metadata_strings(ctx,
- grpc_mdstr_from_string(ctx, key),
- grpc_mdstr_from_string(ctx, value));
+grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value) {
+ return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_string(key),
+ grpc_mdstr_from_string(value));
}
-grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
- gpr_slice value) {
- return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
- grpc_mdstr_from_slice(ctx, value));
+grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value) {
+ return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_slice(key),
+ grpc_mdstr_from_slice(value));
}
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
- const char *key,
- const gpr_uint8 *value,
+grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
+ const uint8_t *value,
size_t value_length) {
return grpc_mdelem_from_metadata_strings(
- ctx, grpc_mdstr_from_string(ctx, key),
- grpc_mdstr_from_buffer(ctx, value, value_length));
+ grpc_mdstr_from_string(key), grpc_mdstr_from_buffer(value, value_length));
}
-grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd) {
+grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
internal_metadata *md = (internal_metadata *)gmd;
+ if (is_mdelem_static(gmd)) return gmd;
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%d->%d: '%s' = '%s'", md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1,
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
/* use C assert to have this removed in opt builds */
- assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
+ assert(gpr_atm_no_barrier_load(&md->refcnt) >= 2);
gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
return gmd;
}
-void grpc_mdelem_unref(grpc_mdelem *gmd) {
+void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
internal_metadata *md = (internal_metadata *)gmd;
- grpc_mdctx *ctx = md->context;
- lock(ctx);
- assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
- if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
- ctx->mdtab_free++;
+ if (!md) return;
+ if (is_mdelem_static(gmd)) return;
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1,
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
+ grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+#endif
+ if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
+ uint32_t hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
+ mdtab_shard *shard =
+ &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
+ GPR_TIMER_BEGIN("grpc_mdelem_unref.to_zero", 0);
+ gpr_mu_lock(&shard->mu);
+ if (1 == gpr_atm_no_barrier_load(&md->refcnt)) {
+ shard->free++;
+ gpr_atm_no_barrier_store(&md->refcnt, 0);
+ }
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdelem_unref.to_zero", 0);
}
- unlock(ctx);
}
const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
return (const char *)GPR_SLICE_START_PTR(s->slice);
}
-grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs) {
+grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
- internal_string_ref(s);
- unlock(ctx);
+ if (is_mdstr_static(gs)) return gs;
+ GPR_ASSERT(gpr_atm_full_fetch_add(&s->refcnt, 1) != 0);
return gs;
}
-void grpc_mdstr_unref(grpc_mdstr *gs) {
+void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
- internal_string_unref(s);
- unlock(ctx);
-}
-
-size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_capacity;
-}
-
-size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_count;
-}
-
-size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_free;
+ if (is_mdstr_static(gs)) return;
+ if (2 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
+ gpr_mu_lock(&shard->mu);
+ if (1 == gpr_atm_no_barrier_load(&s->refcnt)) {
+ internal_destroy_string(shard, s);
+ }
+ gpr_mu_unlock(&shard->mu);
+ }
}
-void *grpc_mdelem_get_user_data(grpc_mdelem *md,
- void (*if_destroy_func)(void *)) {
+void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
internal_metadata *im = (internal_metadata *)md;
- return im->destroy_user_data == if_destroy_func ? im->user_data : NULL;
+ if (is_mdelem_static(md)) {
+ return (void *)grpc_static_mdelem_user_data[md - grpc_static_mdelem_table];
+ }
+ if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
+ return (void *)gpr_atm_no_barrier_load(&im->user_data);
+ } else {
+ return NULL;
+ }
}
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data) {
internal_metadata *im = (internal_metadata *)md;
+ GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
- if (im->destroy_user_data) {
- im->destroy_user_data(im->user_data);
+ gpr_mu_lock(&im->mu_user_data);
+ if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
+ /* user data can only be set once */
+ gpr_mu_unlock(&im->mu_user_data);
+ if (destroy_func != NULL) {
+ destroy_func(user_data);
+ }
+ return;
}
- im->destroy_user_data = destroy_func;
- im->user_data = user_data;
+ gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
+ gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
+ gpr_mu_unlock(&im->mu_user_data);
}
gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
internal_string *s = (internal_string *)gs;
gpr_slice slice;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
+ gpr_mu_lock(&shard->mu);
if (!s->has_base64_and_huffman_encoded) {
s->base64_and_huffman =
grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
s->has_base64_and_huffman_encoded = 1;
}
slice = s->base64_and_huffman;
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
return slice;
}
-
-void grpc_mdctx_lock(grpc_mdctx *ctx) { lock(ctx); }
-
-void grpc_mdctx_locked_mdelem_unref(grpc_mdctx *ctx, grpc_mdelem *gmd) {
- internal_metadata *md = (internal_metadata *)gmd;
- grpc_mdctx *elem_ctx = md->context;
- GPR_ASSERT(ctx == elem_ctx);
- assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
- if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
- ctx->mdtab_free++;
- }
-}
-
-void grpc_mdctx_unlock(grpc_mdctx *ctx) { unlock(ctx); }
-
-int grpc_mdstr_is_legal_header(grpc_mdstr *s) {
- /* TODO(ctiller): consider caching this, or computing it on construction */
- const gpr_uint8 *p = GPR_SLICE_START_PTR(s->slice);
- const gpr_uint8 *e = GPR_SLICE_END_PTR(s->slice);
- for (; p != e; p++) {
- if (*p < 32 || *p > 126) return 0;
- }
- return 1;
-}
-
-int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s) {
- /* TODO(ctiller): consider caching this */
- return grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(s->slice),
- GPR_SLICE_LENGTH(s->slice));
-}
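
Illustrative usage sketch of the context-free constructors above (assuming grpc_init()/grpc_shutdown() drive grpc_mdctx_global_init()/grpc_mdctx_global_shutdown(), as elsewhere in this change; the literal key/value strings are arbitrary):

    #include <stdio.h>

    #include <grpc/grpc.h>

    #include "src/core/transport/metadata.h"

    /* Interned elements now live in global, sharded tables instead of a
       per-call grpc_mdctx; a fresh element starts with refcnt 2 (one ref
       owned by its shard, one handed to the caller), so dropping the
       caller's ref marks it free in the shard rather than destroying it
       immediately. */
    int main(void) {
      grpc_init(); /* sets up the global metadata tables */

      grpc_mdelem *md = grpc_mdelem_from_strings("user-agent", "demo/1.0");
      printf("'%s': '%s'\n", grpc_mdstr_as_c_string(md->key),
             grpc_mdstr_as_c_string(md->value));

      GRPC_MDELEM_UNREF(md); /* element stays cached until its shard reclaims it */

      grpc_shutdown();
      return 0;
    }
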
diff --git a/src/core/transport/metadata.h b/src/core/transport/metadata.h
index 76e3f3c1f8..5ab397848c 100644
--- a/src/core/transport/metadata.h
+++ b/src/core/transport/metadata.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_METADATA_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_METADATA_H
+#ifndef GRPC_CORE_TRANSPORT_METADATA_H
+#define GRPC_CORE_TRANSPORT_METADATA_H
#include <grpc/support/slice.h>
#include <grpc/support/useful.h>
@@ -59,17 +59,22 @@
grpc_mdelem instances MAY live longer than their refcount implies, and are
garbage collected periodically, meaning cached data can easily outlive a
- single request. */
+ single request.
+
+ STATIC METADATA: in static_metadata.h we declare a set of static metadata.
+ These mdelems and mdstrs are available via pre-declared code generated macros
+ and are available to code anywhere between grpc_init() and grpc_shutdown().
+ They are not refcounted, but can be passed to _ref and _unref functions
+ declared here - in which case those functions are effectively no-ops. */
/* Forward declarations */
-typedef struct grpc_mdctx grpc_mdctx;
typedef struct grpc_mdstr grpc_mdstr;
typedef struct grpc_mdelem grpc_mdelem;
/* if changing this, make identical changes in internal_string in metadata.c */
struct grpc_mdstr {
const gpr_slice slice;
- const gpr_uint32 hash;
+ const uint32_t hash;
/* there is a private part to this in metadata.c */
};
@@ -81,25 +86,14 @@ struct grpc_mdelem {
/* there is a private part to this in metadata.c */
};
-/* Create/orphan a metadata context */
-grpc_mdctx *grpc_mdctx_create(void);
-grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed);
-void grpc_mdctx_ref(grpc_mdctx *mdctx);
-void grpc_mdctx_unref(grpc_mdctx *mdctx);
-
-/* Test only accessors to internal state - only for testing this code - do not
- rely on it outside of metadata_test.c */
-size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *mdctx);
-size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *mdctx);
-size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *mdctx);
+void grpc_test_only_set_metadata_hash_seed(uint32_t seed);
/* Constructors for grpc_mdstr instances; take a variety of data types that
clients may have handy */
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str);
+grpc_mdstr *grpc_mdstr_from_string(const char *str);
/* Unrefs the slice. */
-grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice);
-grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *str,
- size_t length);
+grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice);
+grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *str, size_t length);
/* Returns a borrowed slice from the mdstr with its contents base64 encoded
and huffman compressed */
@@ -107,16 +101,13 @@ gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *str);
/* Constructors for grpc_mdelem instances; take a variety of data types that
clients may have handy */
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx, grpc_mdstr *key,
+grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *key,
grpc_mdstr *value);
-grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
- const char *value);
+grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value);
/* Unrefs the slices. */
-grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
- gpr_slice value);
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
- const char *key,
- const gpr_uint8 *value,
+grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value);
+grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
+ const uint8_t *value,
size_t value_length);
/* Mutator and accessor for grpc_mdelem user data. The destructor function
@@ -127,31 +118,39 @@ void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data);
/* Reference counting */
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#define GRPC_MDSTR_REF(s) grpc_mdstr_ref((s), __FILE__, __LINE__)
+#define GRPC_MDSTR_UNREF(s) grpc_mdstr_unref((s), __FILE__, __LINE__)
+#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
+#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
+grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *s, const char *file, int line);
+void grpc_mdstr_unref(grpc_mdstr *s, const char *file, int line);
+grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *md, const char *file, int line);
+void grpc_mdelem_unref(grpc_mdelem *md, const char *file, int line);
+#else
+#define GRPC_MDSTR_REF(s) grpc_mdstr_ref((s))
+#define GRPC_MDSTR_UNREF(s) grpc_mdstr_unref((s))
+#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
+#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *s);
void grpc_mdstr_unref(grpc_mdstr *s);
-
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *md);
void grpc_mdelem_unref(grpc_mdelem *md);
+#endif
/* Recover a char* from a grpc_mdstr. The returned string is null terminated.
Does not promise that the returned string has no embedded nulls however. */
const char *grpc_mdstr_as_c_string(grpc_mdstr *s);
+#define GRPC_MDSTR_LENGTH(s) (GPR_SLICE_LENGTH(s->slice))
+
int grpc_mdstr_is_legal_header(grpc_mdstr *s);
+int grpc_mdstr_is_legal_nonbin_header(grpc_mdstr *s);
int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s);
-/* Batch mode metadata functions.
- These API's have equivalents above, but allow taking the mdctx just once,
- performing a bunch of work, and then leaving the mdctx. */
-
-/* Lock the metadata context: it's only safe to call _locked_ functions against
- this context from the calling thread until grpc_mdctx_unlock is called */
-void grpc_mdctx_lock(grpc_mdctx *ctx);
-/* Unref a metadata element */
-void grpc_mdctx_locked_mdelem_unref(grpc_mdctx *ctx, grpc_mdelem *elem);
-/* Unlock the metadata context */
-void grpc_mdctx_unlock(grpc_mdctx *ctx);
-
#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_METADATA_H */
+void grpc_mdctx_global_init(void);
+void grpc_mdctx_global_shutdown(void);
+
+#endif /* GRPC_CORE_TRANSPORT_METADATA_H */
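
A short sketch of the ref-counting convention the new macros above establish; the cached_elem type is invented for the example:

    #include "src/core/transport/metadata.h"

    /* Code that holds on to an mdelem takes its own reference through the
       macros, so a -DGRPC_METADATA_REFCOUNT_DEBUG build logs the
       __FILE__/__LINE__ of every ref/unref; for static elements both calls
       are no-ops. */
    typedef struct cached_elem {
      grpc_mdelem *md; /* NULL or an owned reference */
    } cached_elem;

    static void cached_elem_set(cached_elem *c, grpc_mdelem *md) {
      grpc_mdelem *old = c->md;
      c->md = GRPC_MDELEM_REF(md);
      GRPC_MDELEM_UNREF(old); /* grpc_mdelem_unref tolerates NULL */
    }
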
diff --git a/src/core/transport/stream_op.c b/src/core/transport/metadata_batch.c
index 81df5455f6..1266862f82 100644
--- a/src/core/transport/stream_op.c
+++ b/src/core/transport/metadata_batch.c
@@ -31,149 +31,14 @@
*
*/
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-/* Exponential growth function: Given x, return a larger x.
- Currently we grow by 1.5 times upon reallocation. */
-#define GROW(x) (3 * (x) / 2)
-
-void grpc_sopb_init(grpc_stream_op_buffer *sopb) {
- sopb->ops = sopb->inlined_ops;
- sopb->nops = 0;
- sopb->capacity = GRPC_SOPB_INLINE_ELEMENTS;
-}
-
-void grpc_sopb_destroy(grpc_stream_op_buffer *sopb) {
- grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
- if (sopb->ops != sopb->inlined_ops) gpr_free(sopb->ops);
-}
-
-void grpc_sopb_reset(grpc_stream_op_buffer *sopb) {
- grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
- sopb->nops = 0;
-}
-
-void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b) {
- GPR_SWAP(size_t, a->nops, b->nops);
- GPR_SWAP(size_t, a->capacity, b->capacity);
-
- if (a->ops == a->inlined_ops) {
- if (b->ops == b->inlined_ops) {
- /* swap contents of inlined buffer */
- grpc_stream_op temp[GRPC_SOPB_INLINE_ELEMENTS];
- memcpy(temp, a->ops, b->nops * sizeof(grpc_stream_op));
- memcpy(a->ops, b->ops, a->nops * sizeof(grpc_stream_op));
- memcpy(b->ops, temp, b->nops * sizeof(grpc_stream_op));
- } else {
- /* a is inlined, b is not - copy a inlined into b, fix pointers */
- a->ops = b->ops;
- b->ops = b->inlined_ops;
- memcpy(b->ops, a->inlined_ops, b->nops * sizeof(grpc_stream_op));
- }
- } else if (b->ops == b->inlined_ops) {
- /* b is inlined, a is not - copy b inlined int a, fix pointers */
- b->ops = a->ops;
- a->ops = a->inlined_ops;
- memcpy(a->ops, b->inlined_ops, a->nops * sizeof(grpc_stream_op));
- } else {
- /* no inlining: easy swap */
- GPR_SWAP(grpc_stream_op *, a->ops, b->ops);
- }
-}
-
-void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
- size_t i;
- for (i = 0; i < nops; i++) {
- switch (ops[i].type) {
- case GRPC_OP_SLICE:
- gpr_slice_unref(ops[i].data.slice);
- break;
- case GRPC_OP_METADATA:
- grpc_metadata_batch_destroy(&ops[i].data.metadata);
- break;
- case GRPC_NO_OP:
- case GRPC_OP_BEGIN_MESSAGE:
- break;
- }
- }
-}
-
-static void expandto(grpc_stream_op_buffer *sopb, size_t new_capacity) {
- sopb->capacity = new_capacity;
- if (sopb->ops == sopb->inlined_ops) {
- sopb->ops = gpr_malloc(sizeof(grpc_stream_op) * new_capacity);
- memcpy(sopb->ops, sopb->inlined_ops, sopb->nops * sizeof(grpc_stream_op));
- } else {
- sopb->ops = gpr_realloc(sopb->ops, sizeof(grpc_stream_op) * new_capacity);
- }
-}
-
-static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
- grpc_stream_op *out;
-
- GPR_ASSERT(sopb->nops <= sopb->capacity);
- if (sopb->nops == sopb->capacity) {
- expandto(sopb, GROW(sopb->capacity));
- }
- out = sopb->ops + sopb->nops;
- sopb->nops++;
- return out;
-}
-
-void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb) {
- add(sopb)->type = GRPC_NO_OP;
-}
-
-void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
- gpr_uint32 flags) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_BEGIN_MESSAGE;
- op->data.begin_message.length = length;
- op->data.begin_message.flags = flags;
-}
-
-void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
- grpc_metadata_batch b) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_METADATA;
- op->data.metadata = b;
-}
-
-void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_SLICE;
- op->data.slice = slice;
-}
-
-void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
- size_t nops) {
- size_t orig_nops = sopb->nops;
- size_t new_nops = orig_nops + nops;
-
- if (new_nops > sopb->capacity) {
- expandto(sopb, GPR_MAX(GROW(sopb->capacity), new_nops));
- }
-
- memcpy(sopb->ops + orig_nops, ops, sizeof(grpc_stream_op) * nops);
- sopb->nops = new_nops;
-}
-
-void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst) {
- if (src->nops == 0) {
- return;
- }
- if (dst->nops == 0) {
- grpc_sopb_swap(src, dst);
- return;
- }
- grpc_sopb_append(dst, src->ops, src->nops);
- src->nops = 0;
-}
+#include "src/core/profiling/timers.h"
static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
@@ -198,23 +63,18 @@ static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
}
#endif /* NDEBUG */
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
- batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail =
- NULL;
- batch->deadline = gpr_inf_future;
+ batch->list.head = batch->list.tail = NULL;
+ batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {
grpc_linked_mdelem *l;
for (l = batch->list.head; l; l = l->next) {
- grpc_mdelem_unref(l->md);
- }
- for (l = batch->garbage.head; l; l = l->next) {
- grpc_mdelem_unref(l->md);
+ GRPC_MDELEM_UNREF(l->md);
}
}
@@ -258,6 +118,7 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
GPR_ASSERT(storage->md);
storage->prev = list->tail;
storage->next = NULL;
+ storage->reserved = NULL;
if (list->tail != NULL) {
list->tail->next = storage;
} else {
@@ -272,18 +133,10 @@ void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
link_tail(&batch->list, storage);
}
-void grpc_metadata_batch_merge(grpc_metadata_batch *target,
- grpc_metadata_batch *add) {
- grpc_linked_mdelem *l;
- grpc_linked_mdelem *next;
- for (l = add->list.head; l; l = next) {
- next = l->next;
- link_tail(&target->list, l);
- }
- for (l = add->garbage.head; l; l = next) {
- next = l->next;
- link_tail(&target->garbage, l);
- }
+void grpc_metadata_batch_move(grpc_metadata_batch *dst,
+ grpc_metadata_batch *src) {
+ *dst = *src;
+ memset(src, 0, sizeof(grpc_metadata_batch));
}
void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
@@ -293,8 +146,9 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
+ GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
+
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
grpc_mdelem *orig = l->md;
grpc_mdelem *filt = filter(user_data, orig);
@@ -313,12 +167,28 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
batch->list.tail = l->prev;
}
assert_valid_list(&batch->list);
- link_head(&batch->garbage, l);
+ GRPC_MDELEM_UNREF(l->md);
} else if (filt != orig) {
- grpc_mdelem_unref(orig);
+ GRPC_MDELEM_UNREF(orig);
l->md = filt;
}
}
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
+
+ GPR_TIMER_END("grpc_metadata_batch_filter", 0);
+}
+
+static grpc_mdelem *no_metadata_for_you(void *user_data, grpc_mdelem *elem) {
+ return NULL;
+}
+
+void grpc_metadata_batch_clear(grpc_metadata_batch *batch) {
+ batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ grpc_metadata_batch_filter(batch, no_metadata_for_you, NULL);
+}
+
+int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
+ return batch->list.head == NULL &&
+ gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type),
+ batch->deadline) == 0;
}
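
A sketch of how a filter callback would use the contract above (drop_user_agent/scrub_user_agent are illustrative names, not part of this change):

    #include "src/core/transport/metadata_batch.h"
    #include "src/core/transport/static_metadata.h"

    /* grpc_metadata_batch_filter contract: return NULL to drop an element
       (the batch unrefs it), the element itself to keep it, or a different
       already-owned element to substitute it. Pointer comparison on keys is
       valid because mdstrs are interned against the static table. */
    static grpc_mdelem *drop_user_agent(void *user_data, grpc_mdelem *elem) {
      if (elem->key == GRPC_MDSTR_USER_AGENT) return NULL;
      return elem;
    }

    static void scrub_user_agent(grpc_metadata_batch *batch) {
      grpc_metadata_batch_filter(batch, drop_user_agent, NULL);
    }
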
diff --git a/src/core/transport/stream_op.h b/src/core/transport/metadata_batch.h
index 842fc932b9..9337b28328 100644
--- a/src/core/transport/stream_op.h
+++ b/src/core/transport/metadata_batch.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H
+#ifndef GRPC_CORE_TRANSPORT_METADATA_BATCH_H
+#define GRPC_CORE_TRANSPORT_METADATA_BATCH_H
#include <grpc/grpc.h>
#include <grpc/support/port_platform.h>
@@ -40,43 +40,11 @@
#include <grpc/support/time.h>
#include "src/core/transport/metadata.h"
-/* this many stream ops are inlined into a sopb before allocating */
-#define GRPC_SOPB_INLINE_ELEMENTS 16
-
-/* Operations that can be performed on a stream.
- Used by grpc_stream_op. */
-typedef enum grpc_stream_op_code {
- /* Do nothing code. Useful if rewriting a batch to exclude some operations.
- Must be ignored by receivers */
- GRPC_NO_OP,
- GRPC_OP_METADATA,
- /* Begin a message/metadata element/status - as defined by
- grpc_message_type. */
- GRPC_OP_BEGIN_MESSAGE,
- /* Add a slice of data to the current message/metadata element/status.
- Must not overflow the forward declared length. */
- GRPC_OP_SLICE
-} grpc_stream_op_code;
-
-/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
- * compression for the message */
-#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
-/** Mask of all valid internal flags. */
-#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
-
-/* Arguments for GRPC_OP_BEGIN_MESSAGE */
-typedef struct grpc_begin_message {
- /* How many bytes of data will this message contain */
- gpr_uint32 length;
- /* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits,
- * GRPC_WRITE_INTERNAL_* for the internal ones. */
- gpr_uint32 flags;
-} grpc_begin_message;
-
typedef struct grpc_linked_mdelem {
grpc_mdelem *md;
struct grpc_linked_mdelem *next;
struct grpc_linked_mdelem *prev;
+ void *reserved;
} grpc_linked_mdelem;
typedef struct grpc_mdelem_list {
@@ -87,10 +55,6 @@ typedef struct grpc_mdelem_list {
typedef struct grpc_metadata_batch {
/** Metadata elements in this batch */
grpc_mdelem_list list;
- /** Elements that have been removed from the batch, but have
- not yet been unreffed - used to allow collecting garbage
- under a single metadata context lock */
- grpc_mdelem_list garbage;
/** Used to calculate grpc-timeout at the point of sending,
or gpr_inf_future if this batch does not need to send a
grpc-timeout */
@@ -99,11 +63,16 @@ typedef struct grpc_metadata_batch {
void grpc_metadata_batch_init(grpc_metadata_batch *batch);
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch);
-void grpc_metadata_batch_merge(grpc_metadata_batch *target,
- grpc_metadata_batch *add);
+void grpc_metadata_batch_clear(grpc_metadata_batch *batch);
+int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch);
+
+/** Moves the metadata information from \a src to \a dst. Upon return, \a src is
+ * zeroed. */
+void grpc_metadata_batch_move(grpc_metadata_batch *dst,
+ grpc_metadata_batch *src);
/** Add \a storage to the beginning of \a batch. storage->md is
- assumed to be valid.
+ assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
@@ -153,54 +122,4 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
} while (0)
#endif
-/* Represents a single operation performed on a stream/transport */
-typedef struct grpc_stream_op {
- /* the operation to be applied */
- enum grpc_stream_op_code type;
- /* the arguments to this operation. union fields are named according to the
- associated op-code */
- union {
- grpc_begin_message begin_message;
- grpc_metadata_batch metadata;
- gpr_slice slice;
- } data;
-} grpc_stream_op;
-
-/** A stream op buffer is a wrapper around stream operations that is
- * dynamically extendable. */
-typedef struct grpc_stream_op_buffer {
- grpc_stream_op *ops;
- size_t nops;
- size_t capacity;
- grpc_stream_op inlined_ops[GRPC_SOPB_INLINE_ELEMENTS];
-} grpc_stream_op_buffer;
-
-/* Initialize a stream op buffer */
-void grpc_sopb_init(grpc_stream_op_buffer *sopb);
-/* Destroy a stream op buffer */
-void grpc_sopb_destroy(grpc_stream_op_buffer *sopb);
-/* Reset a sopb to no elements */
-void grpc_sopb_reset(grpc_stream_op_buffer *sopb);
-/* Swap two sopbs */
-void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b);
-
-void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops);
-
-/* Append a GRPC_NO_OP to a buffer */
-void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb);
-/* Append a GRPC_OP_BEGIN to a buffer */
-void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
- gpr_uint32 flags);
-void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
- grpc_metadata_batch metadata);
-/* Append a GRPC_SLICE to a buffer - does not ref/unref the slice */
-void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice);
-/* Append a buffer to a buffer - does not ref/unref any internal objects */
-void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
- size_t nops);
-
-void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst);
-
-char *grpc_sopb_string(grpc_stream_op_buffer *sopb);
-
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H */
+#endif /* GRPC_CORE_TRANSPORT_METADATA_BATCH_H */
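
A sketch of the caller-owned storage rule documented above; call_trailers is an invented container type:

    #include "src/core/transport/metadata_batch.h"
    #include "src/core/transport/static_metadata.h"

    /* grpc_linked_mdelem storage belongs to the caller and must outlive the
       batch, so it is typically embedded in per-call state rather than
       allocated per element. */
    typedef struct call_trailers {
      grpc_metadata_batch batch;
      grpc_linked_mdelem status_storage; /* lives as long as batch */
    } call_trailers;

    static void init_ok_trailers(call_trailers *t) {
      grpc_metadata_batch_init(&t->batch);
      t->status_storage.md = GRPC_MDELEM_GRPC_STATUS_0; /* static: no ref needed */
      grpc_metadata_batch_link_tail(&t->batch, &t->status_storage);
    }
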
diff --git a/src/core/transport/static_metadata.c b/src/core/transport/static_metadata.c
new file mode 100644
index 0000000000..eeedae0619
--- /dev/null
+++ b/src/core/transport/static_metadata.c
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * WARNING: Auto-generated code.
+ *
+ * To make changes to this file, change
+ * tools/codegen/core/gen_static_metadata.py,
+ * and then re-run it.
+ *
+ * See metadata.h for an explanation of the interface here, and metadata.c
+ * for an explanation of what's going on.
+ */
+
+#include "src/core/transport/static_metadata.h"
+
+grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+
+grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 7, 5, 2, 4, 8, 6, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
+ {11, 35, 10, 35, 12, 35, 12, 49, 13, 35, 14, 35, 15, 35, 16, 35, 17, 35,
+ 19, 35, 20, 35, 21, 35, 24, 35, 25, 35, 26, 35, 27, 35, 28, 35, 29, 35,
+ 30, 18, 30, 35, 31, 35, 32, 35, 36, 35, 37, 35, 38, 35, 39, 35, 42, 33,
+ 42, 34, 42, 48, 42, 53, 42, 54, 42, 55, 42, 56, 43, 33, 43, 48, 43, 53,
+ 46, 0, 46, 1, 46, 2, 50, 35, 57, 35, 58, 35, 59, 35, 60, 35, 61, 35,
+ 62, 35, 63, 35, 64, 35, 65, 35, 66, 40, 66, 68, 67, 78, 67, 79, 69, 35,
+ 70, 35, 71, 35, 72, 35, 73, 35, 74, 35, 75, 41, 75, 51, 75, 52, 76, 35,
+ 77, 35, 80, 3, 80, 4, 80, 5, 80, 6, 80, 7, 80, 8, 80, 9, 81, 35,
+ 82, 83, 84, 35, 85, 35, 86, 35, 87, 35, 88, 35};
+
+const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
+ "0", "1", "2", "200", "204", "206", "304", "400", "404", "500", "accept",
+ "accept-charset", "accept-encoding", "accept-language", "accept-ranges",
+ "access-control-allow-origin", "age", "allow", "application/grpc",
+ ":authority", "authorization", "cache-control", "census-bin",
+ "census-binary-bin", "content-disposition", "content-encoding",
+ "content-language", "content-length", "content-location", "content-range",
+ "content-type", "cookie", "date", "deflate", "deflate,gzip", "", "etag",
+ "expect", "expires", "from", "GET", "grpc", "grpc-accept-encoding",
+ "grpc-encoding", "grpc-internal-encoding-request", "grpc-message",
+ "grpc-status", "grpc-timeout", "gzip", "gzip, deflate", "host", "http",
+ "https", "identity", "identity,deflate", "identity,deflate,gzip",
+ "identity,gzip", "if-match", "if-modified-since", "if-none-match",
+ "if-range", "if-unmodified-since", "last-modified", "link", "location",
+ "max-forwards", ":method", ":path", "POST", "proxy-authenticate",
+ "proxy-authorization", "range", "referer", "refresh", "retry-after",
+ ":scheme", "server", "set-cookie", "/", "/index.html", ":status",
+ "strict-transport-security", "te", "trailers", "transfer-encoding",
+ "user-agent", "vary", "via", "www-authenticate"};
+
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 29, 26, 30,
+ 28, 32, 27, 31};
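
One way to read the generated tables above (an illustrative helper, not part of the generated file):

    #include <stdio.h>

    #include "src/core/transport/static_metadata.h"

    /* grpc_static_metadata_elem_indices holds one (key, value) pair of
       indices into grpc_static_metadata_strings per static element; this
       prints the decoded table. */
    static void dump_static_mdelems(void) {
      int i;
      for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
        printf("%2d: '%s': '%s'\n", i,
               grpc_static_metadata_strings[
                   grpc_static_metadata_elem_indices[2 * i]],
               grpc_static_metadata_strings[
                   grpc_static_metadata_elem_indices[2 * i + 1]]);
      }
    }
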
diff --git a/src/core/transport/static_metadata.h b/src/core/transport/static_metadata.h
new file mode 100644
index 0000000000..85442f8107
--- /dev/null
+++ b/src/core/transport/static_metadata.h
@@ -0,0 +1,408 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * WARNING: Auto-generated code.
+ *
+ * To make changes to this file, change
+ * tools/codegen/core/gen_static_metadata.py,
+ * and then re-run it.
+ *
+ * See metadata.h for an explanation of the interface here, and metadata.c
+ * for an explanation of what's going on.
+ */
+
+#ifndef GRPC_CORE_TRANSPORT_STATIC_METADATA_H
+#define GRPC_CORE_TRANSPORT_STATIC_METADATA_H
+
+#include "src/core/transport/metadata.h"
+
+#define GRPC_STATIC_MDSTR_COUNT 89
+extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+/* "0" */
+#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
+/* "1" */
+#define GRPC_MDSTR_1 (&grpc_static_mdstr_table[1])
+/* "2" */
+#define GRPC_MDSTR_2 (&grpc_static_mdstr_table[2])
+/* "200" */
+#define GRPC_MDSTR_200 (&grpc_static_mdstr_table[3])
+/* "204" */
+#define GRPC_MDSTR_204 (&grpc_static_mdstr_table[4])
+/* "206" */
+#define GRPC_MDSTR_206 (&grpc_static_mdstr_table[5])
+/* "304" */
+#define GRPC_MDSTR_304 (&grpc_static_mdstr_table[6])
+/* "400" */
+#define GRPC_MDSTR_400 (&grpc_static_mdstr_table[7])
+/* "404" */
+#define GRPC_MDSTR_404 (&grpc_static_mdstr_table[8])
+/* "500" */
+#define GRPC_MDSTR_500 (&grpc_static_mdstr_table[9])
+/* "accept" */
+#define GRPC_MDSTR_ACCEPT (&grpc_static_mdstr_table[10])
+/* "accept-charset" */
+#define GRPC_MDSTR_ACCEPT_CHARSET (&grpc_static_mdstr_table[11])
+/* "accept-encoding" */
+#define GRPC_MDSTR_ACCEPT_ENCODING (&grpc_static_mdstr_table[12])
+/* "accept-language" */
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (&grpc_static_mdstr_table[13])
+/* "accept-ranges" */
+#define GRPC_MDSTR_ACCEPT_RANGES (&grpc_static_mdstr_table[14])
+/* "access-control-allow-origin" */
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (&grpc_static_mdstr_table[15])
+/* "age" */
+#define GRPC_MDSTR_AGE (&grpc_static_mdstr_table[16])
+/* "allow" */
+#define GRPC_MDSTR_ALLOW (&grpc_static_mdstr_table[17])
+/* "application/grpc" */
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (&grpc_static_mdstr_table[18])
+/* ":authority" */
+#define GRPC_MDSTR_AUTHORITY (&grpc_static_mdstr_table[19])
+/* "authorization" */
+#define GRPC_MDSTR_AUTHORIZATION (&grpc_static_mdstr_table[20])
+/* "cache-control" */
+#define GRPC_MDSTR_CACHE_CONTROL (&grpc_static_mdstr_table[21])
+/* "census-bin" */
+#define GRPC_MDSTR_CENSUS_BIN (&grpc_static_mdstr_table[22])
+/* "census-binary-bin" */
+#define GRPC_MDSTR_CENSUS_BINARY_BIN (&grpc_static_mdstr_table[23])
+/* "content-disposition" */
+#define GRPC_MDSTR_CONTENT_DISPOSITION (&grpc_static_mdstr_table[24])
+/* "content-encoding" */
+#define GRPC_MDSTR_CONTENT_ENCODING (&grpc_static_mdstr_table[25])
+/* "content-language" */
+#define GRPC_MDSTR_CONTENT_LANGUAGE (&grpc_static_mdstr_table[26])
+/* "content-length" */
+#define GRPC_MDSTR_CONTENT_LENGTH (&grpc_static_mdstr_table[27])
+/* "content-location" */
+#define GRPC_MDSTR_CONTENT_LOCATION (&grpc_static_mdstr_table[28])
+/* "content-range" */
+#define GRPC_MDSTR_CONTENT_RANGE (&grpc_static_mdstr_table[29])
+/* "content-type" */
+#define GRPC_MDSTR_CONTENT_TYPE (&grpc_static_mdstr_table[30])
+/* "cookie" */
+#define GRPC_MDSTR_COOKIE (&grpc_static_mdstr_table[31])
+/* "date" */
+#define GRPC_MDSTR_DATE (&grpc_static_mdstr_table[32])
+/* "deflate" */
+#define GRPC_MDSTR_DEFLATE (&grpc_static_mdstr_table[33])
+/* "deflate,gzip" */
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (&grpc_static_mdstr_table[34])
+/* "" */
+#define GRPC_MDSTR_EMPTY (&grpc_static_mdstr_table[35])
+/* "etag" */
+#define GRPC_MDSTR_ETAG (&grpc_static_mdstr_table[36])
+/* "expect" */
+#define GRPC_MDSTR_EXPECT (&grpc_static_mdstr_table[37])
+/* "expires" */
+#define GRPC_MDSTR_EXPIRES (&grpc_static_mdstr_table[38])
+/* "from" */
+#define GRPC_MDSTR_FROM (&grpc_static_mdstr_table[39])
+/* "GET" */
+#define GRPC_MDSTR_GET (&grpc_static_mdstr_table[40])
+/* "grpc" */
+#define GRPC_MDSTR_GRPC (&grpc_static_mdstr_table[41])
+/* "grpc-accept-encoding" */
+#define GRPC_MDSTR_GRPC_ACCEPT_ENCODING (&grpc_static_mdstr_table[42])
+/* "grpc-encoding" */
+#define GRPC_MDSTR_GRPC_ENCODING (&grpc_static_mdstr_table[43])
+/* "grpc-internal-encoding-request" */
+#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (&grpc_static_mdstr_table[44])
+/* "grpc-message" */
+#define GRPC_MDSTR_GRPC_MESSAGE (&grpc_static_mdstr_table[45])
+/* "grpc-status" */
+#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[46])
+/* "grpc-timeout" */
+#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[47])
+/* "gzip" */
+#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[48])
+/* "gzip, deflate" */
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[49])
+/* "host" */
+#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[50])
+/* "http" */
+#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[51])
+/* "https" */
+#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[52])
+/* "identity" */
+#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[53])
+/* "identity,deflate" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[54])
+/* "identity,deflate,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdstr_table[55])
+/* "identity,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[56])
+/* "if-match" */
+#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[57])
+/* "if-modified-since" */
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[58])
+/* "if-none-match" */
+#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[59])
+/* "if-range" */
+#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[60])
+/* "if-unmodified-since" */
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[61])
+/* "last-modified" */
+#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[62])
+/* "link" */
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[63])
+/* "location" */
+#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[64])
+/* "max-forwards" */
+#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[65])
+/* ":method" */
+#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[66])
+/* ":path" */
+#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[67])
+/* "POST" */
+#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[68])
+/* "proxy-authenticate" */
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[69])
+/* "proxy-authorization" */
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[70])
+/* "range" */
+#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[71])
+/* "referer" */
+#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[72])
+/* "refresh" */
+#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[73])
+/* "retry-after" */
+#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[74])
+/* ":scheme" */
+#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[75])
+/* "server" */
+#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[76])
+/* "set-cookie" */
+#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[77])
+/* "/" */
+#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[78])
+/* "/index.html" */
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[79])
+/* ":status" */
+#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[80])
+/* "strict-transport-security" */
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[81])
+/* "te" */
+#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[82])
+/* "trailers" */
+#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[83])
+/* "transfer-encoding" */
+#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[84])
+/* "user-agent" */
+#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[85])
+/* "vary" */
+#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[86])
+/* "via" */
+#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[87])
+/* "www-authenticate" */
+#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[88])
+
+#define GRPC_STATIC_MDELEM_COUNT 78
+extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
+/* "accept-charset": "" */
+#define GRPC_MDELEM_ACCEPT_CHARSET_EMPTY (&grpc_static_mdelem_table[0])
+/* "accept": "" */
+#define GRPC_MDELEM_ACCEPT_EMPTY (&grpc_static_mdelem_table[1])
+/* "accept-encoding": "" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_EMPTY (&grpc_static_mdelem_table[2])
+/* "accept-encoding": "gzip, deflate" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_GZIP_COMMA_DEFLATE \
+ (&grpc_static_mdelem_table[3])
+/* "accept-language": "" */
+#define GRPC_MDELEM_ACCEPT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[4])
+/* "accept-ranges": "" */
+#define GRPC_MDELEM_ACCEPT_RANGES_EMPTY (&grpc_static_mdelem_table[5])
+/* "access-control-allow-origin": "" */
+#define GRPC_MDELEM_ACCESS_CONTROL_ALLOW_ORIGIN_EMPTY \
+ (&grpc_static_mdelem_table[6])
+/* "age": "" */
+#define GRPC_MDELEM_AGE_EMPTY (&grpc_static_mdelem_table[7])
+/* "allow": "" */
+#define GRPC_MDELEM_ALLOW_EMPTY (&grpc_static_mdelem_table[8])
+/* ":authority": "" */
+#define GRPC_MDELEM_AUTHORITY_EMPTY (&grpc_static_mdelem_table[9])
+/* "authorization": "" */
+#define GRPC_MDELEM_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[10])
+/* "cache-control": "" */
+#define GRPC_MDELEM_CACHE_CONTROL_EMPTY (&grpc_static_mdelem_table[11])
+/* "content-disposition": "" */
+#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY (&grpc_static_mdelem_table[12])
+/* "content-encoding": "" */
+#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY (&grpc_static_mdelem_table[13])
+/* "content-language": "" */
+#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[14])
+/* "content-length": "" */
+#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY (&grpc_static_mdelem_table[15])
+/* "content-location": "" */
+#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY (&grpc_static_mdelem_table[16])
+/* "content-range": "" */
+#define GRPC_MDELEM_CONTENT_RANGE_EMPTY (&grpc_static_mdelem_table[17])
+/* "content-type": "application/grpc" */
+#define GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC \
+ (&grpc_static_mdelem_table[18])
+/* "content-type": "" */
+#define GRPC_MDELEM_CONTENT_TYPE_EMPTY (&grpc_static_mdelem_table[19])
+/* "cookie": "" */
+#define GRPC_MDELEM_COOKIE_EMPTY (&grpc_static_mdelem_table[20])
+/* "date": "" */
+#define GRPC_MDELEM_DATE_EMPTY (&grpc_static_mdelem_table[21])
+/* "etag": "" */
+#define GRPC_MDELEM_ETAG_EMPTY (&grpc_static_mdelem_table[22])
+/* "expect": "" */
+#define GRPC_MDELEM_EXPECT_EMPTY (&grpc_static_mdelem_table[23])
+/* "expires": "" */
+#define GRPC_MDELEM_EXPIRES_EMPTY (&grpc_static_mdelem_table[24])
+/* "from": "" */
+#define GRPC_MDELEM_FROM_EMPTY (&grpc_static_mdelem_table[25])
+/* "grpc-accept-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE (&grpc_static_mdelem_table[26])
+/* "grpc-accept-encoding": "deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdelem_table[27])
+/* "grpc-accept-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP (&grpc_static_mdelem_table[28])
+/* "grpc-accept-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
+ (&grpc_static_mdelem_table[29])
+/* "grpc-accept-encoding": "identity,deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
+ (&grpc_static_mdelem_table[30])
+/* "grpc-accept-encoding": "identity,deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdelem_table[31])
+/* "grpc-accept-encoding": "identity,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
+ (&grpc_static_mdelem_table[32])
+/* "grpc-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ENCODING_DEFLATE (&grpc_static_mdelem_table[33])
+/* "grpc-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ENCODING_GZIP (&grpc_static_mdelem_table[34])
+/* "grpc-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ENCODING_IDENTITY (&grpc_static_mdelem_table[35])
+/* "grpc-status": "0" */
+#define GRPC_MDELEM_GRPC_STATUS_0 (&grpc_static_mdelem_table[36])
+/* "grpc-status": "1" */
+#define GRPC_MDELEM_GRPC_STATUS_1 (&grpc_static_mdelem_table[37])
+/* "grpc-status": "2" */
+#define GRPC_MDELEM_GRPC_STATUS_2 (&grpc_static_mdelem_table[38])
+/* "host": "" */
+#define GRPC_MDELEM_HOST_EMPTY (&grpc_static_mdelem_table[39])
+/* "if-match": "" */
+#define GRPC_MDELEM_IF_MATCH_EMPTY (&grpc_static_mdelem_table[40])
+/* "if-modified-since": "" */
+#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[41])
+/* "if-none-match": "" */
+#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY (&grpc_static_mdelem_table[42])
+/* "if-range": "" */
+#define GRPC_MDELEM_IF_RANGE_EMPTY (&grpc_static_mdelem_table[43])
+/* "if-unmodified-since": "" */
+#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
+/* "last-modified": "" */
+#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
+/* "link": "" */
+#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
+/* "location": "" */
+#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[47])
+/* "max-forwards": "" */
+#define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[48])
+/* ":method": "GET" */
+#define GRPC_MDELEM_METHOD_GET (&grpc_static_mdelem_table[49])
+/* ":method": "POST" */
+#define GRPC_MDELEM_METHOD_POST (&grpc_static_mdelem_table[50])
+/* ":path": "/" */
+#define GRPC_MDELEM_PATH_SLASH (&grpc_static_mdelem_table[51])
+/* ":path": "/index.html" */
+#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML (&grpc_static_mdelem_table[52])
+/* "proxy-authenticate": "" */
+#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[53])
+/* "proxy-authorization": "" */
+#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[54])
+/* "range": "" */
+#define GRPC_MDELEM_RANGE_EMPTY (&grpc_static_mdelem_table[55])
+/* "referer": "" */
+#define GRPC_MDELEM_REFERER_EMPTY (&grpc_static_mdelem_table[56])
+/* "refresh": "" */
+#define GRPC_MDELEM_REFRESH_EMPTY (&grpc_static_mdelem_table[57])
+/* "retry-after": "" */
+#define GRPC_MDELEM_RETRY_AFTER_EMPTY (&grpc_static_mdelem_table[58])
+/* ":scheme": "grpc" */
+#define GRPC_MDELEM_SCHEME_GRPC (&grpc_static_mdelem_table[59])
+/* ":scheme": "http" */
+#define GRPC_MDELEM_SCHEME_HTTP (&grpc_static_mdelem_table[60])
+/* ":scheme": "https" */
+#define GRPC_MDELEM_SCHEME_HTTPS (&grpc_static_mdelem_table[61])
+/* "server": "" */
+#define GRPC_MDELEM_SERVER_EMPTY (&grpc_static_mdelem_table[62])
+/* "set-cookie": "" */
+#define GRPC_MDELEM_SET_COOKIE_EMPTY (&grpc_static_mdelem_table[63])
+/* ":status": "200" */
+#define GRPC_MDELEM_STATUS_200 (&grpc_static_mdelem_table[64])
+/* ":status": "204" */
+#define GRPC_MDELEM_STATUS_204 (&grpc_static_mdelem_table[65])
+/* ":status": "206" */
+#define GRPC_MDELEM_STATUS_206 (&grpc_static_mdelem_table[66])
+/* ":status": "304" */
+#define GRPC_MDELEM_STATUS_304 (&grpc_static_mdelem_table[67])
+/* ":status": "400" */
+#define GRPC_MDELEM_STATUS_400 (&grpc_static_mdelem_table[68])
+/* ":status": "404" */
+#define GRPC_MDELEM_STATUS_404 (&grpc_static_mdelem_table[69])
+/* ":status": "500" */
+#define GRPC_MDELEM_STATUS_500 (&grpc_static_mdelem_table[70])
+/* "strict-transport-security": "" */
+#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
+ (&grpc_static_mdelem_table[71])
+/* "te": "trailers" */
+#define GRPC_MDELEM_TE_TRAILERS (&grpc_static_mdelem_table[72])
+/* "transfer-encoding": "" */
+#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY (&grpc_static_mdelem_table[73])
+/* "user-agent": "" */
+#define GRPC_MDELEM_USER_AGENT_EMPTY (&grpc_static_mdelem_table[74])
+/* "vary": "" */
+#define GRPC_MDELEM_VARY_EMPTY (&grpc_static_mdelem_table[75])
+/* "via": "" */
+#define GRPC_MDELEM_VIA_EMPTY (&grpc_static_mdelem_table[76])
+/* "www-authenticate": "" */
+#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[77])
+
+extern const uint8_t
+ grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2];
+extern const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT];
+extern const uint8_t grpc_static_accept_encoding_metadata[8];
+#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
+ (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])
+#endif /* GRPC_CORE_TRANSPORT_STATIC_METADATA_H */
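
A sketch of the accept-encoding lookup macro above; it assumes the bit ordering of grpc_compression_algorithm (none, deflate, gzip), which is what the generated table encodes:

    #include "src/core/transport/static_metadata.h"

    /* GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS treats its argument as a
       bitset of enabled compression algorithms (bit 0 = identity/none,
       bit 1 = deflate, bit 2 = gzip) and returns a canned
       "grpc-accept-encoding" element with no allocation or interning. */
    static grpc_mdelem *accept_encoding_md(int enable_deflate, int enable_gzip) {
      int algs = 1 | (enable_deflate ? 2 : 0) | (enable_gzip ? 4 : 0);
      return GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs);
    }
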
diff --git a/src/core/transport/transport.c b/src/core/transport/transport.c
index 39d7b701f2..3b555fa933 100644
--- a/src/core/transport/transport.c
+++ b/src/core/transport/transport.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,88 +32,153 @@
*/
#include "src/core/transport/transport.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
#include "src/core/transport/transport_impl.h"
-size_t grpc_transport_stream_size(grpc_transport *transport) {
- return transport->vtable->sizeof_stream;
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
+ refcount, refcount->destroy.cb_arg, val, val + 1, reason);
+#else
+void grpc_stream_ref(grpc_stream_refcount *refcount) {
+#endif
+ gpr_ref_non_zero(&refcount->refs);
}
-void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
- gpr_slice message) {
- transport->vtable->goaway(transport, status, message);
-}
-
-void grpc_transport_close(grpc_transport *transport) {
- transport->vtable->close(transport);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
+ const char *reason) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
+ refcount, refcount->destroy.cb_arg, val, val - 1, reason);
+#else
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_stream_refcount *refcount) {
+#endif
+ if (gpr_unref(&refcount->refs)) {
+ grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, true, NULL);
+ }
}
-void grpc_transport_destroy(grpc_transport *transport) {
- transport->vtable->destroy(transport);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ const char *object_type) {
+ refcount->object_type = object_type;
+#else
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg) {
+#endif
+ gpr_ref_init(&refcount->refs, initial_refs);
+ grpc_closure_init(&refcount->destroy, cb, cb_arg);
}
-int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
- const void *server_data,
- grpc_transport_op *initial_op) {
- return transport->vtable->init_stream(transport, stream, server_data,
- initial_op);
+size_t grpc_transport_stream_size(grpc_transport *transport) {
+ return transport->vtable->sizeof_stream;
}
-void grpc_transport_perform_op(grpc_transport *transport, grpc_stream *stream,
- grpc_transport_op *op) {
- transport->vtable->perform_op(transport, stream, op);
+void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport) {
+ transport->vtable->destroy(exec_ctx, transport);
}
-void grpc_transport_add_to_pollset(grpc_transport *transport,
- grpc_pollset *pollset) {
- transport->vtable->add_to_pollset(transport, pollset);
+int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_stream_refcount *refcount,
+ const void *server_data) {
+ return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
+ server_data);
}
-void grpc_transport_destroy_stream(grpc_transport *transport,
- grpc_stream *stream) {
- transport->vtable->destroy_stream(transport, stream);
+void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ grpc_stream *stream,
+ grpc_transport_stream_op *op) {
+ transport->vtable->perform_stream_op(exec_ctx, transport, stream, op);
}
-void grpc_transport_ping(grpc_transport *transport, grpc_iomgr_closure *cb) {
- transport->vtable->ping(transport, cb);
+void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ grpc_transport_op *op) {
+ transport->vtable->perform_op(exec_ctx, transport, op);
}
-void grpc_transport_setup_cancel(grpc_transport_setup *setup) {
- setup->vtable->cancel(setup);
+void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_pollset *pollset) {
+ transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
}
-void grpc_transport_setup_initiate(grpc_transport_setup *setup) {
- setup->vtable->initiate(setup);
+void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ grpc_stream *stream) {
+ transport->vtable->destroy_stream(exec_ctx, transport, stream);
}
-void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup,
- grpc_pollset *pollset) {
- setup->vtable->add_interested_party(setup, pollset);
+char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport) {
+ return transport->vtable->get_peer(exec_ctx, transport);
}
-void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup,
- grpc_pollset *pollset) {
- setup->vtable->del_interested_party(setup, pollset);
+void grpc_transport_stream_op_finish_with_failure(
+ grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) {
+ grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, false, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, op->recv_initial_metadata_ready, false, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, false, NULL);
}
-void grpc_transport_op_finish_with_failure(grpc_transport_op *op) {
- if (op->send_ops) {
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
+void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
+ grpc_status_code status) {
+ GPR_ASSERT(status != GRPC_STATUS_OK);
+ if (op->cancel_with_status == GRPC_STATUS_OK) {
+ op->cancel_with_status = status;
}
- if (op->recv_ops) {
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 0);
+ if (op->close_with_status != GRPC_STATUS_OK) {
+ op->close_with_status = GRPC_STATUS_OK;
+ if (op->optional_close_message != NULL) {
+ gpr_slice_unref(*op->optional_close_message);
+ op->optional_close_message = NULL;
+ }
}
- if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
+}
+
+typedef struct {
+ gpr_slice message;
+ grpc_closure *then_call;
+ grpc_closure closure;
+} close_message_data;
+
+static void free_message(grpc_exec_ctx *exec_ctx, void *p, bool iomgr_success) {
+ close_message_data *cmd = p;
+ gpr_slice_unref(cmd->message);
+ if (cmd->then_call != NULL) {
+ cmd->then_call->cb(exec_ctx, cmd->then_call->cb_arg, iomgr_success);
}
+ gpr_free(cmd);
}
-void grpc_transport_op_add_cancellation(grpc_transport_op *op,
+void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
grpc_status_code status,
- grpc_mdstr *message) {
- if (op->cancel_with_status == GRPC_STATUS_OK) {
- op->cancel_with_status = status;
+ gpr_slice *optional_message) {
+ close_message_data *cmd;
+ GPR_ASSERT(status != GRPC_STATUS_OK);
+ if (op->cancel_with_status != GRPC_STATUS_OK ||
+ op->close_with_status != GRPC_STATUS_OK) {
+ if (optional_message) {
+ gpr_slice_unref(*optional_message);
+ }
+ return;
}
- if (message) {
- grpc_mdstr_unref(message);
+ if (optional_message) {
+ cmd = gpr_malloc(sizeof(*cmd));
+ cmd->message = *optional_message;
+ cmd->then_call = op->on_complete;
+ grpc_closure_init(&cmd->closure, free_message, cmd);
+ op->on_complete = &cmd->closure;
+ op->optional_close_message = &cmd->message;
}
+ op->close_with_status = status;
}
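
A minimal sketch of the stream refcount helpers above; my_stream is an invented transport-side stream type, and the callback follows the grpc_iomgr_cb_func shape used in transport.c:

    #include <grpc/support/alloc.h>

    #include "src/core/transport/transport.h"

    /* The destroy closure runs on the exec_ctx once the count hits zero;
       GRPC_STREAM_REF_INIT forwards the object-type string only in
       GRPC_STREAM_REFCOUNT_DEBUG builds. */
    typedef struct my_stream {
      grpc_stream_refcount refs;
      /* ... per-stream transport state ... */
    } my_stream;

    static void destroy_my_stream(grpc_exec_ctx *exec_ctx, void *arg,
                                  bool success) {
      gpr_free(arg);
    }

    static my_stream *my_stream_create(void) {
      my_stream *s = gpr_malloc(sizeof(*s));
      GRPC_STREAM_REF_INIT(&s->refs, 1, destroy_my_stream, s, "my_stream");
      return s;
    }
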
diff --git a/src/core/transport/transport.h b/src/core/transport/transport.h
index a2c41c47af..0f068dcb38 100644
--- a/src/core/transport/transport.h
+++ b/src/core/transport/transport.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,81 +31,128 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_H
+#ifndef GRPC_CORE_TRANSPORT_TRANSPORT_H
+#define GRPC_CORE_TRANSPORT_TRANSPORT_H
#include <stddef.h>
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_set.h"
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
+#include "src/core/transport/byte_stream.h"
#include "src/core/channel/context.h"
/* forward declarations */
typedef struct grpc_transport grpc_transport;
-typedef struct grpc_transport_callbacks grpc_transport_callbacks;
/* grpc_stream doesn't actually exist. It's used as a typesafe
opaque pointer for whatever data the transport wants to track
for a stream. */
typedef struct grpc_stream grpc_stream;
-/* Represents the send/recv closed state of a stream. */
-typedef enum grpc_stream_state {
- /* the stream is open for sends and receives */
- GRPC_STREAM_OPEN,
- /* the stream is closed for sends, but may still receive data */
- GRPC_STREAM_SEND_CLOSED,
- /* the stream is closed for receives, but may still send data */
- GRPC_STREAM_RECV_CLOSED,
- /* the stream is closed for both sends and receives */
- GRPC_STREAM_CLOSED
-} grpc_stream_state;
-
-/* Transport op: a set of operations to perform on a transport */
-typedef struct grpc_transport_op {
- grpc_iomgr_closure *on_consumed;
-
- grpc_stream_op_buffer *send_ops;
- int is_last_send;
- grpc_iomgr_closure *on_done_send;
-
- grpc_stream_op_buffer *recv_ops;
- grpc_stream_state *recv_state;
- grpc_iomgr_closure *on_done_recv;
-
- grpc_pollset *bind_pollset;
-
+/*#define GRPC_STREAM_REFCOUNT_DEBUG*/
+
+typedef struct grpc_stream_refcount {
+ gpr_refcount refs;
+ grpc_closure destroy;
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+ const char *object_type;
+#endif
+} grpc_stream_refcount;
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ const char *object_type);
+void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason);
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
+ const char *reason);
+#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
+ grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype)
+#else
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg);
+void grpc_stream_ref(grpc_stream_refcount *refcount);
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount);
+#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
+ grpc_stream_ref_init(rc, ir, cb, cb_arg)
+#endif
+
+/* Transport stream op: a set of operations to perform on a transport
+ against a single stream */
+typedef struct grpc_transport_stream_op {
+ /** Send initial metadata to the peer, from the provided metadata batch. */
+ grpc_metadata_batch *send_initial_metadata;
+
+ /** Send trailing metadata to the peer, from the provided metadata batch. */
+ grpc_metadata_batch *send_trailing_metadata;
+
+ /** Send message data to the peer, from the provided byte stream. */
+ grpc_byte_stream *send_message;
+
+ /** Receive initial metadata from the stream, into provided metadata batch. */
+ grpc_metadata_batch *recv_initial_metadata;
+ /** Should be enqueued when initial metadata is ready to be processed. */
+ grpc_closure *recv_initial_metadata_ready;
+
+ /** Receive message data from the stream, into provided byte stream. */
+ grpc_byte_stream **recv_message;
+ /** Should be enqueued when one message is ready to be processed. */
+ grpc_closure *recv_message_ready;
+
+ /** Receive trailing metadata from the stream, into provided metadata batch.
+ */
+ grpc_metadata_batch *recv_trailing_metadata;
+
+ /** Should be enqueued when all requested operations (excluding recv_message
+ and recv_initial_metadata which have their own closures) in a given batch
+ have been completed. */
+ grpc_closure *on_complete;
+
+ /** If != GRPC_STATUS_OK, cancel this stream */
grpc_status_code cancel_with_status;
+ /** If != GRPC_STATUS_OK, send grpc-status, grpc-message, and close this
+ stream for both reading and writing */
+ grpc_status_code close_with_status;
+ gpr_slice *optional_close_message;
+
/* Indexes correspond to grpc_context_index enum values */
grpc_call_context_element *context;
-} grpc_transport_op;
+} grpc_transport_stream_op;
-/* Callbacks made from the transport to the upper layers of grpc. */
-struct grpc_transport_callbacks {
- /* Initialize a new stream on behalf of the transport.
- Must result in a call to
- grpc_transport_init_stream(transport, ..., request) in the same call
- stack.
- Must not result in any other calls to the transport.
-
- Arguments:
- user_data - the transport user data set at transport creation time
- transport - the grpc_transport instance making this call
- request - request parameters for this stream (owned by the caller)
- server_data - opaque transport dependent argument that should be passed
- to grpc_transport_init_stream
- */
- void (*accept_stream)(void *user_data, grpc_transport *transport,
- const void *server_data);
-
- void (*goaway)(void *user_data, grpc_transport *transport,
- grpc_status_code status, gpr_slice debug);
-
- /* The transport has been closed */
- void (*closed)(void *user_data, grpc_transport *transport);
-};
+/** Transport op: a set of operations to perform on a transport as a whole */
+typedef struct grpc_transport_op {
+ /** Called when processing of this op is done. */
+ grpc_closure *on_consumed;
+ /** connectivity monitoring - set connectivity_state to NULL to unsubscribe */
+ grpc_closure *on_connectivity_state_change;
+ grpc_connectivity_state *connectivity_state;
+ /** should the transport be disconnected */
+ int disconnect;
+ /** should we send a goaway?
+ after a goaway is sent, once there are no more active calls on
+ the transport, the transport should disconnect */
+ int send_goaway;
+ /** what should the goaway contain? */
+ grpc_status_code goaway_status;
+ gpr_slice *goaway_message;
+ /** set the callback for accepting new streams;
+ this is a permanent callback, unlike the other one-shot closures.
+ If true, the callback is set to set_accept_stream_fn, with its
+ user_data argument set to set_accept_stream_user_data */
+ bool set_accept_stream;
+ void (*set_accept_stream_fn)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport,
+ const void *server_data);
+ void *set_accept_stream_user_data;
+ /** add this transport to a pollset */
+ grpc_pollset *bind_pollset;
+ /** add this transport to a pollset_set */
+ grpc_pollset_set *bind_pollset_set;
+ /** send a ping, call this back if not NULL */
+ grpc_closure *send_ping;
+} grpc_transport_op;
/* Returns the amount of memory required to store a grpc_stream for this
transport */
@@ -120,9 +167,14 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
stream - a pointer to uninitialized memory to initialize
server_data - either NULL for a client initiated stream, or a pointer
supplied from the accept_stream callback function */
-int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
- const void *server_data,
- grpc_transport_op *initial_op);
+int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_stream_refcount *refcount,
+ const void *server_data);
+
+void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_pollset *pollset);
/* Destroy transport data for a stream.
@@ -134,20 +186,21 @@ int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
transport - the transport on which to create this stream
stream - the grpc_stream to destroy (memory is still owned by the
caller, but any child memory must be cleaned up) */
-void grpc_transport_destroy_stream(grpc_transport *transport,
+void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_stream *stream);
-void grpc_transport_op_finish_with_failure(grpc_transport_op *op);
+void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
+ grpc_transport_stream_op *op);
-void grpc_transport_op_add_cancellation(grpc_transport_op *op,
- grpc_status_code status,
- grpc_mdstr *message);
+void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
+ grpc_status_code status);
-/* TODO(ctiller): remove this */
-void grpc_transport_add_to_pollset(grpc_transport *transport,
- grpc_pollset *pollset);
+void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
+ grpc_status_code status,
+ gpr_slice *optional_message);
-char *grpc_transport_op_string(grpc_transport_op *op);
+char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
/* Send a batch of operations on a transport
@@ -157,14 +210,20 @@ char *grpc_transport_op_string(grpc_transport_op *op);
transport - the transport on which to initiate the stream
stream - the stream on which to send the operations. This must be
non-NULL and previously initialized by the same transport.
- op - a grpc_transport_op specifying the op to perform */
-void grpc_transport_perform_op(grpc_transport *transport, grpc_stream *stream,
+ op - a grpc_transport_stream_op specifying the op to perform */
+void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
+ grpc_stream *stream,
+ grpc_transport_stream_op *op);
+
+void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_transport_op *op);
/* Send a ping on a transport
Calls cb with user data when a response is received. */
-void grpc_transport_ping(grpc_transport *transport, grpc_iomgr_closure *cb);
+void grpc_transport_ping(grpc_transport *transport, grpc_closure *cb);
/* Advise peer of pending connection termination. */
void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
@@ -174,54 +233,10 @@ void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
-void grpc_transport_destroy(grpc_transport *transport);
-
-/* Return type for grpc_transport_setup_callback */
-typedef struct grpc_transport_setup_result {
- void *user_data;
- const grpc_transport_callbacks *callbacks;
-} grpc_transport_setup_result;
-
-/* Given a transport, return callbacks for that transport. Used to finalize
- setup as a transport is being created */
-typedef grpc_transport_setup_result (*grpc_transport_setup_callback)(
- void *setup_arg, grpc_transport *transport, grpc_mdctx *mdctx);
-
-typedef struct grpc_transport_setup grpc_transport_setup;
-typedef struct grpc_transport_setup_vtable grpc_transport_setup_vtable;
-
-struct grpc_transport_setup_vtable {
- void (*initiate)(grpc_transport_setup *setup);
- void (*add_interested_party)(grpc_transport_setup *setup,
- grpc_pollset *pollset);
- void (*del_interested_party)(grpc_transport_setup *setup,
- grpc_pollset *pollset);
- void (*cancel)(grpc_transport_setup *setup);
-};
-
-/* Transport setup is an asynchronous utility interface for client channels to
- establish connections. It's transport agnostic. */
-struct grpc_transport_setup {
- const grpc_transport_setup_vtable *vtable;
-};
-
-/* Initiate transport setup: e.g. for TCP+DNS trigger a resolve of the name
- given at transport construction time, create the tcp connection, perform
- handshakes, and call some grpc_transport_setup_result function provided at
- setup construction time.
- This *may* be implemented as a no-op if the setup process monitors something
- continuously. */
-void grpc_transport_setup_initiate(grpc_transport_setup *setup);
-
-void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup,
- grpc_pollset *pollset);
-void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup,
- grpc_pollset *pollset);
-
-/* Cancel transport setup. After this returns, no new transports should be
- created, and all pending transport setup callbacks should be completed.
- After this call completes, setup should be considered invalid (this can be
- used as a destruction call by setup). */
-void grpc_transport_setup_cancel(grpc_transport_setup *setup);
-
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_H */
+void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
+
+/* Get the transports peer */
+char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport);
+
+#endif /* GRPC_CORE_TRANSPORT_TRANSPORT_H */
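
A rough sketch of how a caller might drive this interface; the example_* names, the heap-allocated batch holder, and the assumption that md_batch outlives the batch are inventions of this sketch, not part of the header:

#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/transport/transport.h"

typedef struct {
  grpc_transport_stream_op op;
  grpc_closure on_complete;
} example_batch; /* hypothetical holder so the closure outlives the call */

static void example_batch_done(grpc_exec_ctx *exec_ctx, void *arg,
                               bool success) {
  gpr_log(GPR_DEBUG, "batch complete: success=%d", success);
  gpr_free(arg);
}

static void example_send_initial_metadata(grpc_exec_ctx *exec_ctx,
                                          grpc_transport *transport,
                                          grpc_stream *stream,
                                          grpc_metadata_batch *md_batch) {
  example_batch *b = gpr_malloc(sizeof(*b));
  memset(&b->op, 0, sizeof(b->op)); /* unset fields stay NULL / GRPC_STATUS_OK */
  grpc_closure_init(&b->on_complete, example_batch_done, b);
  b->op.send_initial_metadata = md_batch; /* metadata to send */
  b->op.on_complete = &b->on_complete;    /* fired when the batch completes */
  grpc_transport_perform_stream_op(exec_ctx, transport, stream, &b->op);
}
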
diff --git a/src/core/transport/transport_impl.h b/src/core/transport/transport_impl.h
index c51951b7a7..d9ecc4d2ba 100644
--- a/src/core/transport/transport_impl.h
+++ b/src/core/transport/transport_impl.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_IMPL_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_IMPL_H
+#ifndef GRPC_CORE_TRANSPORT_TRANSPORT_IMPL_H
+#define GRPC_CORE_TRANSPORT_TRANSPORT_IMPL_H
#include "src/core/transport/transport.h"
@@ -41,32 +41,35 @@ typedef struct grpc_transport_vtable {
layers and initialized by the transport */
size_t sizeof_stream; /* = sizeof(transport stream) */
- /* implementation of grpc_transport_init_stream */
- int (*init_stream)(grpc_transport *self, grpc_stream *stream,
- const void *server_data, grpc_transport_op *initial_op);
-
- /* implementation of grpc_transport_send_batch */
- void (*perform_op)(grpc_transport *self, grpc_stream *stream,
- grpc_transport_op *op);
+ /* name of this transport implementation */
+ const char *name;
- /* implementation of grpc_transport_add_to_pollset */
- void (*add_to_pollset)(grpc_transport *self, grpc_pollset *pollset);
+ /* implementation of grpc_transport_init_stream */
+ int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_stream_refcount *refcount,
+ const void *server_data);
- /* implementation of grpc_transport_destroy_stream */
- void (*destroy_stream)(grpc_transport *self, grpc_stream *stream);
+ /* implementation of grpc_transport_set_pollset */
+ void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_pollset *pollset);
- /* implementation of grpc_transport_goaway */
- void (*goaway)(grpc_transport *self, grpc_status_code status,
- gpr_slice debug_data);
+ /* implementation of grpc_transport_perform_stream_op */
+ void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_transport_stream_op *op);
- /* implementation of grpc_transport_close */
- void (*close)(grpc_transport *self);
+ /* implementation of grpc_transport_perform_op */
+ void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_transport_op *op);
- /* implementation of grpc_transport_ping */
- void (*ping)(grpc_transport *self, grpc_iomgr_closure *cb);
+ /* implementation of grpc_transport_destroy_stream */
+ void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream);
/* implementation of grpc_transport_destroy */
- void (*destroy)(grpc_transport *self);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
+
+ /* implementation of grpc_transport_get_peer */
+ char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
/* an instance of a grpc transport */
@@ -75,4 +78,4 @@ struct grpc_transport {
const grpc_transport_vtable *vtable;
};
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_IMPL_H */
+#endif /* GRPC_CORE_TRANSPORT_TRANSPORT_IMPL_H */
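
A hypothetical skeleton (everything named null_* is invented here) showing how an implementation fills in this vtable; a real transport would carry state and complete every closure handed to it:

#include <grpc/support/string_util.h>
#include "src/core/transport/transport_impl.h"

static int null_init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                            grpc_stream *stream,
                            grpc_stream_refcount *refcount,
                            const void *server_data) {
  return 0; /* no per-stream state (sizeof_stream is 0 below) */
}
static void null_set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                             grpc_stream *stream, grpc_pollset *pollset) {}
static void null_perform_stream_op(grpc_exec_ctx *exec_ctx,
                                   grpc_transport *self, grpc_stream *stream,
                                   grpc_transport_stream_op *op) {
  /* Refuse all stream work by failing the batch's closures. */
  grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
}
static void null_perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                            grpc_transport_op *op) {
  /* A real implementation must run op->on_consumed and honor the request. */
}
static void null_destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                                grpc_stream *stream) {}
static void null_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *self) {}
static char *null_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *self) {
  return gpr_strdup("null:"); /* caller frees */
}

static const grpc_transport_vtable null_transport_vtable = {
    0,                /* sizeof_stream */
    "null",           /* name */
    null_init_stream, null_set_pollset,    null_perform_stream_op,
    null_perform_op,  null_destroy_stream, null_destroy,
    null_get_peer};
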
diff --git a/src/core/transport/transport_op_string.c b/src/core/transport/transport_op_string.c
index a408b75790..98b51afc88 100644
--- a/src/core/transport/transport_op_string.c
+++ b/src/core/transport/transport_op_string.c
@@ -47,14 +47,12 @@
static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
gpr_strvec_add(b, gpr_strdup("key="));
- gpr_strvec_add(
- b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
- GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT));
+ gpr_strvec_add(b,
+ gpr_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
gpr_strvec_add(b, gpr_strdup(" value="));
- gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->value->slice),
- GPR_SLICE_LENGTH(md->value->slice),
- GPR_HEXDUMP_PLAINTEXT));
+ gpr_strvec_add(
+ b, gpr_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
@@ -63,80 +61,62 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
- if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) {
+ if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
char *tmp;
- gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
- md.deadline.tv_nsec);
+ gpr_asprintf(&tmp, " deadline=%lld.%09d", (long long)md.deadline.tv_sec,
+ (int)md.deadline.tv_nsec);
gpr_strvec_add(b, tmp);
}
}
-char *grpc_sopb_string(grpc_stream_op_buffer *sopb) {
- char *out;
+char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
- size_t i;
+ char *out;
+ int first = 1;
+
gpr_strvec b;
gpr_strvec_init(&b);
- for (i = 0; i < sopb->nops; i++) {
- grpc_stream_op *op = &sopb->ops[i];
- if (i > 0) gpr_strvec_add(&b, gpr_strdup(", "));
- switch (op->type) {
- case GRPC_NO_OP:
- gpr_strvec_add(&b, gpr_strdup("NO_OP"));
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- gpr_asprintf(&tmp, "BEGIN_MESSAGE:%d", op->data.begin_message.length);
- gpr_strvec_add(&b, tmp);
- break;
- case GRPC_OP_SLICE:
- gpr_asprintf(&tmp, "SLICE:%d", GPR_SLICE_LENGTH(op->data.slice));
- gpr_strvec_add(&b, tmp);
- break;
- case GRPC_OP_METADATA:
- gpr_strvec_add(&b, gpr_strdup("METADATA{"));
- put_metadata_list(&b, op->data.metadata);
- gpr_strvec_add(&b, gpr_strdup("}"));
- break;
- }
+ if (op->send_initial_metadata != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = 0;
+ gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
+ put_metadata_list(&b, *op->send_initial_metadata);
+ gpr_strvec_add(&b, gpr_strdup("}"));
}
- out = gpr_strvec_flatten(&b, NULL);
- gpr_strvec_destroy(&b);
-
- return out;
-}
-
-char *grpc_transport_op_string(grpc_transport_op *op) {
- char *tmp;
- char *out;
- int first = 1;
+ if (op->send_message != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = 0;
+ gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
+ op->send_message->flags, op->send_message->length);
+ gpr_strvec_add(&b, tmp);
+ }
- gpr_strvec b;
- gpr_strvec_init(&b);
+ if (op->send_trailing_metadata != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = 0;
+ gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
+ put_metadata_list(&b, *op->send_trailing_metadata);
+ gpr_strvec_add(&b, gpr_strdup("}"));
+ }
- if (op->send_ops) {
+ if (op->recv_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_strvec_add(&b, gpr_strdup("SEND"));
- if (op->is_last_send) {
- gpr_strvec_add(&b, gpr_strdup("_LAST"));
- }
- gpr_strvec_add(&b, gpr_strdup("["));
- gpr_strvec_add(&b, grpc_sopb_string(op->send_ops));
- gpr_strvec_add(&b, gpr_strdup("]"));
+ gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
- if (op->recv_ops) {
+ if (op->recv_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_strvec_add(&b, gpr_strdup("RECV"));
+ gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
- if (op->bind_pollset) {
+ if (op->recv_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_strvec_add(&b, gpr_strdup("BIND"));
+ gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_with_status != GRPC_STATUS_OK) {
@@ -153,8 +133,8 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem, grpc_transport_op *op) {
- char *str = grpc_transport_op_string(op);
+ grpc_call_element *elem, grpc_transport_stream_op *op) {
+ char *str = grpc_transport_stream_op_string(op);
gpr_log(file, line, severity, "OP[%s:%p]: %s", elem->filter->name, elem, str);
gpr_free(str);
}
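
A small hypothetical helper built on the functions above; the returned op string is heap-allocated and must be freed by the caller:

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/transport/transport.h"

static void example_log_stream_op(grpc_transport_stream_op *op) {
  char *str = grpc_transport_stream_op_string(op);
  gpr_log(GPR_DEBUG, "starting batch: %s", str);
  gpr_free(str); /* caller owns the string */
}
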
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c
index 9ce1ddb95e..72ac32a171 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.c
@@ -53,7 +53,7 @@
where the size field value is the size of the size field plus the size of
the data encoded in little endian on 4 bytes. */
typedef struct {
- unsigned char* data;
+ unsigned char *data;
size_t size;
size_t allocated_size;
size_t offset;
@@ -87,10 +87,10 @@ typedef struct {
/* --- Utils. ---*/
-static const char* tsi_fake_handshake_message_strings[] = {
+static const char *tsi_fake_handshake_message_strings[] = {
"CLIENT_INIT", "SERVER_INIT", "CLIENT_FINISHED", "SERVER_FINISHED"};
-static const char* tsi_fake_handshake_message_to_string(int msg) {
+static const char *tsi_fake_handshake_message_to_string(int msg) {
if (msg < 0 || msg >= TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
gpr_log(GPR_ERROR, "Invalid message %d", msg);
return "UNKNOWN";
@@ -99,8 +99,8 @@ static const char* tsi_fake_handshake_message_to_string(int msg) {
}
static tsi_result tsi_fake_handshake_message_from_string(
- const char* msg_string, tsi_fake_handshake_message* msg) {
- int i;
+ const char *msg_string, tsi_fake_handshake_message *msg) {
+ tsi_fake_handshake_message i;
for (i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) {
if (strncmp(msg_string, tsi_fake_handshake_message_strings[i],
strlen(tsi_fake_handshake_message_strings[i])) == 0) {
@@ -112,32 +112,32 @@ static tsi_result tsi_fake_handshake_message_from_string(
return TSI_DATA_CORRUPTED;
}
-static gpr_uint32 load32_little_endian(const unsigned char* buf) {
- return ((gpr_uint32)(buf[0]) | (gpr_uint32)(buf[1] << 8) |
- (gpr_uint32)(buf[2] << 16) | (gpr_uint32)(buf[3] << 24));
+static uint32_t load32_little_endian(const unsigned char *buf) {
+ return ((uint32_t)(buf[0]) | (uint32_t)(buf[1] << 8) |
+ (uint32_t)(buf[2] << 16) | (uint32_t)(buf[3] << 24));
}
-static void store32_little_endian(gpr_uint32 value, unsigned char* buf) {
- buf[3] = (unsigned char)(value >> 24) & 0xFF;
- buf[2] = (unsigned char)(value >> 16) & 0xFF;
- buf[1] = (unsigned char)(value >> 8) & 0xFF;
- buf[0] = (unsigned char)(value) & 0xFF;
+static void store32_little_endian(uint32_t value, unsigned char *buf) {
+ buf[3] = (unsigned char)((value >> 24) & 0xFF);
+ buf[2] = (unsigned char)((value >> 16) & 0xFF);
+ buf[1] = (unsigned char)((value >> 8) & 0xFF);
+ buf[0] = (unsigned char)((value)&0xFF);
}
-static void tsi_fake_frame_reset(tsi_fake_frame* frame, int needs_draining) {
+static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
frame->offset = 0;
frame->needs_draining = needs_draining;
if (!needs_draining) frame->size = 0;
}
/* Returns 1 if successful, 0 otherwise. */
-static int tsi_fake_frame_ensure_size(tsi_fake_frame* frame) {
+static int tsi_fake_frame_ensure_size(tsi_fake_frame *frame) {
if (frame->data == NULL) {
frame->allocated_size = frame->size;
frame->data = malloc(frame->allocated_size);
if (frame->data == NULL) return 0;
} else if (frame->size > frame->allocated_size) {
- unsigned char* new_data = realloc(frame->data, frame->size);
+ unsigned char *new_data = realloc(frame->data, frame->size);
if (new_data == NULL) {
free(frame->data);
frame->data = NULL;
@@ -150,12 +150,12 @@ static int tsi_fake_frame_ensure_size(tsi_fake_frame* frame) {
}
/* This method should not be called if frame->needs_framing is not 0. */
-static tsi_result fill_frame_from_bytes(const unsigned char* incoming_bytes,
- size_t* incoming_bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result fill_frame_from_bytes(const unsigned char *incoming_bytes,
+ size_t *incoming_bytes_size,
+ tsi_fake_frame *frame) {
size_t available_size = *incoming_bytes_size;
size_t to_read_size = 0;
- const unsigned char* bytes_cursor = incoming_bytes;
+ const unsigned char *bytes_cursor = incoming_bytes;
if (frame->needs_draining) return TSI_INTERNAL_ERROR;
if (frame->data == NULL) {
@@ -171,7 +171,7 @@ static tsi_result fill_frame_from_bytes(const unsigned char* incoming_bytes,
memcpy(frame->data + frame->offset, bytes_cursor, available_size);
bytes_cursor += available_size;
frame->offset += available_size;
- *incoming_bytes_size = bytes_cursor - incoming_bytes;
+ *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes);
return TSI_INCOMPLETE_DATA;
}
memcpy(frame->data + frame->offset, bytes_cursor, to_read_size);
@@ -187,20 +187,20 @@ static tsi_result fill_frame_from_bytes(const unsigned char* incoming_bytes,
memcpy(frame->data + frame->offset, bytes_cursor, available_size);
frame->offset += available_size;
bytes_cursor += available_size;
- *incoming_bytes_size = bytes_cursor - incoming_bytes;
+ *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes);
return TSI_INCOMPLETE_DATA;
}
memcpy(frame->data + frame->offset, bytes_cursor, to_read_size);
bytes_cursor += to_read_size;
- *incoming_bytes_size = bytes_cursor - incoming_bytes;
+ *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes);
tsi_fake_frame_reset(frame, 1 /* needs_draining */);
return TSI_OK;
}
/* This method should not be called if frame->needs_framing is 0. */
-static tsi_result drain_frame_to_bytes(unsigned char* outgoing_bytes,
- size_t* outgoing_bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result drain_frame_to_bytes(unsigned char *outgoing_bytes,
+ size_t *outgoing_bytes_size,
+ tsi_fake_frame *frame) {
size_t to_write_size = frame->size - frame->offset;
if (!frame->needs_draining) return TSI_INTERNAL_ERROR;
if (*outgoing_bytes_size < to_write_size) {
@@ -214,35 +214,35 @@ static tsi_result drain_frame_to_bytes(unsigned char* outgoing_bytes,
return TSI_OK;
}
-static tsi_result bytes_to_frame(unsigned char* bytes, size_t bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result bytes_to_frame(unsigned char *bytes, size_t bytes_size,
+ tsi_fake_frame *frame) {
frame->offset = 0;
frame->size = bytes_size + TSI_FAKE_FRAME_HEADER_SIZE;
if (!tsi_fake_frame_ensure_size(frame)) return TSI_OUT_OF_RESOURCES;
- store32_little_endian(frame->size, frame->data);
+ store32_little_endian((uint32_t)frame->size, frame->data);
memcpy(frame->data + TSI_FAKE_FRAME_HEADER_SIZE, bytes, bytes_size);
tsi_fake_frame_reset(frame, 1 /* needs draining */);
return TSI_OK;
}
-static void tsi_fake_frame_destruct(tsi_fake_frame* frame) {
+static void tsi_fake_frame_destruct(tsi_fake_frame *frame) {
if (frame->data != NULL) free(frame->data);
}
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result fake_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
+static tsi_result fake_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
unsigned char frame_header[TSI_FAKE_FRAME_HEADER_SIZE];
- tsi_fake_frame* frame = &impl->protect_frame;
+ tsi_fake_frame *frame = &impl->protect_frame;
size_t saved_output_size = *protected_output_frames_size;
size_t drained_size = 0;
- size_t* num_bytes_written = protected_output_frames_size;
+ size_t *num_bytes_written = protected_output_frames_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -266,7 +266,7 @@ static tsi_result fake_protector_protect(tsi_frame_protector* self,
if (frame->size == 0) {
/* New frame, create a header. */
size_t written_in_frame_size = 0;
- store32_little_endian(impl->max_frame_size, frame_header);
+ store32_little_endian((uint32_t)impl->max_frame_size, frame_header);
written_in_frame_size = TSI_FAKE_FRAME_HEADER_SIZE;
result = fill_frame_from_bytes(frame_header, &written_in_frame_size, frame);
if (result != TSI_INCOMPLETE_DATA) {
@@ -293,17 +293,18 @@ static tsi_result fake_protector_protect(tsi_frame_protector* self,
}
static tsi_result fake_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
- tsi_fake_frame* frame = &impl->protect_frame;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+ tsi_fake_frame *frame = &impl->protect_frame;
if (!frame->needs_draining) {
/* Create a short frame. */
frame->size = frame->offset;
frame->offset = 0;
frame->needs_draining = 1;
- store32_little_endian(frame->size, frame->data); /* Overwrite header. */
+ store32_little_endian((uint32_t)frame->size,
+ frame->data); /* Overwrite header. */
}
result = drain_frame_to_bytes(protected_output_frames,
protected_output_frames_size, frame);
@@ -313,15 +314,15 @@ static tsi_result fake_protector_protect_flush(
}
static tsi_result fake_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
- tsi_fake_frame* frame = &impl->unprotect_frame;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+ tsi_fake_frame *frame = &impl->unprotect_frame;
size_t saved_output_size = *unprotected_bytes_size;
size_t drained_size = 0;
- size_t* num_bytes_written = unprotected_bytes_size;
+ size_t *num_bytes_written = unprotected_bytes_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -361,8 +362,8 @@ static tsi_result fake_protector_unprotect(
return result;
}
-static void fake_protector_destroy(tsi_frame_protector* self) {
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+static void fake_protector_destroy(tsi_frame_protector *self) {
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
tsi_fake_frame_destruct(&impl->protect_frame);
tsi_fake_frame_destruct(&impl->unprotect_frame);
free(self);
@@ -370,30 +371,33 @@ static void fake_protector_destroy(tsi_frame_protector* self) {
static const tsi_frame_protector_vtable frame_protector_vtable = {
fake_protector_protect, fake_protector_protect_flush,
- fake_protector_unprotect, fake_protector_destroy, };
+ fake_protector_unprotect, fake_protector_destroy,
+};
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
- tsi_handshaker* self, unsigned char* bytes, size_t* bytes_size) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+ tsi_handshaker *self, unsigned char *bytes, size_t *bytes_size) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_result result = TSI_OK;
if (impl->needs_incoming_message || impl->result == TSI_OK) {
*bytes_size = 0;
return TSI_OK;
}
if (!impl->outgoing.needs_draining) {
- int next_message_to_send = impl->next_message_to_send + 2;
- const char* msg_string =
+ tsi_fake_handshake_message next_message_to_send =
+ impl->next_message_to_send + 2;
+ const char *msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
- result = bytes_to_frame((unsigned char*)msg_string, strlen(msg_string),
+ result = bytes_to_frame((unsigned char *)msg_string, strlen(msg_string),
&impl->outgoing);
if (result != TSI_OK) return result;
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;
}
if (tsi_tracing_enabled) {
- gpr_log(GPR_INFO, "%s prepared %s.", impl->is_client ? "Client" : "Server",
+ gpr_log(GPR_INFO, "%s prepared %s.",
+ impl->is_client ? "Client" : "Server",
tsi_fake_handshake_message_to_string(impl->next_message_to_send));
}
impl->next_message_to_send = next_message_to_send;
@@ -414,9 +418,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
}
static tsi_result fake_handshaker_process_bytes_from_peer(
- tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
+ tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_fake_handshake_message expected_msg = impl->next_message_to_send - 1;
tsi_fake_handshake_message received_msg;
@@ -429,7 +433,7 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
/* We now have a complete frame. */
result = tsi_fake_handshake_message_from_string(
- (const char*)impl->incoming.data + TSI_FAKE_FRAME_HEADER_SIZE,
+ (const char *)impl->incoming.data + TSI_FAKE_FRAME_HEADER_SIZE,
&received_msg);
if (result != TSI_OK) {
impl->result = result;
@@ -456,13 +460,13 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
return TSI_OK;
}
-static tsi_result fake_handshaker_get_result(tsi_handshaker* self) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+static tsi_result fake_handshaker_get_result(tsi_handshaker *self) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
return impl->result;
}
-static tsi_result fake_handshaker_extract_peer(tsi_handshaker* self,
- tsi_peer* peer) {
+static tsi_result fake_handshaker_extract_peer(tsi_handshaker *self,
+ tsi_peer *peer) {
tsi_result result = tsi_construct_peer(1, peer);
if (result != TSI_OK) return result;
result = tsi_construct_string_peer_property_from_cstring(
@@ -473,15 +477,15 @@ static tsi_result fake_handshaker_extract_peer(tsi_handshaker* self,
}
static tsi_result fake_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_protected_frame_size,
+ tsi_frame_protector **protector) {
*protector = tsi_create_fake_protector(max_protected_frame_size);
if (*protector == NULL) return TSI_OUT_OF_RESOURCES;
return TSI_OK;
}
-static void fake_handshaker_destroy(tsi_handshaker* self) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+static void fake_handshaker_destroy(tsi_handshaker *self) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_fake_frame_destruct(&impl->incoming);
tsi_fake_frame_destruct(&impl->outgoing);
free(self);
@@ -489,14 +493,13 @@ static void fake_handshaker_destroy(tsi_handshaker* self) {
static const tsi_handshaker_vtable handshaker_vtable = {
fake_handshaker_get_bytes_to_send_to_peer,
- fake_handshaker_process_bytes_from_peer,
- fake_handshaker_get_result,
- fake_handshaker_extract_peer,
- fake_handshaker_create_frame_protector,
- fake_handshaker_destroy, };
-
-tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
- tsi_fake_handshaker* impl = calloc(1, sizeof(tsi_fake_handshaker));
+ fake_handshaker_process_bytes_from_peer, fake_handshaker_get_result,
+ fake_handshaker_extract_peer, fake_handshaker_create_frame_protector,
+ fake_handshaker_destroy,
+};
+
+tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
+ tsi_fake_handshaker *impl = calloc(1, sizeof(tsi_fake_handshaker));
impl->base.vtable = &handshaker_vtable;
impl->is_client = is_client;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
@@ -510,9 +513,9 @@ tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
return &impl->base;
}
-tsi_frame_protector* tsi_create_fake_protector(
- size_t* max_protected_frame_size) {
- tsi_fake_frame_protector* impl = calloc(1, sizeof(tsi_fake_frame_protector));
+tsi_frame_protector *tsi_create_fake_protector(
+ size_t *max_protected_frame_size) {
+ tsi_fake_frame_protector *impl = calloc(1, sizeof(tsi_fake_frame_protector));
if (impl == NULL) return NULL;
impl->max_frame_size = (max_protected_frame_size == NULL)
? TSI_FAKE_DEFAULT_FRAME_SIZE
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index af9730b90e..6b8e596290 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TSI_FAKE_TRANSPORT_SECURITY_H
-#define GRPC_INTERNAL_CORE_TSI_FAKE_TRANSPORT_SECURITY_H
+#ifndef GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H
+#define GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H
#include "src/core/tsi/transport_security_interface.h"
@@ -48,14 +48,14 @@ extern "C" {
No cryptography is performed in these objects. They just simulate handshake
messages going back and forth for the handshaker and do some framing on
cleartext data for the protector. */
-tsi_handshaker* tsi_create_fake_handshaker(int is_client);
+tsi_handshaker *tsi_create_fake_handshaker(int is_client);
/* Creates a protector directly without going through the handshake phase. */
-tsi_frame_protector* tsi_create_fake_protector(
- size_t* max_protected_frame_size);
+tsi_frame_protector *tsi_create_fake_protector(
+ size_t *max_protected_frame_size);
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_TSI_FAKE_TRANSPORT_SECURITY_H */
+#endif /* GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H */
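
A simplified in-memory driver for the fake handshaker, assuming the generic tsi_handshaker_* entry points from transport_security_interface.h; the lock-step exchange, the 2 KiB buffer, and the omitted error handling are all simplifications of this sketch:

#include "src/core/tsi/fake_transport_security.h"
#include "src/core/tsi/transport_security_interface.h"

static void example_fake_handshake(void) {
  tsi_handshaker *client = tsi_create_fake_handshaker(1 /* is_client */);
  tsi_handshaker *server = tsi_create_fake_handshaker(0);
  unsigned char buf[2048];
  while (tsi_handshaker_get_result(client) == TSI_HANDSHAKE_IN_PROGRESS ||
         tsi_handshaker_get_result(server) == TSI_HANDSHAKE_IN_PROGRESS) {
    size_t size = sizeof(buf);
    /* CLIENT_INIT goes first, then the two sides alternate (return codes
       elided for brevity). */
    tsi_handshaker_get_bytes_to_send_to_peer(client, buf, &size);
    if (size > 0) tsi_handshaker_process_bytes_from_peer(server, buf, &size);
    size = sizeof(buf);
    tsi_handshaker_get_bytes_to_send_to_peer(server, buf, &size);
    if (size > 0) tsi_handshaker_process_bytes_from_peer(client, buf, &size);
  }
  tsi_handshaker_destroy(client);
  tsi_handshaker_destroy(server);
}
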
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 6156a39d09..42d25ca929 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,28 +33,38 @@
#include "src/core/tsi/ssl_transport_security.h"
+#include <grpc/support/port_platform.h>
+
#include <limits.h>
#include <string.h>
+/* TODO(jboeuf): refactor inet_ntop into a portability header. */
+#ifdef GPR_WINSOCK_SOCKET
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
-#include "src/core/tsi/transport_security.h"
#include <openssl/bio.h>
-#include <openssl/crypto.h> /* For OPENSSL_free */
+#include <openssl/crypto.h> /* For OPENSSL_free */
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
+#include "src/core/tsi/ssl_types.h"
+#include "src/core/tsi/transport_security.h"
+
/* --- Constants. ---*/
#define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND 16384
#define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_LOWER_BOUND 1024
-
/* Putting a macro like this and littering the source file with #if is really
bad practice.
TODO(jboeuf): refactor all the #if / #endif in a separate module. */
@@ -69,16 +79,16 @@
/* --- Structure definitions. ---*/
struct tsi_ssl_handshaker_factory {
- tsi_result (*create_handshaker)(tsi_ssl_handshaker_factory* self,
- const char* server_name_indication,
- tsi_handshaker** handshaker);
- void (*destroy)(tsi_ssl_handshaker_factory* self);
+ tsi_result (*create_handshaker)(tsi_ssl_handshaker_factory *self,
+ const char *server_name_indication,
+ tsi_handshaker **handshaker);
+ void (*destroy)(tsi_ssl_handshaker_factory *self);
};
typedef struct {
tsi_ssl_handshaker_factory base;
- SSL_CTX* ssl_context;
- unsigned char* alpn_protocol_list;
+ SSL_CTX *ssl_context;
+ unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
} tsi_ssl_client_handshaker_factory;
@@ -88,27 +98,27 @@ typedef struct {
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
- SSL_CTX** ssl_contexts;
- tsi_peer* ssl_context_x509_subject_names;
+ SSL_CTX **ssl_contexts;
+ tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
- unsigned char* alpn_protocol_list;
+ unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
} tsi_ssl_server_handshaker_factory;
typedef struct {
tsi_handshaker base;
- SSL* ssl;
- BIO* into_ssl;
- BIO* from_ssl;
+ SSL *ssl;
+ BIO *into_ssl;
+ BIO *from_ssl;
tsi_result result;
} tsi_ssl_handshaker;
typedef struct {
tsi_frame_protector base;
- SSL* ssl;
- BIO* into_ssl;
- BIO* from_ssl;
- unsigned char* buffer;
+ SSL *ssl;
+ BIO *into_ssl;
+ BIO *from_ssl;
+ unsigned char *buffer;
size_t buffer_size;
size_t buffer_offset;
} tsi_ssl_frame_protector;
@@ -118,7 +128,7 @@ typedef struct {
static gpr_once init_openssl_once = GPR_ONCE_INIT;
static gpr_mu *openssl_mutexes = NULL;
-static void openssl_locking_cb(int mode, int type, const char* file, int line) {
+static void openssl_locking_cb(int mode, int type, const char *file, int line) {
if (mode & CRYPTO_LOCK) {
gpr_mu_lock(&openssl_mutexes[type]);
} else {
@@ -132,10 +142,13 @@ static unsigned long openssl_thread_id_cb(void) {
static void init_openssl(void) {
int i;
+ int num_locks;
SSL_library_init();
SSL_load_error_strings();
OpenSSL_add_all_algorithms();
- openssl_mutexes = malloc(CRYPTO_num_locks() * sizeof(gpr_mu));
+ num_locks = CRYPTO_num_locks();
+ GPR_ASSERT(num_locks > 0);
+ openssl_mutexes = malloc((size_t)num_locks * sizeof(gpr_mu));
GPR_ASSERT(openssl_mutexes != NULL);
for (i = 0; i < CRYPTO_num_locks(); i++) {
gpr_mu_init(&openssl_mutexes[i]);
@@ -146,7 +159,7 @@ static void init_openssl(void) {
/* --- Ssl utils. ---*/
-static const char* ssl_error_string(int error) {
+static const char *ssl_error_string(int error) {
switch (error) {
case SSL_ERROR_NONE:
return "SSL_ERROR_NONE";
@@ -172,8 +185,8 @@ static const char* ssl_error_string(int error) {
}
/* TODO(jboeuf): Remove when we are past the debugging phase with this code. */
-static void ssl_log_where_info(const SSL* ssl, int where, int flag,
- const char* msg) {
+static void ssl_log_where_info(const SSL *ssl, int where, int flag,
+ const char *msg) {
if ((where & flag) && tsi_tracing_enabled) {
gpr_log(GPR_INFO, "%20.20s - %30.30s - %5.10s", msg,
SSL_state_string_long(ssl), SSL_state_string(ssl));
@@ -181,7 +194,7 @@ static void ssl_log_where_info(const SSL* ssl, int where, int flag,
}
/* Used for debugging. TODO(jboeuf): Remove when code is mature enough. */
-static void ssl_info_callback(const SSL* ssl, int where, int ret) {
+static void ssl_info_callback(const SSL *ssl, int where, int ret) {
if (ret == 0) {
gpr_log(GPR_ERROR, "ssl_info_callback: error occured.\n");
return;
@@ -193,13 +206,16 @@ static void ssl_info_callback(const SSL* ssl, int where, int ret) {
}
/* Returns 1 if name looks like an IP address, 0 otherwise.
- This is a very rough heuristic as it does not handle IPV6 or things like:
- 0300.0250.00.01, 0xC0.0Xa8.0x0.0x1, 000030052000001, 0xc0.052000001 */
+ This is a very rough heuristic, and only handles IPv6 in hexadecimal form. */
static int looks_like_ip_address(const char *name) {
size_t i;
size_t dot_count = 0;
size_t num_size = 0;
for (i = 0; i < strlen(name); i++) {
+ if (name[i] == ':') {
+ /* IPv6 Address in hexadecimal form, : is not allowed in DNS names. */
+ return 1;
+ }
if (name[i] >= '0' && name[i] <= '9') {
if (num_size > 3) return 0;
num_size++;
@@ -215,14 +231,13 @@ static int looks_like_ip_address(const char *name) {
return 1;
}
-
/* Gets the subject CN from an X509 cert. */
-static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
- size_t* utf8_size) {
+static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8,
+ size_t *utf8_size) {
int common_name_index = -1;
- X509_NAME_ENTRY* common_name_entry = NULL;
- ASN1_STRING* common_name_asn1 = NULL;
- X509_NAME* subject_name = X509_get_subject_name(cert);
+ X509_NAME_ENTRY *common_name_entry = NULL;
+ ASN1_STRING *common_name_asn1 = NULL;
+ X509_NAME *subject_name = X509_get_subject_name(cert);
int utf8_returned_size = 0;
if (subject_name == NULL) {
gpr_log(GPR_ERROR, "Could not get subject name from certificate.");
@@ -251,14 +266,14 @@ static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
gpr_log(GPR_ERROR, "Could not extract utf8 from asn1 string.");
return TSI_OUT_OF_RESOURCES;
}
- *utf8_size = utf8_returned_size;
+ *utf8_size = (size_t)utf8_returned_size;
return TSI_OK;
}
/* Gets the subject CN of an X509 cert as a tsi_peer_property. */
static tsi_result peer_property_from_x509_common_name(
- X509* cert, tsi_peer_property* property) {
- unsigned char* common_name;
+ X509 *cert, tsi_peer_property *property) {
+ unsigned char *common_name;
size_t common_name_size;
tsi_result result =
ssl_get_x509_common_name(cert, &common_name, &common_name_size);
@@ -272,57 +287,105 @@ static tsi_result peer_property_from_x509_common_name(
}
result = tsi_construct_string_peer_property(
TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY,
- common_name == NULL ? "" : (const char*)common_name, common_name_size,
+ common_name == NULL ? "" : (const char *)common_name, common_name_size,
property);
OPENSSL_free(common_name);
return result;
}
+/* Gets the X509 cert in PEM format as a tsi_peer_property. */
+static tsi_result add_pem_certificate(X509 *cert, tsi_peer_property *property) {
+ BIO *bio = BIO_new(BIO_s_mem());
+ if (!PEM_write_bio_X509(bio, cert)) {
+ BIO_free(bio);
+ return TSI_INTERNAL_ERROR;
+ }
+ char *contents;
+ long len = BIO_get_mem_data(bio, &contents);
+ if (len <= 0) {
+ BIO_free(bio);
+ return TSI_INTERNAL_ERROR;
+ }
+ tsi_result result = tsi_construct_string_peer_property(
+ TSI_X509_PEM_CERT_PROPERTY, (const char *)contents, (size_t)len,
+ property);
+ BIO_free(bio);
+ return result;
+}
+
/* Gets the subject SANs from an X509 cert as a tsi_peer_property. */
static tsi_result add_subject_alt_names_properties_to_peer(
- tsi_peer* peer, GENERAL_NAMES* subject_alt_names,
- int subject_alt_name_count) {
- int i;
+ tsi_peer *peer, GENERAL_NAMES *subject_alt_names,
+ size_t subject_alt_name_count) {
+ size_t i;
tsi_result result = TSI_OK;
/* Reset for DNS entries filtering. */
peer->property_count -= subject_alt_name_count;
for (i = 0; i < subject_alt_name_count; i++) {
- GENERAL_NAME* subject_alt_name =
- sk_GENERAL_NAME_value(subject_alt_names, i);
+ GENERAL_NAME *subject_alt_name =
+ sk_GENERAL_NAME_value(subject_alt_names, TSI_SIZE_AS_SIZE(i));
/* Filter out the non-dns entries names. */
if (subject_alt_name->type == GEN_DNS) {
- unsigned char* dns_name = NULL;
- int dns_name_size =
- ASN1_STRING_to_UTF8(&dns_name, subject_alt_name->d.dNSName);
- if (dns_name_size < 0) {
+ unsigned char *name = NULL;
+ int name_size;
+ name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.dNSName);
+ if (name_size < 0) {
gpr_log(GPR_ERROR, "Could not get utf8 from asn1 string.");
result = TSI_INTERNAL_ERROR;
break;
}
result = tsi_construct_string_peer_property(
- TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY,
- (const char*)dns_name, dns_name_size,
+ TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, (const char *)name,
+ (size_t)name_size, &peer->properties[peer->property_count++]);
+ OPENSSL_free(name);
+ } else if (subject_alt_name->type == GEN_IPADD) {
+ char ntop_buf[INET6_ADDRSTRLEN];
+ int af;
+
+ if (subject_alt_name->d.iPAddress->length == 4) {
+ af = AF_INET;
+ } else if (subject_alt_name->d.iPAddress->length == 16) {
+ af = AF_INET6;
+ } else {
+ gpr_log(GPR_ERROR, "SAN IP Address contained invalid IP");
+ result = TSI_INTERNAL_ERROR;
+ break;
+ }
+ const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data,
+ ntop_buf, INET6_ADDRSTRLEN);
+ if (name == NULL) {
+ gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet.");
+ result = TSI_INTERNAL_ERROR;
+ break;
+ }
+
+ result = tsi_construct_string_peer_property_from_cstring(
+ TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, name,
&peer->properties[peer->property_count++]);
- OPENSSL_free(dns_name);
- if (result != TSI_OK) break;
}
+ if (result != TSI_OK) break;
}
return result;
}
/* Gets information about the peer's X509 cert as a tsi_peer object. */
-static tsi_result peer_from_x509(X509* cert, int include_certificate_type,
- tsi_peer* peer) {
+static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
+ tsi_peer *peer) {
/* TODO(jboeuf): Maybe add more properties. */
- GENERAL_NAMES* subject_alt_names =
+ GENERAL_NAMES *subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
- int subject_alt_name_count =
- (subject_alt_names != NULL) ? sk_GENERAL_NAME_num(subject_alt_names) : 0;
- size_t property_count = (include_certificate_type ? 1 : 0) +
- 1 /* common name */ + subject_alt_name_count;
- tsi_result result = tsi_construct_peer(property_count, peer);
+ int subject_alt_name_count = (subject_alt_names != NULL)
+ ? (int)sk_GENERAL_NAME_num(subject_alt_names)
+ : 0;
+ size_t property_count;
+ tsi_result result;
+ GPR_ASSERT(subject_alt_name_count >= 0);
+ property_count = (include_certificate_type ? (size_t)1 : 0) +
+ 2 /* common name, certificate */ +
+ (size_t)subject_alt_name_count;
+ result = tsi_construct_peer(property_count, peer);
if (result != TSI_OK) return result;
do {
if (include_certificate_type) {
@@ -335,9 +398,13 @@ static tsi_result peer_from_x509(X509* cert, int include_certificate_type,
cert, &peer->properties[include_certificate_type ? 1 : 0]);
if (result != TSI_OK) break;
+ result = add_pem_certificate(
+ cert, &peer->properties[include_certificate_type ? 2 : 1]);
+ if (result != TSI_OK) break;
+
if (subject_alt_name_count != 0) {
- result = add_subject_alt_names_properties_to_peer(peer, subject_alt_names,
- subject_alt_name_count);
+ result = add_subject_alt_names_properties_to_peer(
+ peer, subject_alt_names, (size_t)subject_alt_name_count);
if (result != TSI_OK) break;
}
} while (0);
@@ -354,15 +421,18 @@ static void log_ssl_error_stack(void) {
unsigned long err;
while ((err = ERR_get_error()) != 0) {
char details[256];
- ERR_error_string_n(err, details, sizeof(details));
+ ERR_error_string_n((uint32_t)err, details, sizeof(details));
gpr_log(GPR_ERROR, "%s", details);
}
}
/* Performs an SSL_read and handle errors. */
-static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
- int read_from_ssl = SSL_read(ssl, unprotected_bytes, *unprotected_bytes_size);
+static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
+ int read_from_ssl;
+ GPR_ASSERT(*unprotected_bytes_size <= INT_MAX);
+ read_from_ssl =
+ SSL_read(ssl, unprotected_bytes, (int)*unprotected_bytes_size);
if (read_from_ssl == 0) {
gpr_log(GPR_ERROR, "SSL_read returned 0 unexpectedly.");
return TSI_INTERNAL_ERROR;
@@ -389,15 +459,17 @@ static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes,
return TSI_PROTOCOL_FAILURE;
}
}
- *unprotected_bytes_size = read_from_ssl;
+ *unprotected_bytes_size = (size_t)read_from_ssl;
return TSI_OK;
}
/* Performs an SSL_write and handle errors. */
-static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes,
+static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes,
size_t unprotected_bytes_size) {
- int ssl_write_result =
- SSL_write(ssl, unprotected_bytes, unprotected_bytes_size);
+ int ssl_write_result;
+ GPR_ASSERT(unprotected_bytes_size <= INT_MAX);
+ ssl_write_result =
+ SSL_write(ssl, unprotected_bytes, (int)unprotected_bytes_size);
if (ssl_write_result < 0) {
ssl_write_result = SSL_get_error(ssl, ssl_write_result);
if (ssl_write_result == SSL_ERROR_WANT_READ) {
@@ -415,11 +487,13 @@ static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes,
/* Loads an in-memory PEM certificate chain into the SSL context. */
static tsi_result ssl_ctx_use_certificate_chain(
- SSL_CTX* context, const unsigned char* pem_cert_chain,
+ SSL_CTX *context, const unsigned char *pem_cert_chain,
size_t pem_cert_chain_size) {
tsi_result result = TSI_OK;
- X509* certificate = NULL;
- BIO* pem = BIO_new_mem_buf((void*)pem_cert_chain, pem_cert_chain_size);
+ X509 *certificate = NULL;
+ BIO *pem;
+ GPR_ASSERT(pem_cert_chain_size <= INT_MAX);
+ pem = BIO_new_mem_buf((void *)pem_cert_chain, (int)pem_cert_chain_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
@@ -433,7 +507,7 @@ static tsi_result ssl_ctx_use_certificate_chain(
break;
}
while (1) {
- X509* certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, "");
+ X509 *certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, "");
if (certificate_authority == NULL) {
ERR_clear_error();
break; /* Done reading. */
@@ -445,7 +519,7 @@ static tsi_result ssl_ctx_use_certificate_chain(
}
/* We don't need to free certificate_authority as its ownership has been
transfered to the context. That is not the case for certificate though.
- */
+ */
}
} while (0);
@@ -455,12 +529,14 @@ static tsi_result ssl_ctx_use_certificate_chain(
}
/* Loads an in-memory PEM private key into the SSL context. */
-static tsi_result ssl_ctx_use_private_key(SSL_CTX* context,
- const unsigned char* pem_key,
+static tsi_result ssl_ctx_use_private_key(SSL_CTX *context,
+ const unsigned char *pem_key,
size_t pem_key_size) {
tsi_result result = TSI_OK;
- EVP_PKEY* private_key = NULL;
- BIO* pem = BIO_new_mem_buf((void*)pem_key, pem_key_size);
+ EVP_PKEY *private_key = NULL;
+ BIO *pem;
+ GPR_ASSERT(pem_key_size <= INT_MAX);
+ pem = BIO_new_mem_buf((void *)pem_key, (int)pem_key_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, "");
@@ -481,14 +557,17 @@ static tsi_result ssl_ctx_use_private_key(SSL_CTX* context,
/* Loads in-memory PEM verification certs into the SSL context and optionally
returns the verification cert names (root_names can be NULL). */
static tsi_result ssl_ctx_load_verification_certs(
- SSL_CTX* context, const unsigned char* pem_roots, size_t pem_roots_size,
+ SSL_CTX *context, const unsigned char *pem_roots, size_t pem_roots_size,
STACK_OF(X509_NAME) * *root_names) {
tsi_result result = TSI_OK;
size_t num_roots = 0;
- X509* root = NULL;
- X509_NAME* root_name = NULL;
- BIO* pem = BIO_new_mem_buf((void*)pem_roots, pem_roots_size);
- X509_STORE* root_store = SSL_CTX_get_cert_store(context);
+ X509 *root = NULL;
+ X509_NAME *root_name = NULL;
+ BIO *pem;
+ X509_STORE *root_store;
+ GPR_ASSERT(pem_roots_size <= INT_MAX);
+ pem = BIO_new_mem_buf((void *)pem_roots, (int)pem_roots_size);
+ root_store = SSL_CTX_get_cert_store(context);
if (root_store == NULL) return TSI_INVALID_ARGUMENT;
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
if (root_names != NULL) {
@@ -546,9 +625,9 @@ static tsi_result ssl_ctx_load_verification_certs(
/* Populates the SSL context with a private key and a cert chain, and sets the
cipher list and the ephemeral ECDH key. */
static tsi_result populate_ssl_context(
- SSL_CTX* context, const unsigned char* pem_private_key,
- size_t pem_private_key_size, const unsigned char* pem_certificate_chain,
- size_t pem_certificate_chain_size, const char* cipher_list) {
+ SSL_CTX *context, const unsigned char *pem_private_key,
+ size_t pem_private_key_size, const unsigned char *pem_certificate_chain,
+ size_t pem_certificate_chain_size, const char *cipher_list) {
tsi_result result = TSI_OK;
if (pem_certificate_chain != NULL) {
result = ssl_ctx_use_certificate_chain(context, pem_certificate_chain,
@@ -571,7 +650,7 @@ static tsi_result populate_ssl_context(
return TSI_INVALID_ARGUMENT;
}
{
- EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_KEY *ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
if (!SSL_CTX_set_tmp_ecdh(context, ecdh)) {
gpr_log(GPR_ERROR, "Could not set ephemeral ECDH key.");
EC_KEY_free(ecdh);
@@ -585,10 +664,12 @@ static tsi_result populate_ssl_context(
/* Extracts the CN and the SANs from an X509 cert as a peer object. */
static tsi_result extract_x509_subject_names_from_pem_cert(
- const unsigned char* pem_cert, size_t pem_cert_size, tsi_peer* peer) {
+ const unsigned char *pem_cert, size_t pem_cert_size, tsi_peer *peer) {
tsi_result result = TSI_OK;
- X509* cert = NULL;
- BIO* pem = BIO_new_mem_buf((void*)pem_cert, pem_cert_size);
+ X509 *cert = NULL;
+ BIO *pem;
+ GPR_ASSERT(pem_cert_size <= INT_MAX);
+ pem = BIO_new_mem_buf((void *)pem_cert, (int)pem_cert_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
cert = PEM_read_bio_X509(pem, NULL, NULL, "");
@@ -605,11 +686,11 @@ static tsi_result extract_x509_subject_names_from_pem_cert(
/* Builds the alpn protocol name list according to rfc 7301. */
static tsi_result build_alpn_protocol_name_list(
- const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- unsigned char** protocol_name_list, size_t* protocol_name_list_length) {
+ const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ unsigned char **protocol_name_list, size_t *protocol_name_list_length) {
uint16_t i;
- unsigned char* current;
+ unsigned char *current;
*protocol_name_list = NULL;
*protocol_name_list_length = 0;
if (num_alpn_protocols == 0) return TSI_INVALID_ARGUMENT;
@@ -618,7 +699,7 @@ static tsi_result build_alpn_protocol_name_list(
gpr_log(GPR_ERROR, "Invalid 0-length protocol name.");
return TSI_INVALID_ARGUMENT;
}
- *protocol_name_list_length += alpn_protocols_lengths[i] + 1;
+ *protocol_name_list_length += (size_t)alpn_protocols_lengths[i] + 1;
}
*protocol_name_list = malloc(*protocol_name_list_length);
if (*protocol_name_list == NULL) return TSI_OUT_OF_RESOURCES;
@@ -630,7 +711,8 @@ static tsi_result build_alpn_protocol_name_list(
}
/* Safety check. */
if ((current < *protocol_name_list) ||
- ((gpr_uintptr)(current - *protocol_name_list) != *protocol_name_list_length)) {
+ ((uintptr_t)(current - *protocol_name_list) !=
+ *protocol_name_list_length)) {
return TSI_INTERNAL_ERROR;
}
return TSI_OK;
@@ -638,28 +720,29 @@ static tsi_result build_alpn_protocol_name_list(
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result ssl_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+static tsi_result ssl_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
int read_from_ssl;
size_t available;
tsi_result result = TSI_OK;
/* First see if we have some pending data in the SSL BIO. */
- size_t pending_in_ssl = BIO_pending(impl->from_ssl);
+ int pending_in_ssl = (int)BIO_pending(impl->from_ssl);
if (pending_in_ssl > 0) {
*unprotected_bytes_size = 0;
+ GPR_ASSERT(*protected_output_frames_size <= INT_MAX);
read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames,
- *protected_output_frames_size);
+ (int)*protected_output_frames_size);
if (read_from_ssl < 0) {
gpr_log(GPR_ERROR,
"Could not read from BIO even though some data is pending");
return TSI_INTERNAL_ERROR;
}
- *protected_output_frames_size = read_from_ssl;
+ *protected_output_frames_size = (size_t)read_from_ssl;
return TSI_OK;
}
@@ -679,24 +762,26 @@ static tsi_result ssl_protector_protect(tsi_frame_protector* self,
result = do_ssl_write(impl->ssl, impl->buffer, impl->buffer_size);
if (result != TSI_OK) return result;
+ GPR_ASSERT(*protected_output_frames_size <= INT_MAX);
read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames,
- *protected_output_frames_size);
+ (int)*protected_output_frames_size);
if (read_from_ssl < 0) {
gpr_log(GPR_ERROR, "Could not read from BIO after SSL_write.");
return TSI_INTERNAL_ERROR;
}
- *protected_output_frames_size = read_from_ssl;
+ *protected_output_frames_size = (size_t)read_from_ssl;
*unprotected_bytes_size = available;
impl->buffer_offset = 0;
return TSI_OK;
}
static tsi_result ssl_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
tsi_result result = TSI_OK;
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
int read_from_ssl = 0;
+ int pending;
if (impl->buffer_offset != 0) {
result = do_ssl_write(impl->ssl, impl->buffer, impl->buffer_offset);
@@ -704,29 +789,34 @@ static tsi_result ssl_protector_protect_flush(
impl->buffer_offset = 0;
}
- *still_pending_size = BIO_pending(impl->from_ssl);
+ pending = (int)BIO_pending(impl->from_ssl);
+ GPR_ASSERT(pending >= 0);
+ *still_pending_size = (size_t)pending;
if (*still_pending_size == 0) return TSI_OK;
+ GPR_ASSERT(*protected_output_frames_size <= INT_MAX);
read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames,
- *protected_output_frames_size);
+ (int)*protected_output_frames_size);
if (read_from_ssl <= 0) {
gpr_log(GPR_ERROR, "Could not read from BIO after SSL_write.");
return TSI_INTERNAL_ERROR;
}
- *protected_output_frames_size = read_from_ssl;
- *still_pending_size = BIO_pending(impl->from_ssl);
+ *protected_output_frames_size = (size_t)read_from_ssl;
+ pending = (int)BIO_pending(impl->from_ssl);
+ GPR_ASSERT(pending >= 0);
+ *still_pending_size = (size_t)pending;
return TSI_OK;
}
static tsi_result ssl_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
tsi_result result = TSI_OK;
int written_into_ssl = 0;
size_t output_bytes_size = *unprotected_bytes_size;
size_t output_bytes_offset = 0;
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
/* First, try to read remaining data from ssl. */
result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size);
@@ -741,14 +831,15 @@ static tsi_result ssl_protector_unprotect(
*unprotected_bytes_size = output_bytes_size - output_bytes_offset;
/* Then, try to write some data to ssl. */
+ GPR_ASSERT(*protected_frames_bytes_size <= INT_MAX);
written_into_ssl = BIO_write(impl->into_ssl, protected_frames_bytes,
- *protected_frames_bytes_size);
+ (int)*protected_frames_bytes_size);
if (written_into_ssl < 0) {
gpr_log(GPR_ERROR, "Sending protected frame to ssl failed with %d",
written_into_ssl);
return TSI_INTERNAL_ERROR;
}
- *protected_frames_bytes_size = written_into_ssl;
+ *protected_frames_bytes_size = (size_t)written_into_ssl;
/* Now try to read some data again. */
result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size);
@@ -759,8 +850,8 @@ static tsi_result ssl_protector_unprotect(
return result;
}
-static void ssl_protector_destroy(tsi_frame_protector* self) {
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+static void ssl_protector_destroy(tsi_frame_protector *self) {
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
if (impl->buffer != NULL) free(impl->buffer);
if (impl->ssl != NULL) SSL_free(impl->ssl);
free(self);
@@ -768,20 +859,22 @@ static void ssl_protector_destroy(tsi_frame_protector* self) {
static const tsi_frame_protector_vtable frame_protector_vtable = {
ssl_protector_protect, ssl_protector_protect_flush, ssl_protector_unprotect,
- ssl_protector_destroy, };
+ ssl_protector_destroy,
+};
/* --- tsi_handshaker methods implementation. ---*/
-static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
int bytes_read_from_ssl = 0;
if (bytes == NULL || bytes_size == NULL || *bytes_size == 0 ||
*bytes_size > INT_MAX) {
return TSI_INVALID_ARGUMENT;
}
- bytes_read_from_ssl = BIO_read(impl->from_ssl, bytes, *bytes_size);
+ GPR_ASSERT(*bytes_size <= INT_MAX);
+ bytes_read_from_ssl = BIO_read(impl->from_ssl, bytes, (int)*bytes_size);
if (bytes_read_from_ssl < 0) {
*bytes_size = 0;
if (!BIO_should_retry(impl->from_ssl)) {
@@ -795,8 +888,8 @@ static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
return BIO_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA;
}
-static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
if ((impl->result == TSI_HANDSHAKE_IN_PROGRESS) &&
SSL_is_init_finished(impl->ssl)) {
impl->result = TSI_OK;
@@ -805,19 +898,21 @@ static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
}
static tsi_result ssl_handshaker_process_bytes_from_peer(
- tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+ tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
int bytes_written_into_ssl_size = 0;
if (bytes == NULL || bytes_size == 0 || *bytes_size > INT_MAX) {
return TSI_INVALID_ARGUMENT;
}
- bytes_written_into_ssl_size = BIO_write(impl->into_ssl, bytes, *bytes_size);
+ GPR_ASSERT(*bytes_size <= INT_MAX);
+ bytes_written_into_ssl_size =
+ BIO_write(impl->into_ssl, bytes, (int)*bytes_size);
if (bytes_written_into_ssl_size < 0) {
gpr_log(GPR_ERROR, "Could not write to memory BIO.");
impl->result = TSI_INTERNAL_ERROR;
return impl->result;
}
- *bytes_size = bytes_written_into_ssl_size;
+ *bytes_size = (size_t)bytes_written_into_ssl_size;
if (!tsi_handshaker_is_in_progress(self)) {
impl->result = TSI_OK;
@@ -848,13 +943,13 @@ static tsi_result ssl_handshaker_process_bytes_from_peer(
}
}
-static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
- tsi_peer* peer) {
+static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
+ tsi_peer *peer) {
tsi_result result = TSI_OK;
- const unsigned char* alpn_selected = NULL;
+ const unsigned char *alpn_selected = NULL;
unsigned int alpn_selected_len;
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
- X509* peer_cert = SSL_get_peer_certificate(impl->ssl);
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+ X509 *peer_cert = SSL_get_peer_certificate(impl->ssl);
if (peer_cert != NULL) {
result = peer_from_x509(peer_cert, 1, peer);
X509_free(peer_cert);
@@ -870,14 +965,14 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
}
if (alpn_selected != NULL) {
size_t i;
- tsi_peer_property* new_properties =
+ tsi_peer_property *new_properties =
calloc(1, sizeof(tsi_peer_property) * (peer->property_count + 1));
if (new_properties == NULL) return TSI_OUT_OF_RESOURCES;
for (i = 0; i < peer->property_count; i++) {
new_properties[i] = peer->properties[i];
}
result = tsi_construct_string_peer_property(
- TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char*)alpn_selected,
+ TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char *)alpn_selected,
alpn_selected_len, &new_properties[peer->property_count]);
if (result != TSI_OK) {
free(new_properties);
@@ -891,12 +986,12 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
}
static tsi_result ssl_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_output_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_output_protected_frame_size,
+ tsi_frame_protector **protector) {
size_t actual_max_output_protected_frame_size =
TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND;
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
- tsi_ssl_frame_protector* protector_impl =
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+ tsi_ssl_frame_protector *protector_impl =
calloc(1, sizeof(tsi_ssl_frame_protector));
if (protector_impl == NULL) {
return TSI_OUT_OF_RESOURCES;
@@ -936,41 +1031,40 @@ static tsi_result ssl_handshaker_create_frame_protector(
return TSI_OK;
}
-static void ssl_handshaker_destroy(tsi_handshaker* self) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static void ssl_handshaker_destroy(tsi_handshaker *self) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
free(impl);
}
static const tsi_handshaker_vtable handshaker_vtable = {
ssl_handshaker_get_bytes_to_send_to_peer,
- ssl_handshaker_process_bytes_from_peer,
- ssl_handshaker_get_result,
- ssl_handshaker_extract_peer,
- ssl_handshaker_create_frame_protector,
- ssl_handshaker_destroy, };
+ ssl_handshaker_process_bytes_from_peer, ssl_handshaker_get_result,
+ ssl_handshaker_extract_peer, ssl_handshaker_create_frame_protector,
+ ssl_handshaker_destroy,
+};
/* --- tsi_ssl_handshaker_factory common methods. --- */
tsi_result tsi_ssl_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
if (self == NULL || handshaker == NULL) return TSI_INVALID_ARGUMENT;
return self->create_handshaker(self, server_name_indication, handshaker);
}
-void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory* self) {
+void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory *self) {
if (self == NULL) return;
self->destroy(self);
}
-static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
- const char* server_name_indication,
- tsi_handshaker** handshaker) {
- SSL* ssl = SSL_new(ctx);
- BIO* into_ssl = NULL;
- BIO* from_ssl = NULL;
- tsi_ssl_handshaker* impl = NULL;
+static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
+ const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ SSL *ssl = SSL_new(ctx);
+ BIO *into_ssl = NULL;
+ BIO *from_ssl = NULL;
+ tsi_ssl_handshaker *impl = NULL;
*handshaker = NULL;
if (ctx == NULL) {
gpr_log(GPR_ERROR, "SSL Context is null. Should never happen.");
@@ -1029,18 +1123,18 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
return TSI_OK;
}
-static int select_protocol_list(const unsigned char** out,
- unsigned char* outlen,
- const unsigned char* client_list,
- unsigned int client_list_len,
- const unsigned char* server_list,
- unsigned int server_list_len) {
- const unsigned char* client_current = client_list;
+static int select_protocol_list(const unsigned char **out,
+ unsigned char *outlen,
+ const unsigned char *client_list,
+ size_t client_list_len,
+ const unsigned char *server_list,
+ size_t server_list_len) {
+ const unsigned char *client_current = client_list;
while ((unsigned int)(client_current - client_list) < client_list_len) {
unsigned char client_current_len = *(client_current++);
- const unsigned char* server_current = server_list;
+ const unsigned char *server_current = server_list;
while ((server_current >= server_list) &&
- (gpr_uintptr)(server_current - server_list) < server_list_len) {
+ (uintptr_t)(server_current - server_list) < server_list_len) {
unsigned char server_current_len = *(server_current++);
if ((client_current_len == server_current_len) &&
!memcmp(client_current, server_current, server_current_len)) {
@@ -1058,29 +1152,31 @@ static int select_protocol_list(const unsigned char** out,
/* --- tsi_ssl_client_handshaker_factory methods implementation. --- */
static tsi_result ssl_client_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
- tsi_ssl_client_handshaker_factory* impl =
- (tsi_ssl_client_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ tsi_ssl_client_handshaker_factory *impl =
+ (tsi_ssl_client_handshaker_factory *)self;
return create_tsi_ssl_handshaker(impl->ssl_context, 1, server_name_indication,
handshaker);
}
static void ssl_client_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory* self) {
- tsi_ssl_client_handshaker_factory* impl =
- (tsi_ssl_client_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self) {
+ tsi_ssl_client_handshaker_factory *impl =
+ (tsi_ssl_client_handshaker_factory *)self;
if (impl->ssl_context != NULL) SSL_CTX_free(impl->ssl_context);
if (impl->alpn_protocol_list != NULL) free(impl->alpn_protocol_list);
free(impl);
}
-static int client_handshaker_factory_npn_callback(
- SSL* ssl, unsigned char** out, unsigned char* outlen,
- const unsigned char* in, unsigned int inlen, void* arg) {
- tsi_ssl_client_handshaker_factory* factory =
- (tsi_ssl_client_handshaker_factory*)arg;
- return select_protocol_list((const unsigned char**)out, outlen,
+static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out,
+ unsigned char *outlen,
+ const unsigned char *in,
+ unsigned int inlen,
+ void *arg) {
+ tsi_ssl_client_handshaker_factory *factory =
+ (tsi_ssl_client_handshaker_factory *)arg;
+ return select_protocol_list((const unsigned char **)out, outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}
@@ -1088,10 +1184,10 @@ static int client_handshaker_factory_npn_callback(
/* --- tsi_ssl_server_handshaker_factory methods implementation. --- */
static tsi_result ssl_server_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)self;
if (impl->ssl_context_count == 0 || server_name_indication != NULL) {
return TSI_INVALID_ARGUMENT;
}
@@ -1101,9 +1197,9 @@ static tsi_result ssl_server_handshaker_factory_create_handshaker(
}
static void ssl_server_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory* self) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)self;
size_t i;
for (i = 0; i < impl->ssl_context_count; i++) {
if (impl->ssl_contexts[i] != NULL) {
@@ -1119,10 +1215,10 @@ static void ssl_server_handshaker_factory_destroy(
free(impl);
}
-static int does_entry_match_name(const char* entry, size_t entry_length,
- const char* name) {
+static int does_entry_match_name(const char *entry, size_t entry_length,
+ const char *name) {
const char *dot;
- const char* name_subdomain = NULL;
+ const char *name_subdomain = NULL;
size_t name_length = strlen(name);
size_t name_subdomain_length;
if (entry_length == 0) return 0;
@@ -1153,7 +1249,7 @@ static int does_entry_match_name(const char* entry, size_t entry_length,
if (name_subdomain_length < 2) return 0;
name_subdomain++; /* Starts after the dot. */
name_subdomain_length--;
- entry += 2; /* Remove *. */
+ entry += 2; /* Remove *. */
entry_length -= 2;
dot = strchr(name_subdomain, '.');
if ((dot == NULL) || (dot == &name_subdomain[name_subdomain_length - 1])) {
@@ -1167,12 +1263,12 @@ static int does_entry_match_name(const char* entry, size_t entry_length,
strncmp(entry, name_subdomain, entry_length) == 0);
}
-static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
- void* arg) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)arg;
+static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap,
+ void *arg) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)arg;
size_t i = 0;
- const char* servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (servername == NULL || strlen(servername) == 0) {
return SSL_TLSEXT_ERR_NOACK;
}
@@ -1190,10 +1286,10 @@ static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
#if TSI_OPENSSL_ALPN_SUPPORT
static int server_handshaker_factory_alpn_callback(
- SSL* ssl, const unsigned char** out, unsigned char* outlen,
- const unsigned char* in, unsigned int inlen, void* arg) {
- tsi_ssl_server_handshaker_factory* factory =
- (tsi_ssl_server_handshaker_factory*)arg;
+ SSL *ssl, const unsigned char **out, unsigned char *outlen,
+ const unsigned char *in, unsigned int inlen, void *arg) {
+ tsi_ssl_server_handshaker_factory *factory =
+ (tsi_ssl_server_handshaker_factory *)arg;
return select_protocol_list(out, outlen, in, inlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length);
@@ -1201,25 +1297,26 @@ static int server_handshaker_factory_alpn_callback(
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
static int server_handshaker_factory_npn_advertised_callback(
- SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) {
- tsi_ssl_server_handshaker_factory* factory =
- (tsi_ssl_server_handshaker_factory*)arg;
+ SSL *ssl, const unsigned char **out, unsigned int *outlen, void *arg) {
+ tsi_ssl_server_handshaker_factory *factory =
+ (tsi_ssl_server_handshaker_factory *)arg;
*out = factory->alpn_protocol_list;
- *outlen = factory->alpn_protocol_list_length;
+ GPR_ASSERT(factory->alpn_protocol_list_length <= UINT_MAX);
+ *outlen = (unsigned int)factory->alpn_protocol_list_length;
return SSL_TLSEXT_ERR_OK;
}
/* --- tsi_ssl_handshaker_factory constructors. --- */
tsi_result tsi_create_ssl_client_handshaker_factory(
- const unsigned char* pem_private_key, size_t pem_private_key_size,
- const unsigned char* pem_cert_chain, size_t pem_cert_chain_size,
- const unsigned char* pem_root_certs, size_t pem_root_certs_size,
- const char* cipher_list, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory) {
- SSL_CTX* ssl_context = NULL;
- tsi_ssl_client_handshaker_factory* impl = NULL;
+ const unsigned char *pem_private_key, size_t pem_private_key_size,
+ const unsigned char *pem_cert_chain, size_t pem_cert_chain_size,
+ const unsigned char *pem_root_certs, size_t pem_root_certs_size,
+ const char *cipher_list, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory) {
+ SSL_CTX *ssl_context = NULL;
+ tsi_ssl_client_handshaker_factory *impl = NULL;
tsi_result result = TSI_OK;
gpr_once_init(&init_openssl_once, init_openssl);
@@ -1263,8 +1360,10 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
break;
}
#if TSI_OPENSSL_ALPN_SUPPORT
- if (SSL_CTX_set_alpn_protos(ssl_context, impl->alpn_protocol_list,
- impl->alpn_protocol_list_length)) {
+ GPR_ASSERT(impl->alpn_protocol_list_length < UINT_MAX);
+ if (SSL_CTX_set_alpn_protos(
+ ssl_context, impl->alpn_protocol_list,
+ (unsigned int)impl->alpn_protocol_list_length)) {
gpr_log(GPR_ERROR, "Could not set alpn protocol list to context.");
result = TSI_INVALID_ARGUMENT;
break;
@@ -1289,15 +1388,15 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
tsi_result tsi_create_ssl_server_handshaker_factory(
- const unsigned char** pem_private_keys,
- const size_t* pem_private_keys_sizes, const unsigned char** pem_cert_chains,
- const size_t* pem_cert_chains_sizes, size_t key_cert_pair_count,
- const unsigned char* pem_client_root_certs,
- size_t pem_client_root_certs_size, const char* cipher_list,
- const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory) {
- tsi_ssl_server_handshaker_factory* impl = NULL;
+ const unsigned char **pem_private_keys,
+ const size_t *pem_private_keys_sizes, const unsigned char **pem_cert_chains,
+ const size_t *pem_cert_chains_sizes, size_t key_cert_pair_count,
+ const unsigned char *pem_client_root_certs,
+ size_t pem_client_root_certs_size, int force_client_auth,
+ const char *cipher_list, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory) {
+ tsi_ssl_server_handshaker_factory *impl = NULL;
tsi_result result = TSI_OK;
size_t i = 0;
@@ -1315,7 +1414,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
impl->base.create_handshaker =
ssl_server_handshaker_factory_create_handshaker;
impl->base.destroy = ssl_server_handshaker_factory_destroy;
- impl->ssl_contexts = calloc(key_cert_pair_count, sizeof(SSL_CTX*));
+ impl->ssl_contexts = calloc(key_cert_pair_count, sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
calloc(key_cert_pair_count, sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
@@ -1349,7 +1448,8 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
if (result != TSI_OK) break;
if (pem_client_root_certs != NULL) {
- STACK_OF(X509_NAME)* root_names = NULL;
+ int flags = SSL_VERIFY_PEER;
+ STACK_OF(X509_NAME) *root_names = NULL;
result = ssl_ctx_load_verification_certs(
impl->ssl_contexts[i], pem_client_root_certs,
pem_client_root_certs_size, &root_names);
@@ -1358,7 +1458,8 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
break;
}
SSL_CTX_set_client_CA_list(impl->ssl_contexts[i], root_names);
- SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_PEER, NULL);
+ if (force_client_auth) flags |= SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
+ SSL_CTX_set_verify(impl->ssl_contexts[i], flags, NULL);
/* TODO(jboeuf): Add revocation verification. */
}
@@ -1391,23 +1492,28 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
/* --- tsi_ssl utils. --- */
-int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) {
+int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
size_t i = 0;
size_t san_count = 0;
- const tsi_peer_property* cn_property = NULL;
-
- /* For now reject what looks like an IP address. */
- if (looks_like_ip_address(name)) return 0;
+ const tsi_peer_property *cn_property = NULL;
+ int like_ip = looks_like_ip_address(name);
/* Check the SAN first. */
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property* property = &peer->properties[i];
+ const tsi_peer_property *property = &peer->properties[i];
if (property->name == NULL) continue;
if (strcmp(property->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
san_count++;
- if (does_entry_match_name(property->value.data, property->value.length,
- name)) {
+
+ if (!like_ip && does_entry_match_name(property->value.data,
+ property->value.length, name)) {
+ return 1;
+ } else if (like_ip &&
+ strncmp(name, property->value.data, property->value.length) ==
+ 0 &&
+ strlen(name) == property->value.length) {
+ /* IP Addresses are exact matches only. */
return 1;
}
} else if (strcmp(property->name,
@@ -1416,8 +1522,8 @@ int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) {
}
}
- /* If there's no SAN, try the CN. */
- if (san_count == 0 && cn_property != NULL) {
+  /* If there's no SAN, try the CN, but only if it's not an IP address. */
+ if (san_count == 0 && cn_property != NULL && !like_ip) {
if (does_entry_match_name(cn_property->value.data,
cn_property->value.length, name)) {
return 1;
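
The updated tsi_ssl_peer_matches_name above encodes three rules: DNS names may match wildcard SAN entries, names that look like IP addresses only ever match a SAN exactly, and the CN is consulted only when there is no SAN and the name is not IP-like. A minimal sketch of those rules, not part of this change, built with the peer-construction helpers from src/core/tsi/transport_security.h and made-up SAN values:

#include <grpc/support/log.h>

#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security.h"

static void peer_matching_sketch(void) {
  tsi_peer peer;
  /* Two SAN entries, as peer_from_x509 would extract them from a cert. */
  GPR_ASSERT(tsi_construct_peer(2, &peer) == TSI_OK);
  GPR_ASSERT(tsi_construct_string_peer_property_from_cstring(
                 TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY,
                 "*.test.example.com", &peer.properties[0]) == TSI_OK);
  GPR_ASSERT(tsi_construct_string_peer_property_from_cstring(
                 TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, "192.168.1.3",
                 &peer.properties[1]) == TSI_OK);
  /* DNS names still match the wildcard SAN. */
  GPR_ASSERT(tsi_ssl_peer_matches_name(&peer, "foo.test.example.com"));
  /* IP addresses now match too, but only exactly and only against a SAN. */
  GPR_ASSERT(tsi_ssl_peer_matches_name(&peer, "192.168.1.3"));
  GPR_ASSERT(!tsi_ssl_peer_matches_name(&peer, "192.168.1.4"));
  tsi_peer_destruct(&peer);
}
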
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index b2aa2f393e..612f5c64cc 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TSI_SSL_TRANSPORT_SECURITY_H
-#define GRPC_INTERNAL_CORE_TSI_SSL_TRANSPORT_SECURITY_H
+#ifndef GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H
+#define GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H
#include "src/core/tsi/transport_security_interface.h"
@@ -48,6 +48,8 @@ extern "C" {
#define TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY \
"x509_subject_alternative_name"
+#define TSI_X509_PEM_CERT_PROPERTY "x509_pem_cert"
+
#define TSI_SSL_ALPN_SELECTED_PROTOCOL "ssl_alpn_selected_protocol"
/* --- tsi_ssl_handshaker_factory object ---
@@ -85,12 +87,12 @@ typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_client_handshaker_factory(
- const unsigned char* pem_private_key, size_t pem_private_key_size,
- const unsigned char* pem_cert_chain, size_t pem_cert_chain_size,
- const unsigned char* pem_root_certs, size_t pem_root_certs_size,
- const char* cipher_suites, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory);
+ const unsigned char *pem_private_key, size_t pem_private_key_size,
+ const unsigned char *pem_cert_chain, size_t pem_cert_chain_size,
+ const unsigned char *pem_root_certs, size_t pem_root_certs_size,
+ const char *cipher_suites, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory);
/* Creates a server handshaker factory.
- version indicates which version of the specification to use.
@@ -107,10 +109,14 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
- key_cert_pair_count indicates the number of items in the private_key_files
and cert_chain_files parameters.
- pem_client_roots is the buffer containing the PEM encoding of the client
- root certificates. This parameter may be NULL in which case the server
- will not ask the client to authenticate itself with a certificate (server-
- only authentication mode).
- - pem_client_roots_size is the size of the associated buffer.
+ root certificates. This parameter may be NULL in which case the server will
+ not authenticate the client. If not NULL, the force_client_auth parameter
+ specifies if the server will accept only authenticated clients or both
+ authenticated and non-authenticated clients.
+ - pem_client_root_certs_size is the size of the associated buffer.
+ - force_client_auth, if set to non-zero, will force the client to
+ authenticate with an SSL cert. Note that this option is ignored if
+ pem_client_root_certs is NULL or pem_client_root_certs_size is 0.
- cipher_suites contains an optional list of the ciphers that the server
supports. The format of this string is described in:
https://www.openssl.org/docs/apps/ciphers.html.
@@ -127,14 +133,14 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_server_handshaker_factory(
- const unsigned char** pem_private_keys,
- const size_t* pem_private_keys_sizes, const unsigned char** pem_cert_chains,
- const size_t* pem_cert_chains_sizes, size_t key_cert_pair_count,
- const unsigned char* pem_client_root_certs,
- size_t pem_client_root_certs_size, const char* cipher_suites,
- const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory);
+ const unsigned char **pem_private_keys,
+ const size_t *pem_private_keys_sizes, const unsigned char **pem_cert_chains,
+ const size_t *pem_cert_chains_sizes, size_t key_cert_pair_count,
+ const unsigned char *pem_client_root_certs,
+ size_t pem_client_root_certs_size, int force_client_auth,
+ const char *cipher_suites, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory);
/* Creates a handshaker.
- self is the factory from which the handshaker will be created.
@@ -147,23 +153,22 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_ssl_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker);
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker);
/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
while handshakers created with this factory are still in use. */
-void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory* self);
+void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
Still TODO(jboeuf):
- handle mixed case.
- handle %encoded chars.
- - handle public suffix wildchar more strictly (e.g. *.co.uk)
- - handle IP addresses in SAN. */
-int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name);
+  - handle public suffix wildcard more strictly (e.g. *.co.uk) */
+int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_TSI_SSL_TRANSPORT_SECURITY_H */
+#endif /* GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H */
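
A minimal sketch, not part of this change, of how the extended server-factory signature is called once the caller has loaded the PEM buffers; passing 1 for the new force_client_auth parameter makes the handshake fail for clients that present no certificate, while 0 keeps the previous behaviour of accepting both authenticated and anonymous clients:

#include "src/core/tsi/ssl_transport_security.h"

static tsi_result make_server_factory(
    const unsigned char *pem_key, size_t pem_key_size,
    const unsigned char *pem_cert_chain, size_t pem_cert_chain_size,
    const unsigned char *pem_client_root_certs,
    size_t pem_client_root_certs_size, tsi_ssl_handshaker_factory **factory) {
  /* One key/cert pair, client roots provided, client auth required. */
  return tsi_create_ssl_server_handshaker_factory(
      &pem_key, &pem_key_size, &pem_cert_chain, &pem_cert_chain_size,
      1 /* key_cert_pair_count */, pem_client_root_certs,
      pem_client_root_certs_size, 1 /* force_client_auth */,
      NULL /* cipher_suites: library defaults */, NULL /* alpn_protocols */,
      NULL /* alpn_protocols_lengths */, 0 /* num_alpn_protocols */, factory);
}
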
diff --git a/src/core/tsi/ssl_types.h b/src/core/tsi/ssl_types.h
new file mode 100644
index 0000000000..6ea85fe6d4
--- /dev/null
+++ b/src/core/tsi/ssl_types.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_TSI_SSL_TYPES_H
+#define GRPC_CORE_TSI_SSL_TYPES_H
+
+/* A collection of macros to cast between various integer types that are
+ * used differently between BoringSSL and OpenSSL:
+ * TSI_INT_AS_SIZE(x): convert 'int x' to a length parameter for an OpenSSL
+ * function
+ * TSI_SIZE_AS_SIZE(x): convert 'size_t x' to a length parameter for an OpenSSL
+ * function
+ */
+
+#include <openssl/ssl.h>
+
+#ifdef OPENSSL_IS_BORINGSSL
+#define TSI_INT_AS_SIZE(x) ((size_t)(x))
+#define TSI_SIZE_AS_SIZE(x) (x)
+#else
+#define TSI_INT_AS_SIZE(x) (x)
+#define TSI_SIZE_AS_SIZE(x) ((int)(x))
+#endif
+
+#endif /* GRPC_CORE_TSI_SSL_TYPES_H */
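
The intent of these macros is easiest to see at a call site. The sketch below is not part of this change and uses a hypothetical write_len() stand-in for a library call whose length parameter is int under OpenSSL but size_t under BoringSSL; the macro keeps a single call site correct for both builds instead of the explicit GPR_ASSERT/(int) casts seen earlier in this diff:

#include <stddef.h>

#include "src/core/tsi/ssl_types.h"

/* Hypothetical stand-in: int length under OpenSSL, size_t under BoringSSL. */
#ifdef OPENSSL_IS_BORINGSSL
static int write_len(size_t len) { return (int)len; }
#else
static int write_len(int len) { return len; }
#endif

static int sketch(size_t buffer_size) {
  /* Passes buffer_size unchanged on BoringSSL and as (int)buffer_size on
     OpenSSL, so the truncating cast only appears where the API needs it. */
  return write_len(TSI_SIZE_AS_SIZE(buffer_size));
}
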
diff --git a/src/core/tsi/test_creds/server1.pem b/src/core/tsi/test_creds/server1.pem
index 8e582e571f..f3d43fcc5b 100644
--- a/src/core/tsi/test_creds/server1.pem
+++ b/src/core/tsi/test_creds/server1.pem
@@ -1,16 +1,16 @@
-----BEGIN CERTIFICATE-----
-MIICmzCCAgSgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJBVTET
-MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ
-dHkgTHRkMQ8wDQYDVQQDDAZ0ZXN0Y2EwHhcNMTQwNzIyMDYwMDU3WhcNMjQwNzE5
-MDYwMDU3WjBkMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
-BAcTB0NoaWNhZ28xFDASBgNVBAoTC0dvb2dsZSBJbmMuMRowGAYDVQQDFBEqLnRl
-c3QuZ29vZ2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4cMVJygs
-JUmlgMMzgdi0h1XoCR7+ww1pop04OMMyy7H/i0PJ2W6Y35+b4CM8QrkYeEafUGDO
-RYX6yV/cHGGsD/x02ye6ey1UDtkGAD/mpDEx8YCrjAc1Vfvt8Fk6Cn1WVIxV/J30
-3xjBsFgByQ55RBp1OLZfVLo6AleBDSbcxaECAwEAAaNrMGkwCQYDVR0TBAIwADAL
-BgNVHQ8EBAMCBeAwTwYDVR0RBEgwRoIQKi50ZXN0Lmdvb2dsZS5mcoIYd2F0ZXJ6
-b29pLnRlc3QuZ29vZ2xlLmJlghIqLnRlc3QueW91dHViZS5jb22HBMCoAQMwDQYJ
-KoZIhvcNAQEFBQADgYEAM2Ii0LgTGbJ1j4oqX9bxVcxm+/R5Yf8oi0aZqTJlnLYS
-wXcBykxTx181s7WyfJ49WwrYXo78zTDAnf1ma0fPq3e4mpspvyndLh1a+OarHa1e
-aT0DIIYk7qeEa1YcVljx2KyLd0r1BBAfrwyGaEPVeJQVYWaOJRU2we/KD4ojf9s=
+MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET
+MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
+dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx
+MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
+BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50
+ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco
+LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg
+zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd
+9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw
+CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy
+em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G
+CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6
+hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh
+y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8
-----END CERTIFICATE-----
diff --git a/src/core/tsi/transport_security.c b/src/core/tsi/transport_security.c
index ec02a478ba..db219a50a6 100644
--- a/src/core/tsi/transport_security.c
+++ b/src/core/tsi/transport_security.c
@@ -42,8 +42,8 @@ int tsi_tracing_enabled = 0;
/* --- Utils. --- */
-char* tsi_strdup(const char* src) {
- char* dst;
+char *tsi_strdup(const char *src) {
+ char *dst;
size_t len;
if (!src) return NULL;
len = strlen(src) + 1;
@@ -55,7 +55,7 @@ char* tsi_strdup(const char* src) {
/* --- tsi_result common implementation. --- */
-const char* tsi_result_to_string(tsi_result result) {
+const char *tsi_result_to_string(tsi_result result) {
switch (result) {
case TSI_OK:
return "TSI_OK";
@@ -92,11 +92,11 @@ const char* tsi_result_to_string(tsi_result result) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
+tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
if (self == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL || protected_output_frames == NULL ||
protected_output_frames_size == NULL) {
@@ -108,8 +108,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
}
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
if (self == NULL || protected_output_frames == NULL ||
protected_output_frames == NULL || still_pending_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -120,9 +120,9 @@ tsi_result tsi_frame_protector_protect_flush(
}
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
if (self == NULL || protected_frames_bytes == NULL ||
protected_frames_bytes_size == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL) {
@@ -133,7 +133,7 @@ tsi_result tsi_frame_protector_unprotect(
unprotected_bytes_size);
}
-void tsi_frame_protector_destroy(tsi_frame_protector* self) {
+void tsi_frame_protector_destroy(tsi_frame_protector *self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -142,29 +142,33 @@ void tsi_frame_protector_destroy(tsi_frame_protector* self) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size) {
- if (self == NULL) return TSI_INVALID_ARGUMENT;
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size) {
+ if (self == NULL || bytes == NULL || bytes_size == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->get_bytes_to_send_to_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size) {
- if (self == NULL) return TSI_INVALID_ARGUMENT;
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size) {
+ if (self == NULL || bytes == NULL || bytes_size == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->process_bytes_from_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_get_result(tsi_handshaker* self) {
+tsi_result tsi_handshaker_get_result(tsi_handshaker *self) {
if (self == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->get_result(self);
}
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) {
if (self == NULL || peer == NULL) return TSI_INVALID_ARGUMENT;
memset(peer, 0, sizeof(tsi_peer));
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
@@ -175,8 +179,8 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
}
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_protected_frame_size,
+ tsi_frame_protector **protector) {
tsi_result result;
if (self == NULL || protector == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
@@ -191,7 +195,7 @@ tsi_result tsi_handshaker_create_frame_protector(
return result;
}
-void tsi_handshaker_destroy(tsi_handshaker* self) {
+void tsi_handshaker_destroy(tsi_handshaker *self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -204,7 +208,7 @@ tsi_peer_property tsi_init_peer_property(void) {
return property;
}
-static void tsi_peer_destroy_list_property(tsi_peer_property* children,
+static void tsi_peer_destroy_list_property(tsi_peer_property *children,
size_t child_count) {
size_t i;
for (i = 0; i < child_count; i++) {
@@ -213,7 +217,7 @@ static void tsi_peer_destroy_list_property(tsi_peer_property* children,
free(children);
}
-void tsi_peer_property_destruct(tsi_peer_property* property) {
+void tsi_peer_property_destruct(tsi_peer_property *property) {
if (property->name != NULL) {
free(property->name);
}
@@ -223,7 +227,7 @@ void tsi_peer_property_destruct(tsi_peer_property* property) {
*property = tsi_init_peer_property(); /* Reset everything to 0. */
}
-void tsi_peer_destruct(tsi_peer* self) {
+void tsi_peer_destruct(tsi_peer *self) {
if (self == NULL) return;
if (self->properties != NULL) {
tsi_peer_destroy_list_property(self->properties, self->property_count);
@@ -233,7 +237,7 @@ void tsi_peer_destruct(tsi_peer* self) {
}
tsi_result tsi_construct_allocated_string_peer_property(
- const char* name, size_t value_length, tsi_peer_property* property) {
+ const char *name, size_t value_length, tsi_peer_property *property) {
*property = tsi_init_peer_property();
if (name != NULL) {
property->name = tsi_strdup(name);
@@ -251,15 +255,15 @@ tsi_result tsi_construct_allocated_string_peer_property(
}
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char* name, const char* value, tsi_peer_property* property) {
+ const char *name, const char *value, tsi_peer_property *property) {
return tsi_construct_string_peer_property(name, value, strlen(value),
property);
}
-tsi_result tsi_construct_string_peer_property(const char* name,
- const char* value,
+tsi_result tsi_construct_string_peer_property(const char *name,
+ const char *value,
size_t value_length,
- tsi_peer_property* property) {
+ tsi_peer_property *property) {
tsi_result result = tsi_construct_allocated_string_peer_property(
name, value_length, property);
if (result != TSI_OK) return result;
@@ -269,7 +273,7 @@ tsi_result tsi_construct_string_peer_property(const char* name,
return TSI_OK;
}
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer) {
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) {
memset(peer, 0, sizeof(tsi_peer));
if (property_count > 0) {
peer->properties = calloc(property_count, sizeof(tsi_peer_property));
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index 4cd0ec2cfb..ecc037193b 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_H
-#define GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_H
+#ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_H
+#define GRPC_CORE_TSI_TRANSPORT_SECURITY_H
#include "src/core/tsi/transport_security_interface.h"
@@ -45,67 +45,67 @@ extern int tsi_tracing_enabled;
/* Base for tsi_frame_protector implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*protect)(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size);
- tsi_result (*protect_flush)(tsi_frame_protector* self,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size,
- size_t* still_pending_size);
- tsi_result (*unprotect)(tsi_frame_protector* self,
- const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size,
- unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size);
- void (*destroy)(tsi_frame_protector* self);
+ tsi_result (*protect)(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size);
+ tsi_result (*protect_flush)(tsi_frame_protector *self,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size,
+ size_t *still_pending_size);
+ tsi_result (*unprotect)(tsi_frame_protector *self,
+ const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size,
+ unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size);
+ void (*destroy)(tsi_frame_protector *self);
} tsi_frame_protector_vtable;
struct tsi_frame_protector {
- const tsi_frame_protector_vtable* vtable;
+ const tsi_frame_protector_vtable *vtable;
};
/* Base for tsi_handshaker implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size);
- tsi_result (*process_bytes_from_peer)(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size);
- tsi_result (*get_result)(tsi_handshaker* self);
- tsi_result (*extract_peer)(tsi_handshaker* self, tsi_peer* peer);
- tsi_result (*create_frame_protector)(tsi_handshaker* self,
- size_t* max_protected_frame_size,
- tsi_frame_protector** protector);
- void (*destroy)(tsi_handshaker* self);
+ tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size);
+ tsi_result (*process_bytes_from_peer)(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size);
+ tsi_result (*get_result)(tsi_handshaker *self);
+ tsi_result (*extract_peer)(tsi_handshaker *self, tsi_peer *peer);
+ tsi_result (*create_frame_protector)(tsi_handshaker *self,
+ size_t *max_protected_frame_size,
+ tsi_frame_protector **protector);
+ void (*destroy)(tsi_handshaker *self);
} tsi_handshaker_vtable;
struct tsi_handshaker {
- const tsi_handshaker_vtable* vtable;
+ const tsi_handshaker_vtable *vtable;
int frame_protector_created;
};
/* Peer and property construction/destruction functions. */
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer);
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer);
tsi_peer_property tsi_init_peer_property(void);
-void tsi_peer_property_destruct(tsi_peer_property* property);
-tsi_result tsi_construct_string_peer_property(const char* name,
- const char* value,
+void tsi_peer_property_destruct(tsi_peer_property *property);
+tsi_result tsi_construct_string_peer_property(const char *name,
+ const char *value,
size_t value_length,
- tsi_peer_property* property);
+ tsi_peer_property *property);
tsi_result tsi_construct_allocated_string_peer_property(
- const char* name, size_t value_length, tsi_peer_property* property);
+ const char *name, size_t value_length, tsi_peer_property *property);
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char* name, const char* value, tsi_peer_property* property);
+ const char *name, const char *value, tsi_peer_property *property);
/* Utils. */
-char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */
+char *tsi_strdup(const char *src); /* Sadly, no strdup in C89. */
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_H */
+#endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_H */
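
These vtable bases are what concrete transports fill in, exactly as the ssl code does with frame_protector_vtable and handshaker_vtable earlier in this diff. A minimal sketch, not part of this change, of an identity "protector" that only copies bytes, just to show the wiring; a real implementation would add framing and crypto:

#include <stdlib.h>
#include <string.h>

#include "src/core/tsi/transport_security.h"

typedef struct {
  tsi_frame_protector base; /* must be first: callers only see the base type */
  size_t bytes_copied;      /* implementation-specific state follows */
} identity_protector;

static size_t size_min(size_t a, size_t b) { return a < b ? a : b; }

static tsi_result identity_protect(tsi_frame_protector *self,
                                   const unsigned char *unprotected_bytes,
                                   size_t *unprotected_bytes_size,
                                   unsigned char *protected_output_frames,
                                   size_t *protected_output_frames_size) {
  identity_protector *impl = (identity_protector *)self;
  size_t n = size_min(*unprotected_bytes_size, *protected_output_frames_size);
  memcpy(protected_output_frames, unprotected_bytes, n);
  *unprotected_bytes_size = n;       /* bytes consumed */
  *protected_output_frames_size = n; /* bytes produced */
  impl->bytes_copied += n;
  return TSI_OK;
}

static tsi_result identity_protect_flush(tsi_frame_protector *self,
                                         unsigned char *protected_output_frames,
                                         size_t *protected_output_frames_size,
                                         size_t *still_pending_size) {
  (void)self;
  (void)protected_output_frames;
  *protected_output_frames_size = 0; /* nothing is ever buffered */
  *still_pending_size = 0;
  return TSI_OK;
}

static tsi_result identity_unprotect(tsi_frame_protector *self,
                                     const unsigned char *protected_frames_bytes,
                                     size_t *protected_frames_bytes_size,
                                     unsigned char *unprotected_bytes,
                                     size_t *unprotected_bytes_size) {
  identity_protector *impl = (identity_protector *)self;
  size_t n = size_min(*protected_frames_bytes_size, *unprotected_bytes_size);
  memcpy(unprotected_bytes, protected_frames_bytes, n);
  *protected_frames_bytes_size = n;
  *unprotected_bytes_size = n;
  impl->bytes_copied += n;
  return TSI_OK;
}

static void identity_destroy(tsi_frame_protector *self) { free(self); }

static const tsi_frame_protector_vtable identity_vtable = {
    identity_protect, identity_protect_flush, identity_unprotect,
    identity_destroy,
};

tsi_frame_protector *identity_protector_create(void) {
  identity_protector *impl = calloc(1, sizeof(identity_protector));
  if (impl == NULL) return NULL;
  impl->base.vtable = &identity_vtable;
  return &impl->base;
}
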
diff --git a/src/core/tsi/transport_security_interface.h b/src/core/tsi/transport_security_interface.h
index 936b0c25b0..08501802f5 100644
--- a/src/core/tsi/transport_security_interface.h
+++ b/src/core/tsi/transport_security_interface.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H
-#define GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H
+#ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H
+#define GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H
#include <stdint.h>
#include <stdlib.h>
@@ -59,7 +59,7 @@ typedef enum {
TSI_OUT_OF_RESOURCES = 12
} tsi_result;
-const char* tsi_result_to_string(tsi_result result);
+const char *tsi_result_to_string(tsi_result result);
/* --- tsi tracing --- */
@@ -126,11 +126,11 @@ typedef struct tsi_frame_protector tsi_frame_protector;
if (result != TSI_OK) HandleError(result);
------------------------------------------------------------------------ */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size);
+tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size);
/* Indicates that we need to flush the bytes buffered in the protector and get
the resulting frame.
@@ -141,8 +141,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- still_pending_size is an output parameter indicating the number of bytes
that still need to be flushed from the protector.*/
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size);
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size);
/* Outputs unprotected bytes.
- protected_frames_bytes is an input only parameter and points to the
@@ -158,6 +158,8 @@ tsi_result tsi_frame_protector_protect_flush(
value is expected to be at most max_protected_frame_size minus overhead
which means that max_protected_frame_size is a safe bet. The output value
is the number of bytes actually written.
+ If *unprotected_bytes_size is unchanged, there may be more data remaining
+ to unprotect, and the caller should call this function again.
- This method returns TSI_OK in case of success. Success includes cases where
there is not enough data to output a frame in which case
@@ -165,12 +167,12 @@ tsi_result tsi_frame_protector_protect_flush(
needs to be read before new protected data can be processed in which case
protected_frames_size will be set to 0. */
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size);
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size);
/* Destroys the tsi_frame_protector object. */
-void tsi_frame_protector_destroy(tsi_frame_protector* self);
+void tsi_frame_protector_destroy(tsi_frame_protector *self);
/* --- tsi_peer objects ---
@@ -182,20 +184,20 @@ void tsi_frame_protector_destroy(tsi_frame_protector* self);
/* Property values may contain NULL characters just like C++ strings.
The length field gives the length of the string. */
typedef struct tsi_peer_property {
- char* name;
+ char *name;
struct {
- char* data;
+ char *data;
size_t length;
} value;
} tsi_peer_property;
typedef struct {
- tsi_peer_property* properties;
+ tsi_peer_property *properties;
size_t property_count;
} tsi_peer;
/* Destructs the tsi_peer object. */
-void tsi_peer_destruct(tsi_peer* self);
+void tsi_peer_destruct(tsi_peer *self);
/* --- tsi_handshaker objects ----
@@ -277,9 +279,9 @@ typedef struct tsi_handshaker tsi_handshaker;
needs to be called again to get all the bytes to send to the peer (there
was more data to write than the specified bytes_size). In case of a fatal
error in the handshake, another specific error code is returned. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size);
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size);
/* Processes bytes received from the peer.
- bytes is the buffer containing the data.
@@ -290,16 +292,16 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
needs to be called again to complete the data needed for processing. In
case of a fatal error in the handshake, another specific error code is
returned. */
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size);
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size);
/* Gets the result of the handshaker.
Returns TSI_OK if the handshake completed successfully and there have been no
errors. Returns TSI_HANDSHAKE_IN_PROGRESS if the handshaker is not done yet
but no error has been encountered so far. Otherwise the handshaker failed
with the returned error. */
-tsi_result tsi_handshaker_get_result(tsi_handshaker* self);
+tsi_result tsi_handshaker_get_result(tsi_handshaker *self);
/* Returns 1 if the handshake is in progress, 0 otherwise. */
#define tsi_handshaker_is_in_progress(h) \
@@ -309,7 +311,7 @@ tsi_result tsi_handshaker_get_result(tsi_handshaker* self);
tsi_handshaker_is_in_progress returns 1, it returns TSI_OK otherwise
assuming the handshaker is not in a fatal error state.
The caller is responsible for destructing the peer. */
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer);
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer);
/* This method creates a tsi_frame_protector object after the handshake phase
is done. After this method has been called successfully, the only method
@@ -328,15 +330,15 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer);
the handshaker is not in a fatal error state.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_output_protected_frame_size,
- tsi_frame_protector** protector);
+ tsi_handshaker *self, size_t *max_output_protected_frame_size,
+ tsi_frame_protector **protector);
/* This method releases the tsi_handshaker object. After this method is called,
no other method can be called on the object. */
-void tsi_handshaker_destroy(tsi_handshaker* self);
+void tsi_handshaker_destroy(tsi_handshaker *self);
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_INTERNAL_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H */
+#endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H */
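
The handshaker contract documented above (drain get_bytes_to_send_to_peer, feed received bytes to process_bytes_from_peer, repeat until the handshake is no longer in progress, then check the result and create the frame protector) reads most naturally as a loop. A minimal synchronous sketch, not part of this change; the send/recv callbacks stand in for whatever transport the caller uses, and a real driver must also deal with input that process_bytes_from_peer leaves unconsumed:

#include <stddef.h>

#include "src/core/tsi/transport_security_interface.h"

typedef void (*send_to_peer_fn)(const unsigned char *buf, size_t len);
typedef size_t (*recv_from_peer_fn)(unsigned char *buf, size_t capacity);

static tsi_result drive_handshake(tsi_handshaker *handshaker,
                                  send_to_peer_fn send_to_peer,
                                  recv_from_peer_fn recv_from_peer,
                                  tsi_frame_protector **protector) {
  unsigned char buf[4096];
  tsi_result result = TSI_OK;
  while (tsi_handshaker_is_in_progress(handshaker)) {
    /* Drain everything the handshaker wants on the wire; TSI_INCOMPLETE_DATA
       means the buffer was too small and the call must be repeated. */
    do {
      size_t to_send = sizeof(buf);
      result =
          tsi_handshaker_get_bytes_to_send_to_peer(handshaker, buf, &to_send);
      if (to_send > 0) send_to_peer(buf, to_send);
    } while (result == TSI_INCOMPLETE_DATA);
    if (result != TSI_OK) return result;
    if (!tsi_handshaker_is_in_progress(handshaker)) break;
    /* Feed whatever the peer sent back into the handshaker. */
    {
      size_t consumed = recv_from_peer(buf, sizeof(buf));
      result =
          tsi_handshaker_process_bytes_from_peer(handshaker, buf, &consumed);
      if (result != TSI_OK && result != TSI_INCOMPLETE_DATA) return result;
    }
  }
  result = tsi_handshaker_get_result(handshaker);
  if (result != TSI_OK) return result;
  /* NULL: accept the implementation's default max protected frame size. */
  return tsi_handshaker_create_frame_protector(handshaker, NULL, protector);
}
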