Diffstat (limited to 'src/core')
-rw-r--r-- src/core/README.md | 6
-rw-r--r-- src/core/ext/census/grpc_filter.c | 4
-rw-r--r-- src/core/ext/client_channel/README.md (renamed from src/core/ext/client_config/README.md) | 21
-rw-r--r-- src/core/ext/client_channel/channel_connectivity.c (renamed from src/core/ext/client_config/channel_connectivity.c) | 2
-rw-r--r-- src/core/ext/client_channel/client_channel.c (renamed from src/core/ext/client_config/client_channel.c) | 319
-rw-r--r-- src/core/ext/client_channel/client_channel.h (renamed from src/core/ext/client_config/client_channel.h) | 10
-rw-r--r-- src/core/ext/client_channel/client_channel_factory.c (renamed from src/core/ext/client_config/client_channel_factory.c) | 6
-rw-r--r-- src/core/ext/client_channel/client_channel_factory.h (renamed from src/core/ext/client_config/client_channel_factory.h) | 17
-rw-r--r-- src/core/ext/client_channel/client_channel_plugin.c (renamed from src/core/ext/client_config/client_config_plugin.c) | 12
-rw-r--r-- src/core/ext/client_channel/connector.c (renamed from src/core/ext/client_config/connector.c) | 2
-rw-r--r-- src/core/ext/client_channel/connector.h (renamed from src/core/ext/client_config/connector.h) | 10
-rw-r--r-- src/core/ext/client_channel/default_initial_connect_string.c (renamed from src/core/ext/client_config/default_initial_connect_string.c) | 5
-rw-r--r-- src/core/ext/client_channel/http_connect_handshaker.c (renamed from src/core/ext/client_config/http_connect_handshaker.c) | 4
-rw-r--r-- src/core/ext/client_channel/http_connect_handshaker.h (renamed from src/core/ext/client_config/http_connect_handshaker.h) | 6
-rw-r--r-- src/core/ext/client_channel/initial_connect_string.c (renamed from src/core/ext/client_config/initial_connect_string.c) | 11
-rw-r--r-- src/core/ext/client_channel/initial_connect_string.h (renamed from src/core/ext/client_config/initial_connect_string.h) | 16
-rw-r--r-- src/core/ext/client_channel/lb_policy.c (renamed from src/core/ext/client_config/lb_policy.c) | 2
-rw-r--r-- src/core/ext/client_channel/lb_policy.h (renamed from src/core/ext/client_config/lb_policy.h) | 19
-rw-r--r-- src/core/ext/client_channel/lb_policy_factory.c (renamed from src/core/ext/client_config/lb_policy_factory.c) | 80
-rw-r--r-- src/core/ext/client_channel/lb_policy_factory.h (renamed from src/core/ext/client_config/lb_policy_factory.h) | 50
-rw-r--r-- src/core/ext/client_channel/lb_policy_registry.c (renamed from src/core/ext/client_config/lb_policy_registry.c) | 2
-rw-r--r-- src/core/ext/client_channel/lb_policy_registry.h (renamed from src/core/ext/client_config/lb_policy_registry.h) | 8
-rw-r--r-- src/core/ext/client_channel/parse_address.c (renamed from src/core/ext/client_config/parse_address.c) | 35
-rw-r--r-- src/core/ext/client_channel/parse_address.h (renamed from src/core/ext/client_config/parse_address.h) | 18
-rw-r--r-- src/core/ext/client_channel/resolver.c (renamed from src/core/ext/client_config/resolver.c) | 5
-rw-r--r-- src/core/ext/client_channel/resolver.h (renamed from src/core/ext/client_config/resolver.h) | 26
-rw-r--r-- src/core/ext/client_channel/resolver_factory.c (renamed from src/core/ext/client_config/resolver_factory.c) | 2
-rw-r--r-- src/core/ext/client_channel/resolver_factory.h (renamed from src/core/ext/client_config/resolver_factory.h) | 19
-rw-r--r-- src/core/ext/client_channel/resolver_registry.c (renamed from src/core/ext/client_config/resolver_registry.c) | 16
-rw-r--r-- src/core/ext/client_channel/resolver_registry.h (renamed from src/core/ext/client_config/resolver_registry.h) | 15
-rw-r--r-- src/core/ext/client_channel/subchannel.c (renamed from src/core/ext/client_config/subchannel.c) | 34
-rw-r--r-- src/core/ext/client_channel/subchannel.h (renamed from src/core/ext/client_config/subchannel.h) | 18
-rw-r--r-- src/core/ext/client_channel/subchannel_index.c (renamed from src/core/ext/client_config/subchannel_index.c) | 22
-rw-r--r-- src/core/ext/client_channel/subchannel_index.h (renamed from src/core/ext/client_config/subchannel_index.h) | 14
-rw-r--r-- src/core/ext/client_channel/uri_parser.c (renamed from src/core/ext/client_config/uri_parser.c) | 2
-rw-r--r-- src/core/ext/client_channel/uri_parser.h (renamed from src/core/ext/client_config/uri_parser.h) | 6
-rw-r--r-- src/core/ext/client_config/resolver_result.c | 94
-rw-r--r-- src/core/ext/client_config/resolver_result.h | 74
-rw-r--r-- src/core/ext/lb_policy/grpclb/grpclb.c | 954
-rw-r--r-- src/core/ext/lb_policy/grpclb/grpclb.h | 2
-rw-r--r-- src/core/ext/lb_policy/grpclb/load_balancer_api.h | 2
-rw-r--r-- src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h | 4
-rw-r--r-- src/core/ext/lb_policy/pick_first/pick_first.c | 51
-rw-r--r-- src/core/ext/lb_policy/round_robin/round_robin.c | 99
-rw-r--r-- src/core/ext/load_reporting/load_reporting.h | 20
-rw-r--r-- src/core/ext/load_reporting/load_reporting_filter.c | 4
-rw-r--r-- src/core/ext/resolver/dns/native/dns_resolver.c | 56
-rw-r--r-- src/core/ext/resolver/sockaddr/sockaddr_resolver.c | 56
-rw-r--r-- src/core/ext/transport/chttp2/alpn/alpn.c | 2
-rw-r--r-- src/core/ext/transport/chttp2/client/insecure/channel_create.c | 78
-rw-r--r-- src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c | 6
-rw-r--r-- src/core/ext/transport/chttp2/client/secure/secure_channel_create.c | 94
-rw-r--r-- src/core/ext/transport/chttp2/server/insecure/server_chttp2.c | 8
-rw-r--r-- src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c | 8
-rw-r--r-- src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c | 201
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_plugin.c | 3
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 2356
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame.h | 4
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_data.c | 68
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_data.h | 9
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_goaway.c | 18
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_goaway.h | 9
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_ping.c | 13
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_ping.h | 8
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 38
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_rst_stream.h | 9
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_settings.c | 23
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_settings.h | 9
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_window_update.c | 34
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_window_update.h | 5
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_parser.c | 492
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_parser.h | 17
-rw-r--r-- src/core/ext/transport/chttp2/transport/internal.h | 674
-rw-r--r-- src/core/ext/transport/chttp2/transport/parsing.c | 910
-rw-r--r-- src/core/ext/transport/chttp2/transport/stream_lists.c | 351
-rw-r--r-- src/core/ext/transport/chttp2/transport/stream_map.c | 47
-rw-r--r-- src/core/ext/transport/chttp2/transport/stream_map.h | 7
-rw-r--r-- src/core/ext/transport/chttp2/transport/writing.c | 466
-rw-r--r-- src/core/lib/channel/channel_args.c | 67
-rw-r--r-- src/core/lib/channel/channel_args.h | 15
-rw-r--r-- src/core/lib/channel/channel_stack.c | 4
-rw-r--r-- src/core/lib/channel/channel_stack.h | 4
-rw-r--r-- src/core/lib/channel/deadline_filter.c | 76
-rw-r--r-- src/core/lib/channel/deadline_filter.h | 33
-rw-r--r-- src/core/lib/channel/handshaker.c | 2
-rw-r--r-- src/core/lib/channel/http_server_filter.c | 7
-rw-r--r-- src/core/lib/channel/message_size_filter.c | 94
-rw-r--r-- src/core/lib/http/httpcli.c | 31
-rw-r--r-- src/core/lib/http/httpcli.h | 2
-rw-r--r-- src/core/lib/iomgr/closure.c | 18
-rw-r--r-- src/core/lib/iomgr/closure.h | 11
-rw-r--r-- src/core/lib/iomgr/combiner.c | 372
-rw-r--r-- src/core/lib/iomgr/combiner.h | 15
-rw-r--r-- src/core/lib/iomgr/endpoint.c | 4
-rw-r--r-- src/core/lib/iomgr/endpoint.h | 4
-rw-r--r-- src/core/lib/iomgr/endpoint_pair.h | 5
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_posix.c | 17
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_uv.c (renamed from src/core/lib/security/credentials/google_default/credentials_windows.c) | 35
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_windows.c | 13
-rw-r--r-- src/core/lib/iomgr/error.c | 18
-rw-r--r-- src/core/lib/iomgr/error.h | 12
-rw-r--r-- src/core/lib/iomgr/ev_epoll_linux.c | 251
-rw-r--r-- src/core/lib/iomgr/ev_epoll_linux.h | 5
-rw-r--r-- src/core/lib/iomgr/ev_poll_and_epoll_posix.c | 46
-rw-r--r-- src/core/lib/iomgr/ev_poll_posix.c | 284
-rw-r--r-- src/core/lib/iomgr/ev_poll_posix.h | 1
-rw-r--r-- src/core/lib/iomgr/ev_posix.c | 30
-rw-r--r-- src/core/lib/iomgr/ev_posix.h | 13
-rw-r--r-- src/core/lib/iomgr/exec_ctx.c | 61
-rw-r--r-- src/core/lib/iomgr/exec_ctx.h | 24
-rw-r--r-- src/core/lib/iomgr/iocp_windows.c | 6
-rw-r--r-- src/core/lib/iomgr/iomgr.c | 11
-rw-r--r-- src/core/lib/iomgr/iomgr.h | 2
-rw-r--r-- src/core/lib/iomgr/iomgr_posix.c | 4
-rw-r--r-- src/core/lib/iomgr/iomgr_uv.c | 49
-rw-r--r-- src/core/lib/iomgr/iomgr_windows.c | 4
-rw-r--r-- src/core/lib/iomgr/pollset_set_uv.c | 62
-rw-r--r-- src/core/lib/iomgr/pollset_set_windows.c | 6
-rw-r--r-- src/core/lib/iomgr/pollset_uv.c | 142
-rw-r--r-- src/core/lib/iomgr/pollset_uv.h | 42
-rw-r--r-- src/core/lib/iomgr/pollset_windows.c | 6
-rw-r--r-- src/core/lib/iomgr/port.h | 129
-rw-r--r-- src/core/lib/iomgr/resolve_address.h | 1
-rw-r--r-- src/core/lib/iomgr/resolve_address_posix.c | 8
-rw-r--r-- src/core/lib/iomgr/resolve_address_uv.c | 231
-rw-r--r-- src/core/lib/iomgr/resolve_address_windows.c | 10
-rw-r--r-- src/core/lib/iomgr/resource_quota.c | 724
-rw-r--r-- src/core/lib/iomgr/resource_quota.h | 229
-rw-r--r-- src/core/lib/iomgr/sockaddr.h | 12
-rw-r--r-- src/core/lib/iomgr/sockaddr_utils.c | 99
-rw-r--r-- src/core/lib/iomgr/sockaddr_utils.h | 28
-rw-r--r-- src/core/lib/iomgr/socket_utils.h (renamed from src/core/lib/iomgr/workqueue_posix.h) | 31
-rw-r--r-- src/core/lib/iomgr/socket_utils_common_posix.c | 32
-rw-r--r-- src/core/lib/iomgr/socket_utils_linux.c | 16
-rw-r--r-- src/core/lib/iomgr/socket_utils_posix.c | 17
-rw-r--r-- src/core/lib/iomgr/socket_utils_posix.h | 10
-rw-r--r-- src/core/lib/iomgr/socket_utils_uv.c | 49
-rw-r--r-- src/core/lib/iomgr/socket_utils_windows.c | 48
-rw-r--r-- src/core/lib/iomgr/socket_windows.c | 6
-rw-r--r-- src/core/lib/iomgr/tcp_client.h | 22
-rw-r--r-- src/core/lib/iomgr/tcp_client_posix.c | 91
-rw-r--r-- src/core/lib/iomgr/tcp_client_posix.h | 45
-rw-r--r-- src/core/lib/iomgr/tcp_client_uv.c | 173
-rw-r--r-- src/core/lib/iomgr/tcp_client_windows.c | 54
-rw-r--r-- src/core/lib/iomgr/tcp_posix.c | 101
-rw-r--r-- src/core/lib/iomgr/tcp_posix.h | 4
-rw-r--r-- src/core/lib/iomgr/tcp_server.h | 13
-rw-r--r-- src/core/lib/iomgr/tcp_server_posix.c | 171
-rw-r--r-- src/core/lib/iomgr/tcp_server_uv.c | 383
-rw-r--r-- src/core/lib/iomgr/tcp_server_windows.c | 123
-rw-r--r-- src/core/lib/iomgr/tcp_uv.c | 379
-rw-r--r-- src/core/lib/iomgr/tcp_uv.h | 59
-rw-r--r-- src/core/lib/iomgr/tcp_windows.c | 41
-rw-r--r-- src/core/lib/iomgr/tcp_windows.h | 4
-rw-r--r-- src/core/lib/iomgr/timer.h | 34
-rw-r--r-- src/core/lib/iomgr/timer_generic.c (renamed from src/core/lib/iomgr/timer.c) | 6
-rw-r--r-- src/core/lib/iomgr/timer_generic.h | 49
-rw-r--r-- src/core/lib/iomgr/timer_heap.c | 6
-rw-r--r-- src/core/lib/iomgr/timer_uv.c | 99
-rw-r--r-- src/core/lib/iomgr/timer_uv.h | 47
-rw-r--r-- src/core/lib/iomgr/udp_server.c | 202
-rw-r--r-- src/core/lib/iomgr/udp_server.h | 8
-rw-r--r-- src/core/lib/iomgr/unix_sockets_posix.c | 20
-rw-r--r-- src/core/lib/iomgr/unix_sockets_posix.h | 11
-rw-r--r-- src/core/lib/iomgr/unix_sockets_posix_noop.c | 8
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_cv.c | 118
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_cv.h | 80
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_eventfd.c | 6
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_nospecial.c | 6
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_pipe.c | 16
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_posix.c | 39
-rw-r--r-- src/core/lib/iomgr/wakeup_fd_posix.h | 5
-rw-r--r-- src/core/lib/iomgr/workqueue.h | 17
-rw-r--r-- src/core/lib/iomgr/workqueue_posix.c | 196
-rw-r--r-- src/core/lib/iomgr/workqueue_uv.c | 66
-rw-r--r-- src/core/lib/iomgr/workqueue_uv.h | 37
-rw-r--r-- src/core/lib/iomgr/workqueue_windows.c | 10
-rw-r--r-- src/core/lib/profiling/basic_timers.c | 11
-rw-r--r-- src/core/lib/profiling/timers.h | 2
-rw-r--r-- src/core/lib/security/credentials/composite/composite_credentials.c | 11
-rw-r--r-- src/core/lib/security/credentials/composite/composite_credentials.h | 4
-rw-r--r-- src/core/lib/security/credentials/credentials.c | 12
-rw-r--r-- src/core/lib/security/credentials/credentials.h | 10
-rw-r--r-- src/core/lib/security/credentials/fake/fake_credentials.c | 2
-rw-r--r-- src/core/lib/security/credentials/google_default/credentials_generic.c (renamed from src/core/lib/security/credentials/google_default/credentials_posix.c) | 21
-rw-r--r-- src/core/lib/security/credentials/google_default/google_default_credentials.c | 5
-rw-r--r-- src/core/lib/security/credentials/google_default/google_default_credentials.h | 14
-rw-r--r-- src/core/lib/security/credentials/jwt/jwt_verifier.c | 16
-rw-r--r-- src/core/lib/security/credentials/oauth2/oauth2_credentials.c | 20
-rw-r--r-- src/core/lib/security/credentials/ssl/ssl_credentials.c | 2
-rw-r--r-- src/core/lib/security/transport/secure_endpoint.c | 7
-rw-r--r-- src/core/lib/security/transport/security_connector.c | 4
-rw-r--r-- src/core/lib/support/log.c | 9
-rw-r--r-- src/core/lib/support/string.c | 11
-rw-r--r-- src/core/lib/support/string.h | 4
-rw-r--r-- src/core/lib/support/thd.c | 2
-rw-r--r-- src/core/lib/surface/call.c | 136
-rw-r--r-- src/core/lib/surface/call.h | 32
-rw-r--r-- src/core/lib/surface/channel.c | 18
-rw-r--r-- src/core/lib/surface/completion_queue.c | 167
-rw-r--r-- src/core/lib/surface/completion_queue.h | 3
-rw-r--r-- src/core/lib/surface/init.c | 6
-rw-r--r-- src/core/lib/surface/server.c | 35
-rw-r--r-- src/core/lib/surface/server.h | 3
-rw-r--r-- src/core/lib/transport/connectivity_state.c | 3
-rw-r--r-- src/core/lib/transport/mdstr_hash_table.c | 157
-rw-r--r-- src/core/lib/transport/mdstr_hash_table.h | 92
-rw-r--r-- src/core/lib/transport/method_config.c | 340
-rw-r--r-- src/core/lib/transport/method_config.h | 136
-rw-r--r-- src/core/lib/transport/static_metadata.c | 4
-rw-r--r-- src/core/lib/transport/static_metadata.h | 21
-rw-r--r-- src/core/lib/transport/transport.c | 35
-rw-r--r-- src/core/lib/transport/transport.h | 9
-rw-r--r-- src/core/lib/transport/transport_op_string.c | 105
-rw-r--r-- src/core/lib/tsi/ssl_transport_security.c | 4
-rw-r--r-- src/core/plugin_registry/grpc_cronet_plugin_registry.c | 8
-rw-r--r-- src/core/plugin_registry/grpc_plugin_registry.c | 8
-rw-r--r-- src/core/plugin_registry/grpc_unsecure_plugin_registry.c | 8
218 files changed, 10356 insertions, 5705 deletions
diff --git a/src/core/README.md b/src/core/README.md
index 0d8c0d5bd9..44c6f24772 100644
--- a/src/core/README.md
+++ b/src/core/README.md
@@ -1,8 +1,4 @@
#Overview
-This directory contains source code for shared C library. Libraries in other languages in this repository (C++, Ruby,
+This directory contains source code for C library (a.k.a the *gRPC C core*) that provides all gRPC's core functionality through a low level API. Libraries in other languages in this repository (C++, Ruby,
Python, PHP, NodeJS, Objective-C) are layered on top of this library.
-
-#Status
-
-Beta
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 9dacc17eb4..a4cf6f37bd 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -133,7 +133,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
- d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+ d->start_ts = args->start_time;
return GRPC_ERROR_NONE;
}
@@ -152,7 +152,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
- d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+ d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/client_config/README.md b/src/core/ext/client_channel/README.md
index eda01e3e71..7c209db12e 100644
--- a/src/core/ext/client_config/README.md
+++ b/src/core/ext/client_channel/README.md
@@ -5,28 +5,27 @@ This library provides high level configuration machinery to construct client
channels and load balance between them.
Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
-to resolve a name into configuration data for the channel. Such configuration
-data might include:
+to resolve a name into a set of arguments for the channel. Such arguments
+might include:
- a list of (ip, port) addresses to connect to
- a load balancing policy to decide which server to send a request to
- a set of filters to mutate outgoing requests (say, by adding metadata)
-The resolver provides this data as a stream of grpc_resolver_result objects to
-the channel. We represent configuration as a stream so that it can be changed
-by the resolver during execution, by reacting to external events (such as a
-new configuration file being pushed to some store).
+The resolver provides this data as a stream of grpc_channel_args objects to
+the channel. We represent arguments as a stream so that they can be changed
+by the resolver during execution, by reacting to external events (such as
+new service configuration data being pushed to some store).
Load Balancing
--------------
-Load balancing configuration is provided by a grpc_lb_policy object, stored as
-part of grpc_resolver_result.
+Load balancing configuration is provided by a grpc_lb_policy object.
-The primary job of the load balancing policies is to pick a target server given only the
-initial metadata for a request. It does this by providing a grpc_subchannel
-object to the owning channel.
+The primary job of the load balancing policies is to pick a target server
+given only the initial metadata for a request. It does this by providing
+a grpc_subchannel object to the owning channel.
Sub-Channels
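
[Note: the README above describes the resolver handing the channel a stream of grpc_channel_args carrying the resolved addresses and, optionally, an LB policy name. The following is a minimal sketch (not part of the patch) of how a resolver might assemble such a result, using the grpc_lb_addresses helpers and the GRPC_ARG_LB_ADDRESSES / GRPC_ARG_LB_POLICY_NAME keys touched elsewhere in this change; the single-address setup, the "pick_first" policy choice, and the use of grpc_channel_args_copy_and_add are illustrative assumptions.]

    #include <grpc/support/useful.h>
    #include "src/core/ext/client_channel/lb_policy_factory.h"
    #include "src/core/lib/channel/channel_args.h"

    /* Sketch only: package one resolved backend address plus an LB policy name
       as the grpc_channel_args result that a resolver reports to the channel. */
    static grpc_channel_args *build_resolver_result(
        const grpc_channel_args *base_args, const grpc_resolved_address *addr) {
      grpc_lb_addresses *addresses =
          grpc_lb_addresses_create(1, NULL /* no per-address user_data */);
      grpc_lb_addresses_set_address(addresses, 0, addr->addr, addr->len,
                                    false /* is_balancer */,
                                    NULL /* balancer_name */, NULL /* user_data */);
      grpc_arg new_args[2];
      new_args[0] = grpc_lb_addresses_create_channel_arg(addresses);
      new_args[1].type = GRPC_ARG_STRING;
      new_args[1].key = GRPC_ARG_LB_POLICY_NAME;
      new_args[1].value.string = (char *)"pick_first"; /* placeholder policy */
      grpc_channel_args *result = grpc_channel_args_copy_and_add(
          base_args, new_args, GPR_ARRAY_SIZE(new_args));
      grpc_lb_addresses_destroy(addresses); /* the copied args hold their own copy */
      return result;
    }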
diff --git a/src/core/ext/client_config/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index ce3c13a4ee..9797e66564 100644
--- a/src/core/ext/client_config/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -36,7 +36,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/ext/client_config/client_channel.h"
+#include "src/core/ext/client_channel/client_channel.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/completion_queue.h"
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_channel/client_channel.c
index a6056c3e8d..ff773ac334 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/client_channel.h"
+#include "src/core/ext/client_channel/client_channel.h"
#include <stdbool.h>
#include <stdio.h>
@@ -42,8 +42,8 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/deadline_filter.h"
@@ -53,10 +53,62 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/metadata.h"
+#include "src/core/lib/transport/metadata_batch.h"
+#include "src/core/lib/transport/method_config.h"
+#include "src/core/lib/transport/static_metadata.h"
/* Client channel implementation */
/*************************************************************************
+ * METHOD-CONFIG TABLE
+ */
+
+typedef enum {
+ WAIT_FOR_READY_UNSET,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+} wait_for_ready_value;
+
+typedef struct method_parameters {
+ gpr_timespec timeout;
+ wait_for_ready_value wait_for_ready;
+} method_parameters;
+
+static void *method_parameters_copy(void *value) {
+ void *new_value = gpr_malloc(sizeof(method_parameters));
+ memcpy(new_value, value, sizeof(method_parameters));
+ return new_value;
+}
+
+static int method_parameters_cmp(void *value1, void *value2) {
+ const method_parameters *v1 = value1;
+ const method_parameters *v2 = value2;
+ const int retval = gpr_time_cmp(v1->timeout, v2->timeout);
+ if (retval != 0) return retval;
+ if (v1->wait_for_ready > v2->wait_for_ready) return 1;
+ if (v1->wait_for_ready < v2->wait_for_ready) return -1;
+ return 0;
+}
+
+static const grpc_mdstr_hash_table_vtable method_parameters_vtable = {
+ gpr_free, method_parameters_copy, method_parameters_cmp};
+
+static void *method_config_convert_value(
+ const grpc_method_config *method_config) {
+ method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ const gpr_timespec *timeout = grpc_method_config_get_timeout(method_config);
+ value->timeout = timeout != NULL ? *timeout : gpr_time_0(GPR_TIMESPAN);
+ const bool *wait_for_ready =
+ grpc_method_config_get_wait_for_ready(method_config);
+ value->wait_for_ready =
+ wait_for_ready == NULL
+ ? WAIT_FOR_READY_UNSET
+ : (wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE);
+ return value;
+}
+
+/*************************************************************************
* CHANNEL-WIDE FUNCTIONS
*/
@@ -68,13 +120,14 @@ typedef struct client_channel_channel_data {
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
- /** mutex protecting client configuration, including all
- variables below in this data structure */
+ /** mutex protecting all variables below in this data structure */
gpr_mu mu;
- /** currently active load balancer - guarded by mu */
+ /** currently active load balancer */
grpc_lb_policy *lb_policy;
- /** incoming resolver result - set by resolver.next(), guarded by mu */
- grpc_resolver_result *resolver_result;
+ /** maps method names to method_parameters structs */
+ grpc_mdstr_hash_table *method_params_table;
+ /** incoming resolver result - set by resolver.next() */
+ grpc_channel_args *resolver_result;
/** a list of closures that are all waiting for config to come in */
grpc_closure_list waiting_for_config_closures;
/** resolver callback */
@@ -172,41 +225,49 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
+ grpc_mdstr_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
if (chand->resolver_result != NULL) {
grpc_lb_policy_args lb_policy_args;
- lb_policy_args.server_name =
- grpc_resolver_result_get_server_name(chand->resolver_result);
- lb_policy_args.addresses =
- grpc_resolver_result_get_addresses(chand->resolver_result);
- lb_policy_args.additional_args =
- grpc_resolver_result_get_lb_policy_args(chand->resolver_result);
+ lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
+ // Find LB policy name.
+ const char *lb_policy_name = NULL;
+ const grpc_arg *channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_POLICY_NAME);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ lb_policy_name = channel_arg->value.string;
+ }
// Special case: If all of the addresses are balancer addresses,
// assume that we should use the grpclb policy, regardless of what the
// resolver actually specified.
- const char *lb_policy_name =
- grpc_resolver_result_get_lb_policy_name(chand->resolver_result);
- bool found_backend_address = false;
- for (size_t i = 0; i < lb_policy_args.addresses->num_addresses; ++i) {
- if (!lb_policy_args.addresses->addresses[i].is_balancer) {
- found_backend_address = true;
- break;
+ channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_ADDRESSES);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+ bool found_backend_address = false;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ found_backend_address = true;
+ break;
+ }
}
- }
- if (!found_backend_address) {
- if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
- gpr_log(GPR_INFO,
- "resolver requested LB policy %s but provided only balancer "
- "addresses, no backend addresses -- forcing use of grpclb LB "
- "policy",
- (lb_policy_name == NULL ? "(none)" : lb_policy_name));
+ if (!found_backend_address) {
+ if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
+ gpr_log(GPR_INFO,
+ "resolver requested LB policy %s but provided only balancer "
+ "addresses, no backend addresses -- forcing use of grpclb LB "
+ "policy",
+ lb_policy_name);
+ }
+ lb_policy_name = "grpclb";
}
- lb_policy_name = "grpclb";
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
@@ -220,7 +281,15 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
}
- grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
+ channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_SERVICE_CONFIG);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ method_params_table = grpc_method_config_table_convert(
+ (grpc_method_config_table *)channel_arg->value.pointer.p,
+ method_config_convert_value, &method_parameters_vtable);
+ }
+ grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = NULL;
}
@@ -232,6 +301,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&chand->mu);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table_unref(chand->method_params_table);
+ }
+ chand->method_params_table = method_params_table;
if (lb_policy != NULL) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
@@ -392,6 +465,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table_unref(chand->method_params_table);
+ }
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
@@ -424,7 +500,12 @@ typedef struct client_channel_call_data {
// stack and each has its own mutex. If/when we have time, find a way
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
+
+ grpc_mdstr *path; // Request path.
+ gpr_timespec call_start_time;
gpr_timespec deadline;
+ wait_for_ready_value wait_for_ready_from_service_config;
+ grpc_closure read_service_config;
grpc_error *cancel_error;
@@ -513,10 +594,14 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ grpc_call_element *elem = arg;
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
@@ -528,10 +613,11 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
+ /* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
- &subchannel_call);
+ exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
+ calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@@ -564,6 +650,9 @@ typedef struct {
grpc_closure closure;
} continue_picking_args;
+/** Return true if subchannel is available immediately (in which case on_ready
+ should not be called), or false otherwise (in which case on_ready should be
+ called when the subchannel is available). */
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
@@ -577,11 +666,15 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
- } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags,
- cpa->connected_subchannel, cpa->on_ready,
- GRPC_ERROR_NONE)) {
- grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ } else {
+ call_data *calld = cpa->elem->call_data;
+ gpr_mu_lock(&calld->mu);
+ if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->initial_metadata_flags, cpa->connected_subchannel,
+ cpa->on_ready, GRPC_ERROR_NONE)) {
+ grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ }
+ gpr_mu_unlock(&calld->mu);
}
gpr_free(cpa);
}
@@ -624,18 +717,33 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
- int r;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
+ // If the application explicitly set wait_for_ready, use that.
+ // Otherwise, if the service config specified a value for this
+ // method, use that.
+ const bool wait_for_ready_set_from_api =
+ initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
+ const bool wait_for_ready_set_from_service_config =
+ calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
+ if (!wait_for_ready_set_from_api &&
+ wait_for_ready_set_from_service_config) {
+ if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
+ initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ } else {
+ initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ }
+ }
// TODO(dgq): make this deadline configurable somehow.
const grpc_lb_policy_pick_args inputs = {
- calld->pollent, initial_metadata, initial_metadata_flags,
- &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
- r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
- NULL, on_ready);
+ initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
+ gpr_inf_future(GPR_CLOCK_MONOTONIC)};
+ const bool result = grpc_lb_policy_pick(
+ exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
- return r;
+ return result;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
@@ -672,6 +780,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
/* try to (atomically) get the call */
@@ -739,14 +848,20 @@ retry:
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
- grpc_closure_init(&calld->next_step, subchannel_ready, calld);
+ grpc_closure_init(&calld->next_step, subchannel_ready, elem);
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+ /* If a subchannel is not available immediately, the polling entity from
+ call_data should be provided to channel_data's interested_parties, so
+ that IO of the lb_policy and resolver could be done under it. */
if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
op->send_initial_metadata_flags,
&calld->connected_subchannel, &calld->next_step,
GRPC_ERROR_NONE)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ } else {
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
}
/* if we've got a subchannel, then let's ask it to create a call */
@@ -754,8 +869,8 @@ retry:
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
- &subchannel_call);
+ exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
+ calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@@ -772,13 +887,67 @@ retry:
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
+// Gets data from the service config. Invoked when the resolver returns
+// its initial result.
+static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = arg;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ // If this is an error, there's no point in looking at the service config.
+ if (error == GRPC_ERROR_NONE) {
+ // Get the method config table from channel data.
+ gpr_mu_lock(&chand->mu);
+ grpc_mdstr_hash_table *method_params_table = NULL;
+ if (chand->method_params_table != NULL) {
+ method_params_table =
+ grpc_mdstr_hash_table_ref(chand->method_params_table);
+ }
+ gpr_mu_unlock(&chand->mu);
+ // If the method config table was present, use it.
+ if (method_params_table != NULL) {
+ const method_parameters *method_params =
+ grpc_method_config_table_get(method_params_table, calld->path);
+ if (method_params != NULL) {
+ const bool have_method_timeout =
+ gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
+ if (have_method_timeout ||
+ method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ gpr_mu_lock(&calld->mu);
+ if (have_method_timeout) {
+ const gpr_timespec per_method_deadline =
+ gpr_time_add(calld->call_start_time, method_params->timeout);
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ // Reset deadline timer.
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
+ }
+ if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ calld->wait_for_ready_from_service_config =
+ method_params->wait_for_ready;
+ }
+ gpr_mu_unlock(&calld->mu);
+ }
+ }
+ grpc_mdstr_hash_table_unref(method_params_table);
+ }
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
+}
+
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
+ channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- grpc_deadline_state_init(exec_ctx, elem, args);
- calld->deadline = args->deadline;
+ // Initialize data members.
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+ calld->path = GRPC_MDSTR_REF(args->path);
+ calld->call_start_time = args->start_time;
+ calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
calld->cancel_error = GRPC_ERROR_NONE;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
@@ -789,6 +958,51 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
calld->pollent = NULL;
+ // If the resolver has already returned results, then we can access
+ // the service config parameters immediately. Otherwise, we need to
+ // defer that work until the resolver returns an initial result.
+ // TODO(roth): This code is almost but not quite identical to the code
+ // in read_service_config() above. It would be nice to find a way to
+ // combine them, to avoid having to maintain it twice.
+ gpr_mu_lock(&chand->mu);
+ if (chand->lb_policy != NULL) {
+ // We already have a resolver result, so check for service config.
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table *method_params_table =
+ grpc_mdstr_hash_table_ref(chand->method_params_table);
+ gpr_mu_unlock(&chand->mu);
+ method_parameters *method_params =
+ grpc_method_config_table_get(method_params_table, args->path);
+ if (method_params != NULL) {
+ if (gpr_time_cmp(method_params->timeout,
+ gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
+ gpr_timespec per_method_deadline =
+ gpr_time_add(calld->call_start_time, method_params->timeout);
+ calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
+ }
+ if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ calld->wait_for_ready_from_service_config =
+ method_params->wait_for_ready;
+ }
+ }
+ grpc_mdstr_hash_table_unref(method_params_table);
+ } else {
+ gpr_mu_unlock(&chand->mu);
+ }
+ } else {
+ // We don't yet have a resolver result, so register a callback to
+ // get the service config data once the resolver returns.
+ // Take a reference to the call stack to be owned by the callback.
+ GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
+ grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+ grpc_closure_list_append(&chand->waiting_for_config_closures,
+ &calld->read_service_config, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&chand->mu);
+ }
+ // Start the deadline timer with the current deadline value. If we
+ // do not yet have service config data, then the timer may be reset
+ // later.
+ grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@@ -799,6 +1013,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
+ GRPC_MDSTR_UNREF(calld->path);
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
@@ -807,6 +1022,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&calld->mu);
GPR_ASSERT(calld->waiting_ops_count == 0);
+ if (calld->connected_subchannel != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
+ "picked");
+ }
gpr_free(calld->waiting_ops);
gpr_free(and_free_memory);
}
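
[Note: two pieces of per-call logic introduced in client_channel.c above are easy to restate in isolation: an explicit wait_for_ready set through the API overrides the service-config value, and a per-method timeout from the service config can only tighten, never extend, the caller's deadline. The sketch below is a standalone restatement of those rules, reusing the wait_for_ready_value enum defined in this file; it is not additional patch code.]

    /* Sketch: resolve the effective wait_for_ready flag (an explicit API
       setting wins; otherwise fall back to the service config). */
    static uint32_t effective_md_flags(uint32_t flags,
                                       wait_for_ready_value wfr_from_config) {
      const bool set_by_api =
          (flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET) != 0;
      if (!set_by_api && wfr_from_config != WAIT_FOR_READY_UNSET) {
        if (wfr_from_config == WAIT_FOR_READY_TRUE) {
          flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
        } else {
          flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
        }
      }
      return flags;
    }

    /* Sketch: a zero method timeout means "unset"; otherwise the deadline is
       min(caller deadline, call start time + per-method timeout). */
    static gpr_timespec effective_deadline(gpr_timespec call_start_time,
                                           gpr_timespec caller_deadline,
                                           gpr_timespec method_timeout) {
      if (gpr_time_cmp(method_timeout, gpr_time_0(GPR_TIMESPAN)) == 0) {
        return caller_deadline;
      }
      return gpr_time_min(caller_deadline,
                          gpr_time_add(call_start_time, method_timeout));
    }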
diff --git a/src/core/ext/client_config/client_channel.h b/src/core/ext/client_channel/client_channel.h
index abb5ac4d87..ab5a84fdfb 100644
--- a/src/core/ext/client_config/client_channel.h
+++ b/src/core/ext/client_channel/client_channel.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/resolver.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
/* A client channel is a channel that begins disconnected, and can connect
@@ -61,4 +61,4 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
diff --git a/src/core/ext/client_config/client_channel_factory.c b/src/core/ext/client_channel/client_channel_factory.c
index 71c64c0da1..4900832d57 100644
--- a/src/core/ext/client_config/client_channel_factory.c
+++ b/src/core/ext/client_channel/client_channel_factory.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/client_channel_factory.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) {
factory->vtable->ref(factory);
@@ -44,14 +44,14 @@ void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
grpc_subchannel* grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
- grpc_subchannel_args* args) {
+ const grpc_subchannel_args* args) {
return factory->vtable->create_subchannel(exec_ctx, factory, args);
}
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
const char* target, grpc_client_channel_type type,
- grpc_channel_args* args) {
+ const grpc_channel_args* args) {
return factory->vtable->create_client_channel(exec_ctx, factory, target, type,
args);
}
diff --git a/src/core/ext/client_config/client_channel_factory.h b/src/core/ext/client_channel/client_channel_factory.h
index 1241b9b781..2b8fc577b3 100644
--- a/src/core/ext/client_config/client_channel_factory.h
+++ b/src/core/ext/client_channel/client_channel_factory.h
@@ -31,12 +31,12 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
#include <grpc/impl/codegen/grpc_types.h>
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_stack.h"
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
@@ -60,12 +60,12 @@ struct grpc_client_channel_factory_vtable {
void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory);
grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
const char *target,
grpc_client_channel_type type,
- grpc_channel_args *args);
+ const grpc_channel_args *args);
};
void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory);
@@ -75,11 +75,12 @@ void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx,
/** Create a new grpc_subchannel */
grpc_subchannel *grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
/** Create a new grpc_channel */
grpc_channel *grpc_client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const char *target, grpc_client_channel_type type, grpc_channel_args *args);
+ const char *target, grpc_client_channel_type type,
+ const grpc_channel_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
diff --git a/src/core/ext/client_config/client_config_plugin.c b/src/core/ext/client_channel/client_channel_plugin.c
index dc3629d383..a3e5079843 100644
--- a/src/core/ext/client_config/client_config_plugin.c
+++ b/src/core/ext/client_channel/client_channel_plugin.c
@@ -37,10 +37,10 @@
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/resolver_registry.h"
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
static bool append_filter(grpc_channel_stack_builder *builder, void *arg) {
@@ -73,7 +73,7 @@ static bool set_default_host_if_unset(grpc_channel_stack_builder *builder,
return true;
}
-void grpc_client_config_init(void) {
+void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
grpc_subchannel_index_init();
@@ -83,7 +83,7 @@ void grpc_client_config_init(void) {
(void *)&grpc_client_channel_filter);
}
-void grpc_client_config_shutdown(void) {
+void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_resolver_registry_shutdown();
diff --git a/src/core/ext/client_config/connector.c b/src/core/ext/client_channel/connector.c
index 5b629ed5fb..0582e5b372 100644
--- a/src/core/ext/client_config/connector.c
+++ b/src/core/ext/client_channel/connector.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/connector.h"
+#include "src/core/ext/client_channel/connector.h"
grpc_connector* grpc_connector_ref(grpc_connector* connector) {
connector->vtable->ref(connector);
diff --git a/src/core/ext/client_config/connector.h b/src/core/ext/client_channel/connector.h
index ea9d23706e..ed7d5450de 100644
--- a/src/core/ext/client_config/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H
#include "src/core/lib/channel/channel_stack.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/transport/transport.h"
typedef struct grpc_connector grpc_connector;
@@ -49,7 +49,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** address to connect to */
- const struct sockaddr *addr;
+ const grpc_resolved_address *addr;
size_t addr_len;
/** initial connect string to send */
gpr_slice initial_connect_string;
@@ -89,4 +89,4 @@ void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx,
grpc_connector *connector);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H */
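
[Note: the connect arguments above now carry a grpc_resolved_address rather than a raw struct sockaddr pointer plus length. A tiny example of wrapping a POSIX sockaddr_in in that type; the addr/len fields come from the iomgr resolve_address API, while the helper itself is hypothetical and not part of the patch.]

    #include <string.h>
    #include <netinet/in.h>
    #include "src/core/lib/iomgr/resolve_address.h"

    /* Hypothetical helper: copy a sockaddr_in into the fixed-size byte buffer
       of a grpc_resolved_address, as grpc_connect_in_args.addr now expects. */
    static void make_resolved_address(grpc_resolved_address *resolved,
                                      const struct sockaddr_in *sin) {
      memset(resolved, 0, sizeof(*resolved));
      memcpy(resolved->addr, sin, sizeof(*sin));
      resolved->len = sizeof(*sin);
    }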
diff --git a/src/core/ext/client_config/default_initial_connect_string.c b/src/core/ext/client_channel/default_initial_connect_string.c
index a70da4a84a..0b251372fd 100644
--- a/src/core/ext/client_config/default_initial_connect_string.c
+++ b/src/core/ext/client_channel/default_initial_connect_string.c
@@ -32,8 +32,7 @@
*/
#include <grpc/support/slice.h>
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
-void grpc_set_default_initial_connect_string(struct sockaddr **addr,
- size_t *addr_len,
+void grpc_set_default_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *initial_str) {}
diff --git a/src/core/ext/client_config/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index fda1df173e..ea2cbbdd97 100644
--- a/src/core/ext/client_config/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
#include <string.h>
@@ -40,7 +40,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/uri_parser.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/timer.h"
diff --git a/src/core/ext/client_config/http_connect_handshaker.h b/src/core/ext/client_channel/http_connect_handshaker.h
index 1fc3948267..c689df2b2b 100644
--- a/src/core/ext/client_config/http_connect_handshaker.h
+++ b/src/core/ext/client_channel/http_connect_handshaker.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H
#include "src/core/lib/channel/handshaker.h"
@@ -44,4 +44,4 @@ grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server,
/// Caller takes ownership of result.
char* grpc_get_http_proxy_server();
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
diff --git a/src/core/ext/client_config/initial_connect_string.c b/src/core/ext/client_channel/initial_connect_string.c
index 41580d2106..fb1493d77d 100644
--- a/src/core/ext/client_config/initial_connect_string.c
+++ b/src/core/ext/client_channel/initial_connect_string.c
@@ -31,13 +31,12 @@
*
*/
-#include "src/core/ext/client_config/initial_connect_string.h"
+#include "src/core/ext/client_channel/initial_connect_string.h"
#include <stddef.h>
-extern void grpc_set_default_initial_connect_string(struct sockaddr **addr,
- size_t *addr_len,
- gpr_slice *initial_str);
+extern void grpc_set_default_initial_connect_string(
+ grpc_resolved_address **addr, gpr_slice *initial_str);
static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
grpc_set_default_initial_connect_string;
@@ -47,7 +46,7 @@ void grpc_test_set_initial_connect_string_function(
g_set_initial_connect_string_func = func;
}
-void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+void grpc_set_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *initial_str) {
- g_set_initial_connect_string_func(addr, addr_len, initial_str);
+ g_set_initial_connect_string_func(addr, initial_str);
}
diff --git a/src/core/ext/client_config/initial_connect_string.h b/src/core/ext/client_channel/initial_connect_string.h
index 06f0767832..68adb0373c 100644
--- a/src/core/ext/client_config/initial_connect_string.h
+++ b/src/core/ext/client_channel/initial_connect_string.h
@@ -31,20 +31,20 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
#include <grpc/support/slice.h>
-#include "src/core/lib/iomgr/sockaddr.h"
-typedef void (*grpc_set_initial_connect_string_func)(struct sockaddr **addr,
- size_t *addr_len,
- gpr_slice *initial_str);
+#include "src/core/lib/iomgr/resolve_address.h"
+
+typedef void (*grpc_set_initial_connect_string_func)(
+ grpc_resolved_address **addr, gpr_slice *initial_str);
void grpc_test_set_initial_connect_string_function(
grpc_set_initial_connect_string_func func);
/** Set a string to be sent once connected. Optionally reset addr. */
-void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+void grpc_set_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *connect_string);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H */
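
[Note: tests can swap out the initial-connect-string hook through grpc_test_set_initial_connect_string_function declared above. A minimal sketch with the new grpc_resolved_address-based signature; the prefix bytes are placeholders and the helper is illustrative only.]

    /* Sketch: a test override that leaves the address alone and sends a fixed
       prefix once connected. */
    static void set_test_connect_string(grpc_resolved_address **addr,
                                        gpr_slice *initial_str) {
      (void)addr; /* keep the original address */
      *initial_str = gpr_slice_from_copied_string("CONNECT-PREFIX");
    }

    /* In a test's setup:
       grpc_test_set_initial_connect_string_function(set_test_connect_string); */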
diff --git a/src/core/ext/client_config/lb_policy.c b/src/core/ext/client_channel/lb_policy.c
index 46391272a6..45ee72e2f0 100644
--- a/src/core/ext/client_config/lb_policy.c
+++ b/src/core/ext/client_channel/lb_policy.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/lb_policy.h"
+#include "src/core/ext/client_channel/lb_policy.h"
#define WEAK_REF_BITS 16
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 110d08fcac..120c641edc 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -55,8 +55,6 @@ struct grpc_lb_policy {
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
- /** Parties interested in the pick's progress */
- grpc_polling_entity *pollent;
/** Initial metadata associated with the picking call. */
grpc_metadata_batch *initial_metadata;
/** Bitmask used for selective cancelling. See \a
@@ -111,10 +109,16 @@ struct grpc_lb_policy_vtable {
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+
+/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+
+/* Weak references: they don't prevent the shutdown of the LB policy. When no
+ * strong references are left but there are still weak ones, shutdown is called.
+ * Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
@@ -153,7 +157,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
once the pick is complete with its error argument set to indicate
success or failure.
- Any I/O should be done under \a pick_args->pollent. */
+ Any IO should be done under the \a interested_parties \a grpc_pollset_set
+ in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
@@ -194,4 +199,4 @@ grpc_connectivity_state grpc_lb_policy_check_connectivity(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H */
diff --git a/src/core/ext/client_config/lb_policy_factory.c b/src/core/ext/client_channel/lb_policy_factory.c
index c17af91a09..8a474c8818 100644
--- a/src/core/ext/client_config/lb_policy_factory.c
+++ b/src/core/ext/client_channel/lb_policy_factory.c
@@ -36,21 +36,22 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
-grpc_lb_addresses* grpc_lb_addresses_create(size_t num_addresses) {
+grpc_lb_addresses* grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
grpc_lb_addresses* addresses = gpr_malloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
+ addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
addresses->addresses = gpr_malloc(addresses_size);
memset(addresses->addresses, 0, addresses_size);
return addresses;
}
-grpc_lb_addresses* grpc_lb_addresses_copy(grpc_lb_addresses* addresses,
- void* (*user_data_copy)(void*)) {
- grpc_lb_addresses* new_addresses =
- grpc_lb_addresses_create(addresses->num_addresses);
+grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
+ grpc_lb_addresses* new_addresses = grpc_lb_addresses_create(
+ addresses->num_addresses, addresses->user_data_vtable);
memcpy(new_addresses->addresses, addresses->addresses,
sizeof(grpc_lb_address) * addresses->num_addresses);
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -58,9 +59,9 @@ grpc_lb_addresses* grpc_lb_addresses_copy(grpc_lb_addresses* addresses,
new_addresses->addresses[i].balancer_name =
gpr_strdup(new_addresses->addresses[i].balancer_name);
}
- if (user_data_copy != NULL) {
- new_addresses->addresses[i].user_data =
- user_data_copy(new_addresses->addresses[i].user_data);
+ if (new_addresses->addresses[i].user_data != NULL) {
+ new_addresses->addresses[i].user_data = addresses->user_data_vtable->copy(
+ new_addresses->addresses[i].user_data);
}
}
return new_addresses;
@@ -71,6 +72,7 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
bool is_balancer, char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
+ if (user_data != NULL) GPR_ASSERT(addresses->user_data_vtable != NULL);
grpc_lb_address* target = &addresses->addresses[index];
memcpy(target->address.addr, address, address_len);
target->address.len = address_len;
@@ -79,18 +81,70 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
target->user_data = user_data;
}
-void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses,
- void (*user_data_destroy)(void*)) {
+int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
+ const grpc_lb_addresses* addresses2) {
+ if (addresses1->num_addresses > addresses2->num_addresses) return 1;
+ if (addresses1->num_addresses < addresses2->num_addresses) return -1;
+ if (addresses1->user_data_vtable > addresses2->user_data_vtable) return 1;
+ if (addresses1->user_data_vtable < addresses2->user_data_vtable) return -1;
+ for (size_t i = 0; i < addresses1->num_addresses; ++i) {
+ const grpc_lb_address* target1 = &addresses1->addresses[i];
+ const grpc_lb_address* target2 = &addresses2->addresses[i];
+ if (target1->address.len > target2->address.len) return 1;
+ if (target1->address.len < target2->address.len) return -1;
+ int retval = memcmp(target1->address.addr, target2->address.addr,
+ target1->address.len);
+ if (retval != 0) return retval;
+ if (target1->is_balancer > target2->is_balancer) return 1;
+ if (target1->is_balancer < target2->is_balancer) return -1;
+ const char* balancer_name1 =
+ target1->balancer_name != NULL ? target1->balancer_name : "";
+ const char* balancer_name2 =
+ target2->balancer_name != NULL ? target2->balancer_name : "";
+ retval = strcmp(balancer_name1, balancer_name2);
+ if (retval != 0) return retval;
+ if (addresses1->user_data_vtable != NULL) {
+ retval = addresses1->user_data_vtable->cmp(target1->user_data,
+ target2->user_data);
+ if (retval != 0) return retval;
+ }
+ }
+ return 0;
+}
+
+void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) {
for (size_t i = 0; i < addresses->num_addresses; ++i) {
gpr_free(addresses->addresses[i].balancer_name);
- if (user_data_destroy != NULL) {
- user_data_destroy(addresses->addresses[i].user_data);
+ if (addresses->addresses[i].user_data != NULL) {
+ addresses->user_data_vtable->destroy(addresses->addresses[i].user_data);
}
}
gpr_free(addresses->addresses);
gpr_free(addresses);
}
+static void* lb_addresses_copy(void* addresses) {
+ return grpc_lb_addresses_copy(addresses);
+}
+static void lb_addresses_destroy(void* addresses) {
+ grpc_lb_addresses_destroy(addresses);
+}
+static int lb_addresses_cmp(void* addresses1, void* addresses2) {
+ return grpc_lb_addresses_cmp(addresses1, addresses2);
+}
+static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
+ lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
+
+grpc_arg grpc_lb_addresses_create_channel_arg(
+ const grpc_lb_addresses* addresses) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_LB_ADDRESSES;
+ arg.value.pointer.p = (void*)addresses;
+ arg.value.pointer.vtable = &lb_addresses_arg_vtable;
+ return arg;
+}
+
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
factory->vtable->ref(factory);
}
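For context, a resolver built on the new API creates the address list with (or without) a user-data vtable and publishes it as a pointer channel arg. A minimal sketch, assuming a grpc_resolved_address named addr is already filled in:

/* Sketch: build a one-entry address list and wrap it as a channel arg. */
grpc_lb_addresses *addresses =
    grpc_lb_addresses_create(1, NULL /* no per-address user data */);
grpc_lb_addresses_set_address(addresses, 0, addr.addr, addr.len,
                              false /* is_balancer */,
                              NULL /* balancer_name */, NULL /* user_data */);
grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
/* merging `arg` into the resolver's channel args lets the channel-args
 * machinery copy, compare and destroy the list via lb_addresses_arg_vtable */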
diff --git a/src/core/ext/client_config/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h
index ade55704f2..e2b8080a32 100644
--- a/src/core/ext/client_config/lb_policy_factory.h
+++ b/src/core/ext/client_channel/lb_policy_factory.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/lb_policy.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolve_address.h"
@@ -59,19 +59,26 @@ typedef struct grpc_lb_address {
void *user_data;
} grpc_lb_address;
+typedef struct grpc_lb_user_data_vtable {
+ void *(*copy)(void *);
+ void (*destroy)(void *);
+ int (*cmp)(void *, void *);
+} grpc_lb_user_data_vtable;
+
typedef struct grpc_lb_addresses {
size_t num_addresses;
grpc_lb_address *addresses;
+ const grpc_lb_user_data_vtable *user_data_vtable;
} grpc_lb_addresses;
/** Returns a grpc_addresses struct with enough space for
- * \a num_addresses addresses. */
-grpc_lb_addresses *grpc_lb_addresses_create(size_t num_addresses);
+ \a num_addresses addresses. The \a user_data_vtable argument may be
+ NULL if no user data will be added. */
+grpc_lb_addresses *grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable);
-/** Creates a copy of \a addresses. If \a user_data_copy is not NULL,
- * it will be invoked to copy the \a user_data field of each address. */
-grpc_lb_addresses *grpc_lb_addresses_copy(grpc_lb_addresses *addresses,
- void *(*user_data_copy)(void *));
+/** Creates a copy of \a addresses. */
+grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
/** Sets the value of the address at index \a index of \a addresses.
* \a address is a socket address of length \a address_len.
@@ -81,20 +88,21 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
bool is_balancer, char *balancer_name,
void *user_data);
-/** Destroys \a addresses. If \a user_data_destroy is not NULL, it will
- * be invoked to destroy the \a user_data field of each address. */
-void grpc_lb_addresses_destroy(grpc_lb_addresses *addresses,
- void (*user_data_destroy)(void *));
+/** Compares \a addresses1 and \a addresses2. */
+int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1,
+ const grpc_lb_addresses *addresses2);
+
+/** Destroys \a addresses. */
+void grpc_lb_addresses_destroy(grpc_lb_addresses *addresses);
+
+/** Returns a channel arg containing \a addresses. */
+grpc_arg grpc_lb_addresses_create_channel_arg(
+ const grpc_lb_addresses *addresses);
/** Arguments passed to LB policies. */
-/* TODO(roth, ctiller): Consider replacing this struct with
- grpc_channel_args. See comment in resolver_result.h for details. */
typedef struct grpc_lb_policy_args {
- const char *server_name;
- grpc_lb_addresses *addresses;
grpc_client_channel_factory *client_channel_factory;
- /* Can be used to pass implementation-specific parameters to the LB policy. */
- grpc_channel_args *additional_args;
+ grpc_channel_args *args;
} grpc_lb_policy_args;
struct grpc_lb_policy_factory_vtable {
@@ -118,4 +126,4 @@ grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
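Because the address list now travels inside \a args->args instead of a dedicated field, an LB policy factory has to recover it from the channel args. Roughly, as a sketch using the public grpc_channel_args layout (the helper name is illustrative):

/* Sketch: locate the GRPC_ARG_LB_ADDRESSES pointer arg in grpc_lb_policy_args. */
static const grpc_lb_addresses *find_lb_addresses(
    const grpc_lb_policy_args *args) {
  for (size_t i = 0; i < args->args->num_args; ++i) {
    const grpc_arg *arg = &args->args->args[i];
    if (arg->type == GRPC_ARG_POINTER &&
        strcmp(arg->key, GRPC_ARG_LB_ADDRESSES) == 0) {
      return (const grpc_lb_addresses *)arg->value.pointer.p;
    }
  }
  return NULL; /* the resolver did not provide an address list */
}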
diff --git a/src/core/ext/client_config/lb_policy_registry.c b/src/core/ext/client_channel/lb_policy_registry.c
index a23643ecc6..f46a721f9d 100644
--- a/src/core/ext/client_config/lb_policy_registry.c
+++ b/src/core/ext/client_channel/lb_policy_registry.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/lb_policy_registry.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
#include <string.h>
diff --git a/src/core/ext/client_config/lb_policy_registry.h b/src/core/ext/client_channel/lb_policy_registry.h
index 92f38d6de6..21c468e021 100644
--- a/src/core/ext/client_config/lb_policy_registry.h
+++ b/src/core/ext/client_channel/lb_policy_registry.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
/** Initialize the registry and set \a default_factory as the factory to be
@@ -52,4 +52,4 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
diff --git a/src/core/ext/client_config/parse_address.c b/src/core/ext/client_channel/parse_address.c
index 8b4abe24a6..b1d55ad0f5 100644
--- a/src/core/ext/client_config/parse_address.c
+++ b/src/core/ext/client_channel/parse_address.c
@@ -31,11 +31,12 @@
*
*/
-#include "src/core/ext/client_config/parse_address.h"
+#include "src/core/ext/client_channel/parse_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include <stdio.h>
#include <string.h>
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
#include <sys/un.h>
#endif
@@ -44,33 +45,39 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#ifdef GPR_HAVE_UNIX_SOCKET
-int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
- struct sockaddr_un *un = (struct sockaddr_un *)addr;
+#ifdef GRPC_HAVE_UNIX_SOCKET
+
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
+ struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, uri->path);
- *len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
+ resolved_addr->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
return 1;
}
-#endif
-int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
+#else /* GRPC_HAVE_UNIX_SOCKET */
+
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr) { abort(); }
+
+#endif /* GRPC_HAVE_UNIX_SOCKET */
+
+int parse_ipv4(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
- struct sockaddr_in *in = (struct sockaddr_in *)addr;
+ struct sockaddr_in *in = (struct sockaddr_in *)resolved_addr->addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
return 0;
}
- memset(in, 0, sizeof(*in));
- *len = sizeof(*in);
+ memset(resolved_addr, 0, sizeof(grpc_resolved_address));
+ resolved_addr->len = sizeof(struct sockaddr_in);
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -96,13 +103,13 @@ done:
return result;
}
-int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
+int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)resolved_addr->addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
@@ -110,7 +117,7 @@ int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
}
memset(in6, 0, sizeof(*in6));
- *len = sizeof(*in6);
+ resolved_addr->len = sizeof(*in6);
in6->sin6_family = AF_INET6;
if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
diff --git a/src/core/ext/client_config/parse_address.h b/src/core/ext/client_channel/parse_address.h
index 74c86f4d93..bf99c5298a 100644
--- a/src/core/ext/client_config/parse_address.h
+++ b/src/core/ext/client_channel/parse_address.h
@@ -31,26 +31,24 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H
#include <stddef.h>
-#include "src/core/ext/client_config/uri_parser.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/ext/client_channel/uri_parser.h"
+#include "src/core/lib/iomgr/resolve_address.h"
-#ifdef GPR_HAVE_UNIX_SOCKET
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
-int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
-#endif
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* host:port pair. Returns true upon success. */
-int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
+int parse_ipv4(grpc_uri *uri, grpc_resolved_address *resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* host:port pair. Returns true upon success. */
-int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
+int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H */
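With the parsers now filling a self-contained grpc_resolved_address, a caller looks roughly like this (a sketch; the grpc_uri_parse signature is assumed from the uri_parser header):

/* Sketch: parse "ipv4:127.0.0.1:50051" into a grpc_resolved_address. */
grpc_uri *uri = grpc_uri_parse("ipv4:127.0.0.1:50051", 0 /* suppress_errors */);
grpc_resolved_address addr;
if (uri != NULL && parse_ipv4(uri, &addr)) {
  /* addr.addr / addr.len now hold a ready-to-use struct sockaddr_in */
}
grpc_uri_destroy(uri);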
diff --git a/src/core/ext/client_config/resolver.c b/src/core/ext/client_channel/resolver.c
index 7534ea62af..2ae4fe862e 100644
--- a/src/core/ext/client_config/resolver.c
+++ b/src/core/ext/client_channel/resolver.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver.h"
+#include "src/core/ext/client_channel/resolver.h"
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable) {
@@ -76,7 +76,6 @@ void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result,
- grpc_closure *on_complete) {
+ grpc_channel_args **result, grpc_closure *on_complete) {
resolver->vtable->next(exec_ctx, resolver, result, on_complete);
}
diff --git a/src/core/ext/client_config/resolver.h b/src/core/ext/client_channel/resolver.h
index 88ac262d51..96ece92b9d 100644
--- a/src/core/ext/client_config/resolver.h
+++ b/src/core/ext/client_channel/resolver.h
@@ -31,18 +31,16 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H
-#include "src/core/ext/client_config/resolver_result.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/iomgr/iomgr.h"
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
-/** grpc_resolver provides grpc_resolver_result objects to grpc_channel
- objects */
+/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
gpr_refcount refs;
@@ -53,7 +51,7 @@ struct grpc_resolver_vtable {
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result, grpc_closure *on_complete);
+ grpc_channel_args **result, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
@@ -81,14 +79,12 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
-/** Get the next client config. Called by the channel to fetch a new
- configuration. Expected to set *result with a new configuration,
- and then schedule on_complete for execution.
+/** Get the next result from the resolver. Expected to set \a *result with
+ new channel args and then schedule \a on_complete for execution.
- If resolution is fatally broken, set *result to NULL and
- schedule on_complete. */
+ If resolution is fatally broken, set \a *result to NULL and
+ schedule \a on_complete. */
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result,
- grpc_closure *on_complete);
+ grpc_channel_args **result, grpc_closure *on_complete);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H */
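In outline, a consumer of the new resolver contract allocates storage for the resulting channel args and hands the resolver a closure. A sketch of the calling pattern, not code from this patch:

/* Sketch: request the next resolution result as grpc_channel_args. */
typedef struct {
  grpc_channel_args *resolver_result; /* filled in by the resolver */
  grpc_closure on_resolved;
} resolution_state;

static void on_resolution_done(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  resolution_state *state = arg;
  if (error != GRPC_ERROR_NONE || state->resolver_result == NULL) {
    return; /* resolution failed or is fatally broken */
  }
  /* read GRPC_ARG_LB_ADDRESSES etc. out of state->resolver_result, then
   * destroy the args once done with them */
}

/* ... */
grpc_closure_init(&state->on_resolved, on_resolution_done, state);
grpc_resolver_next(exec_ctx, resolver, &state->resolver_result,
                   &state->on_resolved);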
diff --git a/src/core/ext/client_config/resolver_factory.c b/src/core/ext/client_channel/resolver_factory.c
index 67832dcf59..7c3d644257 100644
--- a/src/core/ext/client_config/resolver_factory.c
+++ b/src/core/ext/client_channel/resolver_factory.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver_factory.h"
+#include "src/core/ext/client_channel/resolver_factory.h"
void grpc_resolver_factory_ref(grpc_resolver_factory* factory) {
factory->vtable->ref(factory);
diff --git a/src/core/ext/client_config/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h
index 9ec5b9a70e..4da42e84d2 100644
--- a/src/core/ext/client_config/resolver_factory.h
+++ b/src/core/ext/client_channel/resolver_factory.h
@@ -31,23 +31,24 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/resolver.h"
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/resolver.h"
+#include "src/core/ext/client_channel/uri_parser.h"
typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
-/** grpc_resolver provides grpc_resolver_result objects to grpc_channel
- objects */
struct grpc_resolver_factory {
const grpc_resolver_factory_vtable *vtable;
};
-typedef struct grpc_resolver_args { grpc_uri *uri; } grpc_resolver_args;
+typedef struct grpc_resolver_args {
+ grpc_uri *uri;
+ const grpc_channel_args *args;
+} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
void (*ref)(grpc_resolver_factory *factory);
@@ -76,4 +77,4 @@ grpc_resolver *grpc_resolver_factory_create_resolver(
char *grpc_resolver_factory_get_default_authority(
grpc_resolver_factory *factory, grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
diff --git a/src/core/ext/client_config/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index bd5c683878..d0f0fc3f33 100644
--- a/src/core/ext/client_config/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include <string.h>
@@ -55,7 +55,7 @@ void grpc_resolver_registry_shutdown(void) {
grpc_resolver_factory_unref(g_all_of_the_resolvers[i]);
}
// FIXME(ctiller): this should live in grpc_resolver_registry_init,
- // however that would have the client_config plugin call this AFTER we start
+ // however that would have the client_channel plugin call this AFTER we start
// registering resolvers from third party plugins, and so they'd never show
// up.
// We likely need some kind of dependency system for plugins.... what form
@@ -131,14 +131,16 @@ static grpc_resolver_factory *resolve_factory(const char *target,
return factory;
}
-grpc_resolver *grpc_resolver_create(const char *target) {
+grpc_resolver *grpc_resolver_create(const char *target,
+ const grpc_channel_args *args) {
grpc_uri *uri = NULL;
grpc_resolver_factory *factory = resolve_factory(target, &uri);
grpc_resolver *resolver;
- grpc_resolver_args args;
- memset(&args, 0, sizeof(args));
- args.uri = uri;
- resolver = grpc_resolver_factory_create_resolver(factory, &args);
+ grpc_resolver_args resolver_args;
+ memset(&resolver_args, 0, sizeof(resolver_args));
+ resolver_args.uri = uri;
+ resolver_args.args = args;
+ resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args);
grpc_uri_destroy(uri);
return resolver;
}
diff --git a/src/core/ext/client_config/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index 4c6279b978..2a95a669f0 100644
--- a/src/core/ext/client_config/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
-#include "src/core/ext/client_config/resolver_factory.h"
+#include "src/core/ext/client_channel/resolver_factory.h"
void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
@@ -57,8 +57,11 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
was not NULL).
If a resolver factory was found, use it to instantiate a resolver and
return it.
- If a resolver factory was not found, return NULL. */
-grpc_resolver *grpc_resolver_create(const char *target);
+ If a resolver factory was not found, return NULL.
+ \a args is a set of channel arguments to be included in the result
+ (typically the set of arguments passed in from the client API). */
+grpc_resolver *grpc_resolver_create(const char *target,
+ const grpc_channel_args *args);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
@@ -68,4 +71,4 @@ grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name);
representing the default authority to pass from a client. */
char *grpc_get_default_authority(const char *target);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
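The corresponding call-site change is small; a sketch with an illustrative target and a hypothetical client_channel_args variable:

/* Sketch: create a resolver, forwarding the client's channel args to it. */
grpc_resolver *resolver =
    grpc_resolver_create("dns:///example.com:443", client_channel_args);
if (resolver == NULL) {
  /* no resolver factory matched the target's scheme */
}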
diff --git a/src/core/ext/client_config/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 0bbaa3e382..789966cb69 100644
--- a/src/core/ext/client_config/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include <limits.h>
#include <string.h>
@@ -39,9 +39,9 @@
#include <grpc/support/alloc.h>
#include <grpc/support/avl.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/initial_connect_string.h"
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/initial_connect_string.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/iomgr/timer.h"
@@ -95,8 +95,7 @@ struct grpc_subchannel {
/** channel arguments */
grpc_channel_args *args;
/** address to connect to */
- struct sockaddr *addr;
- size_t addr_len;
+ grpc_resolved_address *addr;
grpc_subchannel_key *key;
@@ -184,9 +183,10 @@ static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
+ return c;
}
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
@@ -220,8 +220,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SUBCHANNEL: %p %12s 0x%08d -> 0x%08d [%s]", c, purpose, (int)old_val,
- (int)(old_val + delta), reason);
+ "SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
@@ -298,7 +298,7 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
- grpc_subchannel_args *args) {
+ const grpc_subchannel_args *args) {
grpc_subchannel_key *key = grpc_subchannel_key_create(connector, args);
grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
if (c) {
@@ -320,12 +320,11 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
} else {
c->filters = NULL;
}
- c->addr = gpr_malloc(args->addr_len);
- if (args->addr_len) memcpy(c->addr, args->addr, args->addr_len);
+ c->addr = gpr_malloc(sizeof(grpc_resolved_address));
+ if (args->addr->len)
+ memcpy(c->addr, args->addr, sizeof(grpc_resolved_address));
c->pollset_set = grpc_pollset_set_create();
- c->addr_len = args->addr_len;
- grpc_set_initial_connect_string(&c->addr, &c->addr_len,
- &c->initial_connect_string);
+ grpc_set_initial_connect_string(&c->addr, &c->initial_connect_string);
c->args = grpc_channel_args_copy(args->args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
@@ -376,7 +375,6 @@ static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
args.interested_parties = c->pollset_set;
args.addr = c->addr;
- args.addr_len = c->addr_len;
args.deadline = c->next_attempt;
args.channel_args = c->args;
args.initial_connect_string = c->initial_connect_string;
@@ -704,7 +702,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, gpr_timespec deadline,
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@@ -712,7 +710,7 @@ grpc_error *grpc_connected_subchannel_create_call(
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
- NULL, NULL, deadline, callstk);
+ NULL, NULL, path, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
diff --git a/src/core/ext/client_config/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 3330621071..93bd72d20d 100644
--- a/src/core/ext/client_config/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -31,13 +31,14 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H
-#include "src/core/ext/client_config/connector.h"
+#include "src/core/ext/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/metadata.h"
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
@@ -96,7 +97,7 @@ grpc_subchannel *grpc_subchannel_weak_ref(
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel
@@ -110,7 +111,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, gpr_timespec deadline,
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **subchannel_call);
/** process a transport level op */
@@ -166,13 +167,12 @@ struct grpc_subchannel_args {
/** Server name */
const char *server_name;
/** Address to connect to */
- struct sockaddr *addr;
- size_t addr_len;
+ grpc_resolved_address *addr;
};
/** create a subchannel given a connector */
grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H */
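Correspondingly, callers now populate grpc_subchannel_args with a grpc_resolved_address rather than a sockaddr/length pair. A sketch with hypothetical values for the surrounding variables:

/* Sketch: build subchannel args from a resolved address. */
grpc_subchannel_args sc_args;
memset(&sc_args, 0, sizeof(sc_args));
sc_args.server_name = "example.com";
sc_args.addr = &resolved_addr; /* a grpc_resolved_address */
sc_args.args = channel_args;   /* channel args for the new subchannel */
grpc_subchannel *subchannel =
    grpc_subchannel_create(exec_ctx, connector, &sc_args);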
diff --git a/src/core/ext/client_config/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c
index 673f85b8cb..227013a7d7 100644
--- a/src/core/ext/client_config/subchannel_index.c
+++ b/src/core/ext/client_channel/subchannel_index.c
@@ -31,7 +31,7 @@
//
//
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include <stdbool.h>
#include <string.h>
@@ -73,7 +73,7 @@ static grpc_exec_ctx *current_ctx() {
}
static grpc_subchannel_key *create_key(
- grpc_connector *connector, grpc_subchannel_args *args,
+ grpc_connector *connector, const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
k->connector = grpc_connector_ref(connector);
@@ -87,17 +87,17 @@ static grpc_subchannel_key *create_key(
k->args.filters = NULL;
}
k->args.server_name = gpr_strdup(args->server_name);
- k->args.addr_len = args->addr_len;
- k->args.addr = gpr_malloc(args->addr_len);
- if (k->args.addr_len > 0) {
- memcpy(k->args.addr, args->addr, k->args.addr_len);
+ k->args.addr = gpr_malloc(sizeof(grpc_resolved_address));
+ k->args.addr->len = args->addr->len;
+ if (k->args.addr->len > 0) {
+ memcpy(k->args.addr, args->addr, sizeof(grpc_resolved_address));
}
k->args.args = copy_channel_args(args->args);
return k;
}
-grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *connector,
- grpc_subchannel_args *args) {
+grpc_subchannel_key *grpc_subchannel_key_create(
+ grpc_connector *connector, const grpc_subchannel_args *args) {
return create_key(connector, args, grpc_channel_args_normalize);
}
@@ -109,14 +109,14 @@ static int subchannel_key_compare(grpc_subchannel_key *a,
grpc_subchannel_key *b) {
int c = GPR_ICMP(a->connector, b->connector);
if (c != 0) return c;
- c = GPR_ICMP(a->args.addr_len, b->args.addr_len);
+ c = GPR_ICMP(a->args.addr->len, b->args.addr->len);
if (c != 0) return c;
c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
c = strcmp(a->args.server_name, b->args.server_name);
if (c != 0) return c;
- if (a->args.addr_len) {
- c = memcmp(a->args.addr, b->args.addr, a->args.addr_len);
+ if (a->args.addr->len) {
+ c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len);
if (c != 0) return c;
}
if (a->args.filter_count > 0) {
diff --git a/src/core/ext/client_config/subchannel_index.h b/src/core/ext/client_channel/subchannel_index.h
index 6b8d063855..a67bd5e219 100644
--- a/src/core/ext/client_config/subchannel_index.h
+++ b/src/core/ext/client_channel/subchannel_index.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
-#include "src/core/ext/client_config/connector.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/connector.h"
+#include "src/core/ext/client_channel/subchannel.h"
/** \file Provides an index of active subchannels so that they can be
shared amongst channels */
@@ -43,8 +43,8 @@
typedef struct grpc_subchannel_key grpc_subchannel_key;
/** Create a key that can be used to uniquely identify a subchannel */
-grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *con,
- grpc_subchannel_args *args);
+grpc_subchannel_key *grpc_subchannel_key_create(
+ grpc_connector *con, const grpc_subchannel_args *args);
/** Destroy a subchannel key */
void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
@@ -74,4 +74,4 @@ void grpc_subchannel_index_init(void);
/** Shutdown the subchannel index (global) */
void grpc_subchannel_index_shutdown(void);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
diff --git a/src/core/ext/client_config/uri_parser.c b/src/core/ext/client_channel/uri_parser.c
index 3ca1a58e69..bcb6a1dee4 100644
--- a/src/core/ext/client_config/uri_parser.c
+++ b/src/core/ext/client_channel/uri_parser.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/uri_parser.h"
#include <string.h>
diff --git a/src/core/ext/client_config/uri_parser.h b/src/core/ext/client_channel/uri_parser.h
index 875a7cb07c..5fe0e8f35e 100644
--- a/src/core/ext/client_config/uri_parser.h
+++ b/src/core/ext/client_channel/uri_parser.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H
#include <stddef.h>
@@ -60,4 +60,4 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
/** destroy a uri */
void grpc_uri_destroy(grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H */
diff --git a/src/core/ext/client_config/resolver_result.c b/src/core/ext/client_config/resolver_result.c
deleted file mode 100644
index 63480d152b..0000000000
--- a/src/core/ext/client_config/resolver_result.c
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/core/ext/client_config/resolver_result.h"
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/channel/channel_args.h"
-
-struct grpc_resolver_result {
- gpr_refcount refs;
- char* server_name;
- grpc_lb_addresses* addresses;
- char* lb_policy_name;
- grpc_channel_args* lb_policy_args;
-};
-
-grpc_resolver_result* grpc_resolver_result_create(
- const char* server_name, grpc_lb_addresses* addresses,
- const char* lb_policy_name, grpc_channel_args* lb_policy_args) {
- grpc_resolver_result* result = gpr_malloc(sizeof(*result));
- memset(result, 0, sizeof(*result));
- gpr_ref_init(&result->refs, 1);
- result->server_name = gpr_strdup(server_name);
- result->addresses = addresses;
- result->lb_policy_name = gpr_strdup(lb_policy_name);
- result->lb_policy_args = lb_policy_args;
- return result;
-}
-
-void grpc_resolver_result_ref(grpc_resolver_result* result) {
- gpr_ref(&result->refs);
-}
-
-void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
- grpc_resolver_result* result) {
- if (gpr_unref(&result->refs)) {
- gpr_free(result->server_name);
- grpc_lb_addresses_destroy(result->addresses, NULL /* user_data_destroy */);
- gpr_free(result->lb_policy_name);
- grpc_channel_args_destroy(result->lb_policy_args);
- gpr_free(result);
- }
-}
-
-const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result) {
- return result->server_name;
-}
-
-grpc_lb_addresses* grpc_resolver_result_get_addresses(
- grpc_resolver_result* result) {
- return result->addresses;
-}
-
-const char* grpc_resolver_result_get_lb_policy_name(
- grpc_resolver_result* result) {
- return result->lb_policy_name;
-}
-
-grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
- grpc_resolver_result* result) {
- return result->lb_policy_args;
-}
diff --git a/src/core/ext/client_config/resolver_result.h b/src/core/ext/client_config/resolver_result.h
deleted file mode 100644
index 414c2e2482..0000000000
--- a/src/core/ext/client_config/resolver_result.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H
-
-#include "src/core/ext/client_config/lb_policy_factory.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-
-// TODO(roth, ctiller): In the long term, we are considering replacing
-// the resolver_result data structure with grpc_channel_args. The idea is
-// that the resolver will return a set of channel args that contains the
-// information that is currently in the resolver_result struct. For
-// example, there will be specific args indicating the set of addresses
-// and the name of the LB policy to instantiate. Note that if we did
-// this, we would probably want to change the data structure of
-// grpc_channel_args such to a hash table or AVL or some other data
-// structure that does not require linear search to find keys.
-
-/// Results reported from a grpc_resolver.
-typedef struct grpc_resolver_result grpc_resolver_result;
-
-/// Takes ownership of \a addresses and \a lb_policy_args.
-grpc_resolver_result* grpc_resolver_result_create(
- const char* server_name, grpc_lb_addresses* addresses,
- const char* lb_policy_name, grpc_channel_args* lb_policy_args);
-void grpc_resolver_result_ref(grpc_resolver_result* result);
-void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-grpc_lb_addresses* grpc_resolver_result_get_addresses(
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-const char* grpc_resolver_result_get_lb_policy_name(
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
- grpc_resolver_result* result);
-
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H */
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index ae1f2a3b4c..734108a9db 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -43,34 +43,27 @@
* policy to select from this list of LB server backends.
*
* The first time the policy gets a request for a pick, a ping, or to exit the
- * idle state, \a query_for_backends() is called. It creates an instance of \a
- * lb_client_data, an internal struct meant to contain the data associated with
- * the internal communication with the LB server. This instance is created via
- * \a lb_client_data_create(). There, the call over lb_channel to pick-first
- * from {a1..an} is created, the \a LoadBalancingRequest message is assembled
- * and all necessary callbacks for the progress of the internal call configured.
+ * idle state, \a query_for_backends_locked() is called. This function sets up
+ * and initiates the internal communication with the LB server. In particular,
+ * it's responsible for instantiating the internal *streaming* call to the LB
+ * server (whichever address from {a1..an} pick-first chose). This call is
+ * serviced by two callbacks, \a lb_on_server_status_received and \a
+ * lb_on_response_received. The former will be called when the call to the LB
+ * server completes. This can happen if the LB server closes the connection or
+ * if this policy itself cancels the call (for example because it's shutting
+ * down). If the internal call times out, the usual behavior of pick-first
+ * applies, continuing to pick from the list {a1..an}.
*
- * Back in \a query_for_backends(), the internal *streaming* call to the LB
- * server (whichever address from {a1..an} pick-first chose) is kicked off.
- * It'll progress over the callbacks configured in \a lb_client_data_create()
- * (see the field docstrings of \a lb_client_data for more details).
- *
- * If the call fails with UNIMPLEMENTED, the original call will also fail.
- * There's a misconfiguration somewhere: at least one of {a1..an} isn't a LB
- * server, which contradicts the LB bit being set. If the internal call times
- * out, the usual behavior of pick-first applies, continuing to pick from the
- * list {a1..an}.
- *
- * Upon sucesss, a \a LoadBalancingResponse is expected in \a res_recv_cb. An
- * invalid one results in the termination of the streaming call. A new streaming
- * call should be created if possible, failing the original call otherwise.
- * For a valid \a LoadBalancingResponse, the server list of actual backends is
- * extracted. A Round Robin policy will be created from this list. There are two
- * possible scenarios:
+ * Upon success, the incoming \a LoadBalancingResponse is processed by
+ * \a lb_on_response_received. An invalid one results in the termination of
+ * the streaming call. A
+ * new streaming call should be created if possible, failing the original call
+ * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
+ * backends is extracted. A Round Robin policy will be created from this list.
+ * There are two possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
- * the Round Robin policy. \a rr_handover() will instantiate the RR policy
- * and perform all the pending operations over it.
+ * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
+ * policy and perform all the pending operations over it.
* 2. There's already a RR policy instance active. We need to introduce the new
 * one built from the new serverlist, but taking care not to disrupt the
* operations in progress over the old RR instance. This is done by
@@ -78,16 +71,16 @@
* references are held on the old RR policy, it'll be destroyed and \a
* glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which
- * is done once again via \a rr_handover().
+ * is done once again via \a rr_handover_locked().
*
*
* Once a RR policy instance is in place (and getting updated as described),
 * calls for a pick, a ping or a cancellation will be serviced right away by
* forwarding them to the RR instance. Any time there's no RR policy available
- * (ie, right after the creation of the gRPCLB policy, if an empty serverlist
- * is received, etc), pick/ping requests are added to a list of pending
- * picks/pings to be flushed and serviced as part of \a rr_handover() the moment
- * the RR policy instance becomes available.
+ * (i.e., right after the creation of the gRPCLB policy, if an empty serverlist
+ * received, etc), pick/ping requests are added to a list of pending picks/pings
+ * to be flushed and serviced as part of \a rr_handover_locked() the moment the
+ * RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
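The pending pick/ping handling described above amounts to queueing requests while no RR instance exists and draining the queue during handover. Schematically, a simplified sketch that assumes the pending_pick node also stores its pick_args and that the policy keeps the current RR instance in an rr_policy field:

/* Sketch: drain queued picks once an RR policy becomes available. */
static void flush_pending_picks_locked(grpc_exec_ctx *exec_ctx,
                                       glb_lb_policy *glb_policy) {
  pending_pick *pp;
  while ((pp = glb_policy->pending_picks) != NULL) {
    glb_policy->pending_picks = pp->next;
    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
    /* pick_args is assumed to be stored on the pending_pick node */
    pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
                                 &pp->pick_args, pp->target,
                                 &pp->wrapped_on_complete_arg);
  }
}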
@@ -96,6 +89,12 @@
* - Implement LB service forwarding (point 2c. in the doc's diagram).
*/
+/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+ using that endpoint. Because of various transitive includes in uv.h,
+ including windows.h on Windows, uv.h must be included before other system
+ headers. Therefore, sockaddr.h must always be included first */
+#include "src/core/lib/iomgr/sockaddr.h"
+
#include <errno.h>
#include <string.h>
@@ -107,19 +106,27 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/lb_policy_factory.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/parse_address.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/static_metadata.h"
+#define BACKOFF_MULTIPLIER 1.6
+#define BACKOFF_JITTER 0.2
+#define BACKOFF_MIN_SECONDS 10
+#define BACKOFF_MAX_SECONDS 60
+
int grpc_lb_glb_trace = 0;
/* add lb_token of selected subchannel (address) to the call's initial
@@ -134,6 +141,9 @@ static void initial_metadata_add_lb_token(
}
typedef struct wrapped_rr_closure_arg {
+ /* the closure instance using this struct as argument */
+ grpc_closure wrapper_closure;
+
/* the original closure. Usually a on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
grpc_closure *wrapped_closure;
@@ -155,9 +165,8 @@ typedef struct wrapped_rr_closure_arg {
/* The RR instance related to the closure */
grpc_lb_policy *rr_policy;
- /* when not NULL, represents a pending_{pick,ping} node to be freed upon
- * closure execution */
- void *owning_pending_node; /* to be freed if not NULL */
+ /* heap memory to be freed upon closure execution. */
+ void *free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
@@ -166,13 +175,12 @@ typedef struct wrapped_rr_closure_arg {
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_rr_closure_arg *wc_arg = arg;
- if (wc_arg->rr_policy != NULL) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
+ NULL);
+
+ if (wc_arg->rr_policy != NULL) {
/* if target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
@@ -181,12 +189,14 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
}
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-
- grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
- NULL);
- gpr_free(wc_arg->owning_pending_node);
+ GPR_ASSERT(wc_arg->free_when_done != NULL);
+ gpr_free(wc_arg->free_when_done);
}
/* Linked list of pending pick requests. It stores all information needed to
@@ -207,10 +217,6 @@ typedef struct pending_pick {
* upon error. */
grpc_connected_subchannel **target;
- /* a closure wrapping the original on_complete one to be invoked once the
- * pick() has completed (regardless of success) */
- grpc_closure wrapped_on_complete;
-
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
@@ -230,8 +236,9 @@ static void add_pending_pick(pending_pick **root,
pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
- grpc_closure_init(&pp->wrapped_on_complete, wrapped_rr_closure,
- &pp->wrapped_on_complete_arg);
+ pp->wrapped_on_complete_arg.free_when_done = pp;
+ grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
+ wrapped_rr_closure, &pp->wrapped_on_complete_arg);
*root = pp;
}
@@ -239,10 +246,6 @@ static void add_pending_pick(pending_pick **root,
typedef struct pending_ping {
struct pending_ping *next;
- /* a closure wrapping the original on_complete one to be invoked once the
- * ping() has completed (regardless of success) */
- grpc_closure wrapped_notify;
-
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
@@ -251,10 +254,11 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pending_ping *pping = gpr_malloc(sizeof(*pping));
memset(pping, 0, sizeof(pending_ping));
memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
- pping->next = *root;
- grpc_closure_init(&pping->wrapped_notify, wrapped_rr_closure,
- &pping->wrapped_notify_arg);
pping->wrapped_notify_arg.wrapped_closure = notify;
+ pping->wrapped_notify_arg.free_when_done = pping;
+ pping->next = *root;
+ grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
+ wrapped_rr_closure, &pping->wrapped_notify_arg);
*root = pping;
}
@@ -262,7 +266,6 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
-struct lb_client_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
/** base policy: must be first */
@@ -274,6 +277,7 @@ typedef struct glb_lb_policy {
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
+ grpc_channel_args *args;
/** deadline for the LB's call */
gpr_timespec deadline;
@@ -293,27 +297,47 @@ typedef struct glb_lb_policy {
* response has arrived. */
grpc_grpclb_serverlist *serverlist;
- /** addresses from \a serverlist */
- grpc_lb_addresses *addresses;
-
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
pending_ping *pending_pings;
- /** client data associated with the LB server communication */
- struct lb_client_data *lb_client;
+ bool shutting_down;
+
+ /************************************************************/
+ /* client data associated with the LB server communication */
+ /************************************************************/
+ /* Status from the LB server has been received. This signals the end of the LB
+ * call. */
+ grpc_closure lb_on_server_status_received;
+
+ /* A response from the LB server has been received. Process it */
+ grpc_closure lb_on_response_received;
+
+ grpc_call *lb_call; /* streaming call to the LB server, */
+
+ grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
+ grpc_metadata_array
+ lb_trailing_metadata_recv; /* trailing MD from LB server */
+
+ /* what's being sent to the LB server. Note that its value may vary if the LB
+ * server indicates a redirect. */
+ grpc_byte_buffer *lb_request_payload;
+
+ /* response from the LB server, if any. Processed in lb_on_response_received() */
+ grpc_byte_buffer *lb_response_payload;
- /** for tracking of the RR connectivity */
- rr_connectivity_data *rr_connectivity;
+ /* call status code and details, set in lb_on_server_status_received() */
+ grpc_status_code lb_call_status;
+ char *lb_call_status_details;
+ size_t lb_call_status_details_capacity;
- /* a wrapped (see \a wrapped_rr_closure) on-complete closure for readily
- * available RR picks */
- grpc_closure wrapped_on_complete;
+ /** LB call retry backoff state */
+ gpr_backoff lb_call_backoff_state;
- /* arguments for the wrapped_on_complete closure */
- wrapped_rr_closure_arg wc_arg;
+ /** LB call retry timer */
+ grpc_timer lb_call_retry_timer;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -329,8 +353,8 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
if (server->port >> 16 != 0) {
if (log) {
gpr_log(GPR_ERROR,
- "Invalid port '%d' at index %zu of serverlist. Ignoring.",
- server->port, idx);
+ "Invalid port '%d' at index %lu of serverlist. Ignoring.",
+ server->port, (unsigned long)idx);
}
return false;
}
@@ -338,15 +362,52 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
if (ip->size != 4 && ip->size != 16) {
if (log) {
gpr_log(GPR_ERROR,
- "Expected IP to be 4 or 16 bytes, got %d at index %zu of "
+ "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
- ip->size, idx);
+ ip->size, (unsigned long)idx);
}
return false;
}
return true;
}
+/* vtable for LB tokens in grpc_lb_addresses. */
+static void *lb_token_copy(void *token) {
+ return token == NULL ? NULL : GRPC_MDELEM_REF(token);
+}
+static void lb_token_destroy(void *token) {
+ if (token != NULL) GRPC_MDELEM_UNREF(token);
+}
+static int lb_token_cmp(void *token1, void *token2) {
+ if (token1 > token2) return 1;
+ if (token1 < token2) return -1;
+ return 0;
+}
+static const grpc_lb_user_data_vtable lb_token_vtable = {
+ lb_token_copy, lb_token_destroy, lb_token_cmp};
+
+static void parse_server(const grpc_grpclb_server *server,
+ grpc_resolved_address *addr) {
+ const uint16_t netorder_port = htons((uint16_t)server->port);
+ /* the addresses are given in binary format (an in(6)_addr struct) in
+ * server->ip_address.bytes. */
+ const grpc_grpclb_ip_address *ip = &server->ip_address;
+ memset(addr, 0, sizeof(*addr));
+ if (ip->size == 4) {
+ addr->len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+ addr4->sin_family = AF_INET;
+ memcpy(&addr4->sin_addr, ip->bytes, ip->size);
+ addr4->sin_port = netorder_port;
+ } else if (ip->size == 16) {
+ addr->len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+ addr6->sin6_family = AF_INET6;
+ memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
+ addr6->sin6_port = netorder_port;
+ }
+}
+
/* Returns addresses extracted from \a serverlist. */
static grpc_lb_addresses *process_serverlist(
const grpc_grpclb_serverlist *serverlist) {
@@ -358,7 +419,8 @@ static grpc_lb_addresses *process_serverlist(
}
if (num_valid == 0) return NULL;
- grpc_lb_addresses *lb_addresses = grpc_lb_addresses_create(num_valid);
+ grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
* to the outside world) to be read by the RR policy during its creation.
@@ -372,41 +434,26 @@ static grpc_lb_addresses *process_serverlist(
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
/* address processing */
- const uint16_t netorder_port = htons((uint16_t)server->port);
- /* the addresses are given in binary format (a in(6)_addr struct) in
- * server->ip_address.bytes. */
- const grpc_grpclb_ip_address *ip = &server->ip_address;
grpc_resolved_address addr;
- memset(&addr, 0, sizeof(addr));
- if (ip->size == 4) {
- addr.len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr.addr;
- addr4->sin_family = AF_INET;
- memcpy(&addr4->sin_addr, ip->bytes, ip->size);
- addr4->sin_port = netorder_port;
- } else if (ip->size == 16) {
- addr.len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr.addr;
- addr6->sin6_family = AF_INET;
- memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
- addr6->sin6_port = netorder_port;
- }
+ parse_server(server, &addr);
/* lb token processing */
void *user_data;
if (server->has_load_balance_token) {
- const size_t lb_token_size =
- GPR_ARRAY_SIZE(server->load_balance_token) - 1;
+ const size_t lb_token_max_length =
+ GPR_ARRAY_SIZE(server->load_balance_token);
+ const size_t lb_token_length =
+ strnlen(server->load_balance_token, lb_token_max_length);
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
- (uint8_t *)server->load_balance_token, lb_token_size);
- user_data = grpc_mdelem_from_metadata_strings(
- GRPC_MDSTR_LOAD_REPORTING_INITIAL, lb_token_mdstr);
+ (uint8_t *)server->load_balance_token, lb_token_length);
+ user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
+ lb_token_mdstr);
} else {
gpr_log(GPR_ERROR,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
- grpc_sockaddr_to_uri((struct sockaddr *)&addr.addr));
- user_data = GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY;
+ grpc_sockaddr_to_uri(&addr));
+ user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -415,57 +462,111 @@ static grpc_lb_addresses *process_serverlist(
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
-
return lb_addresses;
}
-/* A plugin for grpc_lb_addresses_destroy that unrefs the LB token metadata. */
-static void lb_token_destroy(void *token) {
- if (token != NULL) GRPC_MDELEM_UNREF(token);
+/* perform a pick over \a rr_policy. Given that a pick can return immediately
+ * (ignoring its completion callback), we need to perform the cleanups this
+ * callback would otherwise be responsible for */
+static bool pick_from_internal_rr_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+ GPR_ASSERT(rr_policy != NULL);
+ const bool pick_done =
+ grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
+ (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ if (pick_done) {
+ /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+
+ /* add the load reporting initial metadata */
+ initial_metadata_add_lb_token(pick_args->initial_metadata,
+ pick_args->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(wc_arg->lb_token));
+
+ gpr_free(wc_arg);
+ }
+ /* else, the pending pick will be registered and taken care of by the
+ * pending pick list inside the RR policy (glb_policy->rr_policy).
+ * Eventually, wrapped_on_complete will be called, which will, among other
+ * things, add the LB token to the call's initial metadata */
+ return pick_done;
}
-static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
- const grpc_grpclb_serverlist *serverlist,
- glb_lb_policy *glb_policy) {
+static grpc_lb_policy *create_rr_locked(
+ grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
- args.server_name = glb_policy->server_name;
args.client_channel_factory = glb_policy->cc_factory;
- args.addresses = process_serverlist(serverlist);
-
- grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ grpc_lb_addresses *addresses = process_serverlist(serverlist);
- if (glb_policy->addresses != NULL) {
- /* dispose of the previous version */
- grpc_lb_addresses_destroy(glb_policy->addresses, lb_token_destroy);
- }
- glb_policy->addresses = args.addresses;
+ // Replace the LB addresses in the channel args that we pass down to
+ // the subchannel.
+ static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+ const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
+ args.args = grpc_channel_args_copy_and_add_and_remove(
+ glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
+ 1);
+ grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ GPR_ASSERT(rr != NULL);
+ grpc_lb_addresses_destroy(addresses);
+ grpc_channel_args_destroy(args.args);
return rr;
}
-static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_error *error) {
+static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+/* glb_policy->rr_policy may be NULL (initial handover) */
+static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy, grpc_error *error) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
- glb_policy->rr_policy =
- create_rr(exec_ctx, glb_policy->serverlist, glb_policy);
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "RR handover. Old RR: %p", (void *)glb_policy->rr_policy);
+ }
+ if (glb_policy->rr_policy != NULL) {
+ /* if we are phasing out an existing RR instance, unref it. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
+ }
+
+ glb_policy->rr_policy =
+ create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Created RR policy (%p)", (void *)glb_policy->rr_policy);
}
+
GPR_ASSERT(glb_policy->rr_policy != NULL);
- glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
+ grpc_pollset_set_add_pollset_set(exec_ctx,
+ glb_policy->rr_policy->interested_parties,
+ glb_policy->base.interested_parties);
+
+ rr_connectivity_data *rr_connectivity =
+ gpr_malloc(sizeof(rr_connectivity_data));
+ memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
+ grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
+ rr_connectivity);
+ rr_connectivity->glb_policy = glb_policy;
+ rr_connectivity->state = grpc_lb_policy_check_connectivity(
exec_ctx, glb_policy->rr_policy, &error);
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, &glb_policy->rr_connectivity->state,
- &glb_policy->rr_connectivity->on_change);
+
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- glb_policy->rr_connectivity->state,
- GRPC_ERROR_REF(error), "rr_handover");
+ rr_connectivity->state, GRPC_ERROR_REF(error),
+ "rr_handover");
+ /* subscribe */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
/* flush pending ops */
@@ -478,11 +579,9 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
- grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pp->pick_args,
- pp->target,
- (void **)&pp->wrapped_on_complete_arg.lb_token,
- &pp->wrapped_on_complete);
- pp->wrapped_on_complete_arg.owning_pending_node = pp;
+ pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+ &pp->pick_args, pp->target,
+ &pp->wrapped_on_complete_arg);
}
pending_ping *pping;
@@ -495,44 +594,45 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
(intptr_t)glb_policy->rr_policy);
}
grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
- &pping->wrapped_notify);
- pping->wrapped_notify_arg.owning_pending_node = pping;
+ &pping->wrapped_notify_arg.wrapper_closure);
}
}
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
+ /* If shutdown or error, free the arg. Rely on the rest of the code to set the
+ * right grpclb status. */
rr_connectivity_data *rr_conn_data = arg;
glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
- if (rr_conn_data->state == GRPC_CHANNEL_SHUTDOWN) {
- if (glb_policy->serverlist != NULL) {
- /* a RR policy is shutting down but there's a serverlist available ->
- * perform a handover */
- rr_handover(exec_ctx, glb_policy, error);
- } else {
- /* shutting down and no new serverlist available. Bail out. */
- gpr_free(rr_conn_data);
- }
+ if (rr_conn_data->state != GRPC_CHANNEL_SHUTDOWN &&
+ !glb_policy->shutting_down) {
+ gpr_mu_lock(&glb_policy->mu);
+ /* RR not shutting down. Mimic the RR's policy state */
+ grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
+ rr_conn_data->state, GRPC_ERROR_REF(error),
+ "rr_connectivity_cb");
+ /* resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_conn_data->state,
+ &rr_conn_data->on_change);
+ gpr_mu_unlock(&glb_policy->mu);
} else {
- if (error == GRPC_ERROR_NONE) {
- /* RR not shutting down. Mimic the RR's policy state */
- grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- rr_conn_data->state, GRPC_ERROR_REF(error),
- "glb_rr_connectivity_changed");
- /* resubscribe */
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_conn_data->state,
- &rr_conn_data->on_change);
- } else { /* error */
- gpr_free(rr_conn_data);
- }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "rr_connectivity_cb");
+ gpr_free(rr_conn_data);
}
}
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
@@ -540,23 +640,27 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* time, this should be changed to allow a list with no balancer addresses,
* since the resolver might fail to return a balancer address even when
* this is the right LB policy to use. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; ++i) {
- if (args->addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy));
memset(glb_policy, 0, sizeof(*glb_policy));
- /* All input addresses in args->addresses come from a resolver that claims
+ /* All input addresses in addresses come from a resolver that claims
* they are LB services. It's the resolver's responsibility to make sure
* this
* policy is only instantiated and used in that case.
*
* Create a client channel over them to communicate with a LB service */
- glb_policy->server_name = gpr_strdup(args->server_name);
+ glb_policy->server_name = gpr_strdup(server_name);
glb_policy->cc_factory = args->client_channel_factory;
+ glb_policy->args = grpc_channel_args_copy(args->args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
/* construct a target from the addresses in args, given in the form
@@ -564,22 +668,19 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* TODO(dgq): support mixed ip version */
char **addr_strs = gpr_malloc(sizeof(char *) * num_grpclb_addrs);
size_t addr_index = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (args->addresses->addresses[i].user_data != NULL) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
- if (args->addresses->addresses[i].is_balancer) {
+ if (addresses->addresses[i].is_balancer) {
if (addr_index == 0) {
- addr_strs[addr_index++] = grpc_sockaddr_to_uri(
- (const struct sockaddr *)&args->addresses->addresses[i]
- .address.addr);
+ addr_strs[addr_index++] =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
} else {
- GPR_ASSERT(grpc_sockaddr_to_string(
- &addr_strs[addr_index++],
- (const struct sockaddr *)&args->addresses->addresses[i]
- .address.addr,
- true) > 0);
+ GPR_ASSERT(grpc_sockaddr_to_string(&addr_strs[addr_index++],
+ &addresses->addresses[i].address,
+ true) > 0);
}
}
}
@@ -587,10 +688,29 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
char *target_uri_str = gpr_strjoin_sep((const char **)addr_strs,
num_grpclb_addrs, ",", &uri_path_len);
- /* will pick using pick_first */
+ /* Create a channel to talk to the LBs.
+ *
+ * We strip out the channel arg for the LB policy name, since we want
+ * to use the default (pick_first) in this case.
+ *
+ * We also strip out the channel arg for the resolved addresses, since
+ * that will be generated by the name resolver used in the LB channel.
+ * Note that the LB channel will use the sockaddr resolver, so this
+ * won't actually generate a query to DNS (or some other name service).
+ * However, the addresses returned by the sockaddr resolver will have
+ * is_balancer=false, whereas our own addresses have is_balancer=true.
+ * We need the LB channel to return addresses with is_balancer=false
+ * so that it does not wind up recursively using the grpclb LB policy,
+ * as per the special case logic in client_channel.c.
+ */
+ static const char *keys_to_remove[] = {GRPC_ARG_LB_POLICY_NAME,
+ GRPC_ARG_LB_ADDRESSES};
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove));
glb_policy->lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, glb_policy->cc_factory, target_uri_str,
- GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, NULL);
+ GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
+ grpc_channel_args_destroy(new_args);
gpr_free(target_uri_str);
for (size_t i = 0; i < num_grpclb_addrs; i++) {
@@ -603,18 +723,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
- rr_connectivity_data *rr_connectivity =
- gpr_malloc(sizeof(rr_connectivity_data));
- memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity);
- rr_connectivity->glb_policy = glb_policy;
- glb_policy->rr_connectivity = rr_connectivity;
-
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
gpr_mu_init(&glb_policy->mu);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
+
return &glb_policy->base;
}
@@ -623,6 +736,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void *)glb_policy->server_name);
+ grpc_channel_args_destroy(glb_policy->args);
grpc_channel_destroy(glb_policy->lb_channel);
glb_policy->lb_channel = NULL;
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
@@ -630,14 +744,13 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
- grpc_lb_addresses_destroy(glb_policy->addresses, lb_token_destroy);
gpr_free(glb_policy);
}
-static void lb_client_data_destroy(struct lb_client_data *lb_client);
static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
+ glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@@ -648,28 +761,29 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_NONE, NULL);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_NONE, NULL);
pping = next;
}
if (glb_policy->rr_policy) {
- /* unsubscribe */
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, NULL,
- &glb_policy->rr_connectivity->on_change);
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
- lb_client_data_destroy(glb_policy->lb_client);
- glb_policy->lb_client = NULL;
+ if (glb_policy->started_picking) {
+ if (glb_policy->lb_call != NULL) {
+ grpc_call_cancel(glb_policy->lb_call, NULL);
+ /* lb_on_server_status_received will pick up the cancellation and clean up
+ */
+ }
+ }
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
@@ -686,11 +800,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(
- exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
- exec_ctx, &pp->wrapped_on_complete,
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@@ -702,27 +814,20 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static grpc_call *lb_client_data_get_call(struct lb_client_data *lb_client);
static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
- if (glb_policy->lb_client != NULL) {
- /* cancel the call to the load balancer service, if any */
- grpc_call_cancel(lb_client_data_get_call(glb_policy->lb_client), NULL);
- }
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(
- exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
grpc_exec_ctx_sched(
- exec_ctx, &pp->wrapped_on_complete,
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@@ -734,18 +839,20 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy);
-static void start_picking(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy);
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
glb_policy->started_picking = true;
- query_for_backends(exec_ctx, glb_policy);
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
}
@@ -771,48 +878,35 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (glb_policy->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "about to PICK from 0x%" PRIxPTR "",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
+ (void *)glb_policy, (void *)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- memset(&glb_policy->wc_arg, 0, sizeof(wrapped_rr_closure_arg));
- glb_policy->wc_arg.rr_policy = glb_policy->rr_policy;
- glb_policy->wc_arg.target = target;
- glb_policy->wc_arg.wrapped_closure = on_complete;
- glb_policy->wc_arg.lb_token_mdelem_storage =
- pick_args->lb_token_mdelem_storage;
- glb_policy->wc_arg.initial_metadata = pick_args->initial_metadata;
- glb_policy->wc_arg.owning_pending_node = NULL;
- grpc_closure_init(&glb_policy->wrapped_on_complete, wrapped_rr_closure,
- &glb_policy->wc_arg);
-
- pick_done =
- grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
- (void **)&glb_policy->wc_arg.lb_token,
- &glb_policy->wrapped_on_complete);
- if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
- (intptr_t)glb_policy->wc_arg.rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->wc_arg.rr_policy, "glb_pick");
- /* add the load reporting initial metadata */
- initial_metadata_add_lb_token(
- pick_args->initial_metadata, pick_args->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(glb_policy->wc_arg.lb_token));
- }
+ wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
+ memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
+
+ grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+ wc_arg->rr_policy = glb_policy->rr_policy;
+ wc_arg->target = target;
+ wc_arg->wrapped_closure = on_complete;
+ wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
+ wc_arg->initial_metadata = pick_args->initial_metadata;
+ wc_arg->free_when_done = wc_arg;
+ pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+ pick_args, target, wc_arg);
} else {
- /* else, the pending pick will be registered and taken care of by the
- * pending pick list inside the RR policy (glb_policy->rr_policy) */
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- glb_policy->base.interested_parties);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG,
+ "No RR policy in grpclb instance %p. Adding to grpclb's pending "
+ "picks",
+ (void *)(glb_policy));
+ }
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
pick_done = false;
}
@@ -841,7 +935,7 @@ static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
}
gpr_mu_unlock(&glb_policy->mu);
@@ -859,248 +953,182 @@ static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&glb_policy->mu);
}
-/*
- * lb_client_data
- *
- * Used internally for the client call to the LB */
-typedef struct lb_client_data {
- gpr_mu mu;
-
- /* called once initial metadata's been sent */
- grpc_closure md_sent;
-
- /* called once the LoadBalanceRequest has been sent to the LB server. See
- * src/proto/grpc/.../load_balancer.proto */
- grpc_closure req_sent;
-
- /* A response from the LB server has been received (or error). Process it */
- grpc_closure res_rcvd;
-
- /* After the client has sent a close to the LB server */
- grpc_closure close_sent;
-
- /* ... and the status from the LB server has been received */
- grpc_closure srv_status_rcvd;
-
- grpc_call *lb_call; /* streaming call to the LB server, */
- gpr_timespec deadline; /* for the streaming call to the LB server */
-
- grpc_metadata_array initial_metadata_recv; /* initial MD from LB server */
- grpc_metadata_array trailing_metadata_recv; /* trailing MD from LB server */
-
- /* what's being sent to the LB server. Note that its value may vary if the LB
- * server indicates a redirect. */
- grpc_byte_buffer *request_payload;
-
- /* response from the LB server, if any. Processed in res_recv_cb() */
- grpc_byte_buffer *response_payload;
-
- /* the call's status and status detailset in srv_status_rcvd_cb() */
- grpc_status_code status;
- char *status_details;
- size_t status_details_capacity;
-
- /* pointer back to the enclosing policy */
- glb_lb_policy *glb_policy;
-} lb_client_data;
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_call_init(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
- lb_client_data *lb_client = gpr_malloc(sizeof(lb_client_data));
- memset(lb_client, 0, sizeof(lb_client_data));
-
- gpr_mu_init(&lb_client->mu);
- grpc_closure_init(&lb_client->md_sent, md_sent_cb, lb_client);
-
- grpc_closure_init(&lb_client->req_sent, req_sent_cb, lb_client);
- grpc_closure_init(&lb_client->res_rcvd, res_recv_cb, lb_client);
- grpc_closure_init(&lb_client->close_sent, close_sent_cb, lb_client);
- grpc_closure_init(&lb_client->srv_status_rcvd, srv_status_rcvd_cb, lb_client);
-
- lb_client->deadline = glb_policy->deadline;
-
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
- * entities passed to glb_pick(). */
- lb_client->lb_call = grpc_channel_create_pollset_set_call(
+ * entities from \a client_channel. */
+ glb_policy->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
"/grpc.lb.v1.LoadBalancer/BalanceLoad", glb_policy->server_name,
- lb_client->deadline, NULL);
+ glb_policy->deadline, NULL);
- grpc_metadata_array_init(&lb_client->initial_metadata_recv);
- grpc_metadata_array_init(&lb_client->trailing_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
grpc_grpclb_request *request =
grpc_grpclb_request_create(glb_policy->server_name);
gpr_slice request_payload_slice = grpc_grpclb_request_encode(request);
- lb_client->request_payload =
+ glb_policy->lb_request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_slice_unref(request_payload_slice);
grpc_grpclb_request_destroy(request);
- lb_client->status_details = NULL;
- lb_client->status_details_capacity = 0;
- lb_client->glb_policy = glb_policy;
- return lb_client;
+ glb_policy->lb_call_status_details = NULL;
+ glb_policy->lb_call_status_details_capacity = 0;
+
+ grpc_closure_init(&glb_policy->lb_on_server_status_received,
+ lb_on_server_status_received, glb_policy);
+ grpc_closure_init(&glb_policy->lb_on_response_received,
+ lb_on_response_received, glb_policy);
+
+ gpr_backoff_init(&glb_policy->lb_call_backoff_state, BACKOFF_MULTIPLIER,
+ BACKOFF_JITTER, BACKOFF_MIN_SECONDS * 1000,
+ BACKOFF_MAX_SECONDS * 1000);
}
-static void lb_client_data_destroy(lb_client_data *lb_client) {
- grpc_call_destroy(lb_client->lb_call);
- grpc_metadata_array_destroy(&lb_client->initial_metadata_recv);
- grpc_metadata_array_destroy(&lb_client->trailing_metadata_recv);
+static void lb_call_destroy(glb_lb_policy *glb_policy) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ grpc_call_destroy(glb_policy->lb_call);
+ glb_policy->lb_call = NULL;
- grpc_byte_buffer_destroy(lb_client->request_payload);
+ grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
- gpr_free(lb_client->status_details);
- gpr_mu_destroy(&lb_client->mu);
- gpr_free(lb_client);
-}
-static grpc_call *lb_client_data_get_call(lb_client_data *lb_client) {
- return lb_client->lb_call;
+ grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
+ gpr_free(glb_policy->lb_call_status_details);
}
/*
* Auxiliary functions and LB client callbacks.
*/
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
+ lb_call_init(glb_policy);
+
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
+ (void *)glb_policy, (void *)glb_policy->lb_call);
+ }
+ GPR_ASSERT(glb_policy->lb_call != NULL);
- glb_policy->lb_client = lb_client_data_create(glb_policy);
grpc_call_error call_error;
- grpc_op ops[1];
+ grpc_op ops[4];
memset(ops, 0, sizeof(ops));
+
grpc_op *op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->md_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- op = ops;
- op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
- op->data.recv_status_on_client.trailing_metadata =
- &glb_policy->lb_client->trailing_metadata_recv;
- op->data.recv_status_on_client.status = &glb_policy->lb_client->status;
- op->data.recv_status_on_client.status_details =
- &glb_policy->lb_client->status_details;
- op->data.recv_status_on_client.status_details_capacity =
- &glb_policy->lb_client->status_details_capacity;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata = &glb_policy->lb_initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->srv_status_rcvd);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
- grpc_op ops[1];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message = lb_client->request_payload;
+ op->data.send_message = glb_policy->lb_request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->req_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
-
- grpc_op ops[2];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
- op->op = GRPC_OP_RECV_INITIAL_METADATA;
- op->data.recv_initial_metadata = &lb_client->initial_metadata_recv;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata =
+ &glb_policy->lb_trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
+ op->data.recv_status_on_client.status_details =
+ &glb_policy->lb_call_status_details;
+ op->data.recv_status_on_client.status_details_capacity =
+ &glb_policy->lb_call_status_details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+ * count goes to zero) to be unref'd in lb_on_server_status_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_server_status_received);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+ op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd);
+ /* take another weak ref to be unref'd in lb_on_response_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
- if (lb_client->response_payload != NULL) {
+ if (glb_policy->lb_response_payload != NULL) {
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
- * lb_client->response_payload, for a serverlist. */
+ * glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lb_client->response_payload);
+ grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
gpr_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
- grpc_byte_buffer_destroy(lb_client->response_payload);
+ grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
gpr_slice_unref(response_slice);
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Serverlist with %zu servers received",
- serverlist->num_servers);
+ gpr_log(GPR_INFO, "Serverlist with %lu servers received",
+ (unsigned long)serverlist->num_servers);
+ for (size_t i = 0; i < serverlist->num_servers; ++i) {
+ grpc_resolved_address addr;
+ parse_server(serverlist->servers[i], &addr);
+ char *ipport;
+ grpc_sockaddr_to_string(&ipport, &addr, false);
+ gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+ gpr_free(ipport);
+ }
}
/* update serverlist */
if (serverlist->num_servers > 0) {
- if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
- serverlist)) {
+ gpr_mu_lock(&glb_policy->mu);
+ if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
} else { /* new serverlist */
- if (lb_client->glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(lb_client->glb_policy->serverlist);
+ grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
/* and update the copy in the glb_lb_policy instance */
- lb_client->glb_policy->serverlist = serverlist;
- }
- if (lb_client->glb_policy->rr_policy == NULL) {
- /* initial "handover", in this case from a null RR policy, meaning
- * it'll just create the first RR policy instance */
- rr_handover(exec_ctx, lb_client->glb_policy, error);
- } else {
- /* unref the RR policy, eventually leading to its substitution with a
- * new one constructed from the received serverlist (see
- * glb_rr_connectivity_changed) */
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
- "serverlist_received");
+ glb_policy->serverlist = serverlist;
+
+ rr_handover_locked(exec_ctx, glb_policy, error);
}
+ gpr_mu_unlock(&glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@@ -1108,60 +1136,94 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
"response with > 0 servers is received");
}
}
+ } else { /* serverlist == NULL */
+ gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+ gpr_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
+ gpr_slice_unref(response_slice);
+ }
+ if (!glb_policy->shutting_down) {
/* keep listening for serverlist updates */
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* reuse the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd); /* loop */
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
- return;
}
-
- GPR_ASSERT(serverlist == NULL);
- gpr_log(GPR_ERROR, "Invalid LB response received: '%s'",
- gpr_dump_slice(response_slice, GPR_DUMP_ASCII));
- gpr_slice_unref(response_slice);
-
- /* Disconnect from server returning invalid response. */
- op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
- op->flags = 0;
- op->reserved = NULL;
- op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->close_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
+ } else { /* empty payload: call cancelled. */
+ /* dispose of the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() and reused in every reception loop */
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_empty_payload");
}
- /* empty payload: call cancelled by server. Cleanups happening in
- * srv_status_rcvd_cb */
}
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "Close from LB client sent. Waiting from server status now");
+static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ if (!glb_policy->shutting_down) {
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
+ gpr_mu_unlock(&glb_policy->mu);
+
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_on_retry_timer");
}
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "status from lb server received. Status = %d, Details = '%s', "
- "Capaticy "
- "= %zu",
- lb_client->status, lb_client->status_details,
- lb_client->status_details_capacity);
+ gpr_log(GPR_DEBUG,
+ "Status from LB server received. Status = %d, Details = '%s', "
+ "(call: %p)",
+ glb_policy->lb_call_status, glb_policy->lb_call_status_details,
+ (void *)glb_policy->lb_call);
+ }
+
+ /* We need to perform cleanups no matter what. */
+ lb_call_destroy(glb_policy);
+
+ if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try =
+ gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
+ timeout.tv_sec, timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "... retrying immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ lb_call_on_retry_timer, glb_policy, now);
}
- /* TODO(dgq): deal with stream termination properly (fire up another one?
- * fail the original call?) */
+ gpr_mu_unlock(&glb_policy->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.h b/src/core/ext/lb_policy/grpclb/grpclb.h
index 83552b4fa0..ff23f3a545 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/lb_policy/grpclb/grpclb.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_LB_POLICY_GRPCLB_GRPCLB_H
#define GRPC_CORE_EXT_LB_POLICY_GRPCLB_GRPCLB_H
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
index c1e73d08ef..079a64a3f3 100644
--- a/src/core/ext/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
@@ -36,7 +36,7 @@
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
#ifdef __cplusplus
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index 53fed22bae..e36d0966f8 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -77,7 +77,7 @@ typedef struct _grpc_lb_v1_Server {
bool has_port;
int32_t port;
bool has_load_balance_token;
- char load_balance_token[65];
+ char load_balance_token[50];
bool has_drop_request;
bool drop_request;
/* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
@@ -172,7 +172,7 @@ extern const pb_field_t grpc_lb_v1_Server_fields[5];
#define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
#define grpc_lb_v1_InitialLoadBalanceResponse_size 90
/* grpc_lb_v1_ServerList_size depends on runtime parameters */
-#define grpc_lb_v1_Server_size 98
+#define grpc_lb_v1_Server_size 83
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 961a0c9b19..ac3c6a305a 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -34,12 +34,13 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
+
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/transport/connectivity_state.h"
typedef struct pending_pick {
struct pending_pick *next;
- grpc_polling_entity *pollent;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
@@ -119,8 +120,6 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
pp = next;
@@ -138,8 +137,6 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -168,8 +165,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@@ -214,7 +209,7 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
/* Check atomically for a selected channel */
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected != NULL) {
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
}
@@ -223,17 +218,14 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
selected = GET_SELECTED(p);
if (selected) {
gpr_mu_unlock(&p->mu);
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollent = pick_args->pollent;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
@@ -318,9 +310,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = selected;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@@ -444,14 +434,22 @@ static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- GPR_ASSERT(args->addresses != NULL);
GPR_ASSERT(args->client_channel_factory != NULL);
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (!args->addresses->addresses[i].is_balancer) ++num_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (!addresses->addresses[i].is_balancer) ++num_addrs;
}
if (num_addrs == 0) return NULL;
@@ -462,22 +460,21 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
memset(p->subchannels, 0, sizeof(*p->subchannels) * num_addrs);
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */
- if (args->addresses->addresses[i].is_balancer) continue;
+ if (addresses->addresses[i].is_balancer) continue;
- if (args->addresses->addresses[i].user_data != NULL) {
+ if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
/* server_name will be copied as part of the subchannel creation. This makes
- * the copying of args->server_name (a borrowed pointer) OK. */
- sc_args.server_name = args->server_name;
- sc_args.addr =
- (struct sockaddr *)(&args->addresses->addresses[i].address.addr);
- sc_args.addr_len = args->addresses->addresses[i].address.len;
+ * the copying of server_name (a borrowed pointer) OK. */
+ sc_args.server_name = server_name;
+ sc_args.addr = &addresses->addresses[i].address;
+ sc_args.args = args->args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 930fa86aca..b0c461730b 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -63,7 +63,8 @@
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -78,9 +79,6 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
- /* polling entity for the pick()'s async notification */
- grpc_polling_entity *pollent;
-
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
@@ -122,6 +120,8 @@ typedef struct {
grpc_connectivity_state connectivity_state;
/** the subchannel's target user data */
void *user_data;
+ /** vtable to operate over \a user_data */
+ const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct round_robin_lb_policy {
@@ -188,9 +188,13 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
}
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
- (void *)p->ready_list_last_pick,
- (void *)p->ready_list_last_pick->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
+ "CSC %p)",
+ (void *)p, (void *)p->ready_list_last_pick,
+ (void *)p->ready_list_last_pick->subchannel,
+ (void *)grpc_subchannel_get_connected_subchannel(
+ p->ready_list_last_pick->subchannel));
}
}
@@ -257,9 +261,18 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
+ }
+
for (size_t i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin_destroy");
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(sd->user_data);
+ }
gpr_free(sd);
}
@@ -287,6 +300,9 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
gpr_mu_lock(&p->mu);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
+ }
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@@ -298,7 +314,7 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "shutdown");
+ GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -318,8 +334,6 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -348,8 +362,6 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*pp->target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -401,10 +413,16 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
+ }
+
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
- gpr_mu_unlock(&p->mu);
- *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
if (user_data != NULL) {
*user_data = selected->user_data;
@@ -416,17 +434,15 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
+ gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollent = pick_args->pollent;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -442,7 +458,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
- ready_list *selected;
int unref = 0;
@@ -463,17 +478,20 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemtively replicates rr_pick()'s actions. */
- selected = peek_next_connected_locked(p);
+ ready_list *selected = peek_next_connected_locked(p);
+ GPR_ASSERT(selected != NULL);
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
advance_last_picked_locked(p);
}
+
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target =
- grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
@@ -482,8 +500,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@@ -589,7 +605,9 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
- target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
@@ -610,14 +628,22 @@ static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- GPR_ASSERT(args->addresses != NULL);
GPR_ASSERT(args->client_channel_factory != NULL);
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (!args->addresses->addresses[i].is_balancer) ++num_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (!addresses->addresses[i].is_balancer) ++num_addrs;
}
if (num_addrs == 0) return NULL;
@@ -630,17 +656,16 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */
- if (args->addresses->addresses[i].is_balancer) continue;
+ if (addresses->addresses[i].is_balancer) continue;
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
/* server_name will be copied as part of the subchannel creation. This makes
- * the copying of args->server_name (a borrowed pointer) OK. */
- sc_args.server_name = args->server_name;
- sc_args.addr =
- (struct sockaddr *)(&args->addresses->addresses[i].address.addr);
- sc_args.addr_len = args->addresses->addresses[i].address.len;
+ * the copying of server_name (a borrowed pointer) OK. */
+ sc_args.server_name = server_name;
+ sc_args.addr = &addresses->addresses[i].address;
+ sc_args.args = args->args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
@@ -652,7 +677,9 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sd->policy = p;
sd->index = subchannel_idx;
sd->subchannel = subchannel;
- sd->user_data = args->addresses->addresses[i].user_data;
+ sd->user_data_vtable = addresses->user_data_vtable;
+ sd->user_data =
+ sd->user_data_vtable->copy(addresses->addresses[i].user_data);
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
diff --git a/src/core/ext/load_reporting/load_reporting.h b/src/core/ext/load_reporting/load_reporting.h
index e37817d8c2..a157844734 100644
--- a/src/core/ext/load_reporting/load_reporting.h
+++ b/src/core/ext/load_reporting/load_reporting.h
@@ -37,13 +37,21 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/channel/channel_stack.h"
-/** Metadata key for initial metadata coming from clients */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_INITIAL_MD_KEY "load-reporting-initial"
+/** Metadata key for the gRPC LB load balancer token.
+ *
+ * The value corresponding to this key is an opaque token that is given to the
+ * frontend as part of each pick; the frontend sends this token to the backend
+ * in each request it sends when using that pick. The token is used by the
+ * backend to verify the request and to allow the backend to report load to the
+ * gRPC LB system. */
+#define GRPC_LB_TOKEN_MD_KEY "lb-token"
-/** Metadata key for trailing metadata from servers */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_TRAILING_MD_KEY "load-reporting-trailing"
+/** Metadata key for gRPC LB cost reporting.
+ *
+ * The value corresponding to this key is an opaque binary blob reported by the
+ * backend as part of its trailing metadata containing cost information for the
+ * call. */
+#define GRPC_LB_COST_MD_KEY "lb-cost-bin"
/** Identifiers for the invocation point of the user's LR callback */
typedef enum grpc_load_reporting_source {
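
The renamed keys above are ordinary metadata keys. As a minimal sketch of the intended usage (not part of this diff; serialized_cost_blob and its length are assumed to be produced by the backend application), a backend could attach its cost report to the call's trailing metadata under the new key:

    #include <string.h>
    #include <grpc/grpc.h>
    #include "src/core/ext/load_reporting/load_reporting.h"

    /* Hypothetical helper: fills one grpc_metadata entry with the opaque cost
       blob so it can be sent via GRPC_OP_SEND_STATUS_FROM_SERVER. */
    static void fill_cost_md(grpc_metadata *md, const char *serialized_cost_blob,
                             size_t serialized_cost_blob_len) {
      memset(md, 0, sizeof(*md));
      md->key = GRPC_LB_COST_MD_KEY; /* "lb-cost-bin" */
      md->value = serialized_cost_blob;
      md->value_length = serialized_cost_blob_len;
    }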
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 394f0cb832..eeae2400fb 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -75,7 +75,7 @@ static grpc_mdelem *recv_md_filter(void *user_data, grpc_mdelem *md) {
if (md->key == GRPC_MDSTR_PATH) {
calld->service_method = grpc_mdstr_as_c_string(md->value);
- } else if (md->key == GRPC_MDSTR_LOAD_REPORTING_INITIAL) {
+ } else if (md->key == GRPC_MDSTR_LB_TOKEN) {
calld->initial_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}
@@ -193,7 +193,7 @@ static grpc_mdelem *lr_trailing_md_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- if (md->key == GRPC_MDSTR_LOAD_REPORTING_TRAILING) {
+ if (md->key == GRPC_MDSTR_LB_COST_BIN) {
calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index fa33ffd7bd..958b8af8b2 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -37,9 +37,10 @@
#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
@@ -53,12 +54,12 @@
typedef struct {
/** base class: must be first */
grpc_resolver base;
- /** target name */
- char *target_name;
- /** name to resolve (usually the same as target_name) */
+ /** name to resolve */
char *name_to_resolve;
/** default port to use */
char *default_port;
+ /** channel args. */
+ grpc_channel_args *channel_args;
/** mutex guarding the rest of the state */
gpr_mu mu;
@@ -71,9 +72,9 @@ typedef struct {
/** pending next completion, or NULL */
grpc_closure *next_completion;
/** target result address for next completion */
- grpc_resolver_result **target_result;
+ grpc_channel_args **target_result;
/** current (fully resolved) result */
- grpc_resolver_result *resolved_result;
+ grpc_channel_args *resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -94,7 +95,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
@@ -127,7 +128,7 @@ static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -162,22 +163,23 @@ static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
- grpc_resolver_result *result = NULL;
+ grpc_channel_args *result = NULL;
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving);
r->resolving = false;
if (r->addresses != NULL) {
- grpc_lb_addresses *addresses =
- grpc_lb_addresses_create(r->addresses->naddrs);
+ grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ r->addresses->naddrs, NULL /* user_data_vtable */);
for (size_t i = 0; i < r->addresses->naddrs; ++i) {
grpc_lb_addresses_set_address(
addresses, i, &r->addresses->addrs[i].addr,
r->addresses->addrs[i].len, false /* is_balancer */,
NULL /* balancer_name */, NULL /* user_data */);
}
+ grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
+ result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
grpc_resolved_addresses_destroy(r->addresses);
- result = grpc_resolver_result_create(r->target_name, addresses,
- NULL /* lb_policy_name */, NULL);
+ grpc_lb_addresses_destroy(addresses);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
@@ -197,8 +199,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, dns_on_retry_timer, r,
now);
}
- if (r->resolved_result) {
- grpc_resolver_result_unref(exec_ctx, r->resolved_result);
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(r->resolved_result);
}
r->resolved_result = result;
r->resolved_version++;
@@ -222,10 +224,9 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
- *r->target_result = r->resolved_result;
- if (r->resolved_result) {
- grpc_resolver_result_ref(r->resolved_result);
- }
+ *r->target_result = r->resolved_result == NULL
+ ? NULL
+ : grpc_channel_args_copy(r->resolved_result);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
r->published_version = r->resolved_version;
@@ -235,12 +236,12 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
gpr_mu_destroy(&r->mu);
- if (r->resolved_result) {
- grpc_resolver_result_unref(exec_ctx, r->resolved_result);
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(r->resolved_result);
}
- gpr_free(r->target_name);
gpr_free(r->name_to_resolve);
gpr_free(r->default_port);
+ grpc_channel_args_destroy(r->channel_args);
gpr_free(r);
}
@@ -260,9 +261,14 @@ static grpc_resolver *dns_create(grpc_resolver_args *args,
memset(r, 0, sizeof(*r));
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &dns_resolver_vtable);
- r->target_name = gpr_strdup(path);
r->name_to_resolve = proxy_name == NULL ? gpr_strdup(path) : proxy_name;
r->default_port = gpr_strdup(default_port);
+ grpc_arg server_name_arg;
+ server_name_arg.type = GRPC_ARG_STRING;
+ server_name_arg.key = GRPC_ARG_SERVER_NAME;
+ server_name_arg.value.string = (char *)path;
+ r->channel_args =
+ grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
gpr_backoff_init(&r->backoff_state, BACKOFF_MULTIPLIER, BACKOFF_JITTER,
BACKOFF_MIN_SECONDS * 1000, BACKOFF_MAX_SECONDS * 1000);
return &r->base;
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 5a7a32d7cb..5fec03a8e4 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -33,6 +33,7 @@
#include <stdbool.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -40,8 +41,10 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/parse_address.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
+#include "src/core/ext/client_channel/parse_address.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
@@ -49,10 +52,10 @@
typedef struct {
/** base class: must be first */
grpc_resolver base;
- /** the path component of the uri passed in */
- char *target_name;
/** the addresses that we've 'resolved' */
grpc_lb_addresses *addresses;
+ /** channel args */
+ grpc_channel_args *channel_args;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** have we published? */
@@ -60,7 +63,7 @@ typedef struct {
/** pending next completion, or NULL */
grpc_closure *next_completion;
/** target result address for next completion */
- grpc_resolver_result **target_result;
+ grpc_channel_args **target_result;
} sockaddr_resolver;
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
@@ -72,7 +75,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
@@ -101,7 +104,7 @@ static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -116,10 +119,9 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r) {
if (r->next_completion != NULL && !r->published) {
r->published = true;
- *r->target_result = grpc_resolver_result_create(
- r->target_name,
- grpc_lb_addresses_copy(r->addresses, NULL /* user_data_copy */),
- NULL /* lb_policy_name */, NULL);
+ grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
+ *r->target_result =
+ grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
@@ -128,8 +130,8 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
- grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
- gpr_free(r->target_name);
+ grpc_lb_addresses_destroy(r->addresses);
+ grpc_channel_args_destroy(r->channel_args);
gpr_free(r);
}
@@ -149,7 +151,7 @@ static char *ipv6_get_default_authority(grpc_resolver_factory *factory,
return ip_get_default_authority(uri);
}
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
char *unix_get_default_authority(grpc_resolver_factory *factory,
grpc_uri *uri) {
return gpr_strdup("localhost");
@@ -160,8 +162,7 @@ static void do_nothing(void *ignored) {}
static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
int parse(grpc_uri *uri,
- struct sockaddr_storage *dst,
- size_t *len)) {
+ grpc_resolved_address *dst)) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
args->uri->scheme);
@@ -173,17 +174,15 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
gpr_slice_buffer path_parts;
gpr_slice_buffer_init(&path_parts);
gpr_slice_split(path_slice, ",", &path_parts);
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(path_parts.count);
+ grpc_lb_addresses *addresses =
+ grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
char *part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
ith_uri.path = part_str;
- if (!parse(
- &ith_uri,
- (struct sockaddr_storage *)(&addresses->addresses[i].address.addr),
- &addresses->addresses[i].address.len)) {
- errors_found = true;
+ if (!parse(&ith_uri, &addresses->addresses[i].address)) {
+ errors_found = true;
}
gpr_free(part_str);
if (errors_found) break;
@@ -191,14 +190,19 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
gpr_slice_buffer_destroy(&path_parts);
gpr_slice_unref(path_slice);
if (errors_found) {
- grpc_lb_addresses_destroy(addresses, NULL /* user_data_destroy */);
+ grpc_lb_addresses_destroy(addresses);
return NULL;
}
/* Instantiate resolver. */
sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver));
memset(r, 0, sizeof(*r));
- r->target_name = gpr_strdup(args->uri->path);
r->addresses = addresses;
+ grpc_arg server_name_arg;
+ server_name_arg.type = GRPC_ARG_STRING;
+ server_name_arg.key = GRPC_ARG_SERVER_NAME;
+ server_name_arg.value.string = args->uri->path;
+ r->channel_args =
+ grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
return &r->base;
@@ -223,7 +227,7 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver_factory name##_resolver_factory = { \
&name##_factory_vtable}
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
DECL_FACTORY(unix);
#endif
DECL_FACTORY(ipv4);
@@ -232,7 +236,7 @@ DECL_FACTORY(ipv6);
void grpc_resolver_sockaddr_init(void) {
grpc_register_resolver_type(&ipv4_resolver_factory);
grpc_register_resolver_type(&ipv6_resolver_factory);
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
grpc_register_resolver_type(&unix_resolver_factory);
#endif
}
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.c b/src/core/ext/transport/chttp2/alpn/alpn.c
index 48b0217265..55710dc5ae 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.c
+++ b/src/core/ext/transport/chttp2/alpn/alpn.c
@@ -36,7 +36,7 @@
#include <grpc/support/useful.h>
/* in order of preference */
-static const char *const supported_versions[] = {"h2"};
+static const char *const supported_versions[] = {"grpc-exp", "h2"};
int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) {
size_t i;
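
With "grpc-exp" now listed ahead of "h2", the ALPN support check accepts either protocol string. A minimal usage sketch (assuming the helper declared above keeps its signature):

    #include <string.h>
    #include <grpc/support/log.h>
    #include "src/core/ext/transport/chttp2/alpn/alpn.h"

    /* Both identifiers should be reported as supported after this change. */
    GPR_ASSERT(grpc_chttp2_is_alpn_version_supported("grpc-exp", strlen("grpc-exp")));
    GPR_ASSERT(grpc_chttp2_is_alpn_version_supported("h2", strlen("h2")));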
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index 858b1dbee0..71a06e118b 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -40,9 +40,9 @@
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/compress_filter.h"
@@ -52,6 +52,10 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
+//
+// connector
+//
+
typedef struct {
grpc_connector base;
gpr_refcount refs;
@@ -142,7 +146,6 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
grpc_connect_out_args *result,
grpc_closure *notify) {
connector *c = (connector *)con;
- grpc_tcp_client_connect_args tcp_client_connect_args;
GPR_ASSERT(c->notify == NULL);
GPR_ASSERT(notify->cb);
c->notify = notify;
@@ -150,47 +153,28 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
c->result = result;
c->tcp = NULL;
grpc_closure_init(&c->connected, connected, c);
- tcp_client_connect_args.interested_parties = args->interested_parties;
- tcp_client_connect_args.addr = args->addr;
- tcp_client_connect_args.addr_len = args->addr_len;
- tcp_client_connect_args.deadline = args->deadline;
- tcp_client_connect_args.channel_args = args->channel_args;
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
- &tcp_client_connect_args);
+ args->interested_parties, args->channel_args,
+ args->addr, args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
connector_ref, connector_unref, connector_shutdown, connector_connect};
-typedef struct {
- grpc_client_channel_factory base;
- gpr_refcount refs;
- grpc_channel_args *merge_args;
-} client_channel_factory;
+//
+// client_channel_factory
+//
static void client_channel_factory_ref(
- grpc_client_channel_factory *cc_factory) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- gpr_ref(&f->refs);
-}
+ grpc_client_channel_factory *cc_factory) {}
static void client_channel_factory_unref(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- if (gpr_unref(&f->refs)) {
- grpc_channel_args_destroy(f->merge_args);
- gpr_free(f);
- }
-}
+ grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {}
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- grpc_subchannel_args *args) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
+ const grpc_subchannel_args *args) {
connector *c = gpr_malloc(sizeof(*c));
- grpc_channel_args *final_args =
- grpc_channel_args_merge(args->args, f->merge_args);
- grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
gpr_ref_init(&c->refs, 1);
@@ -202,23 +186,18 @@ static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_http_connect_handshaker_create(proxy_name, args->server_name));
gpr_free(proxy_name);
}
- args->args = final_args;
- s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
- grpc_channel_args_destroy(final_args);
return s;
}
static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
- grpc_channel_args *args) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- grpc_channel_args *final_args = grpc_channel_args_merge(args, f->merge_args);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, final_args,
- GRPC_CLIENT_CHANNEL, NULL);
- grpc_channel_args_destroy(final_args);
- grpc_resolver *resolver = grpc_resolver_create(target);
+ const grpc_channel_args *args) {
+ grpc_channel *channel =
+ grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+ grpc_resolver *resolver = grpc_resolver_create(target, args);
if (!resolver) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel,
"client_channel_factory_create_channel");
@@ -226,7 +205,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
grpc_client_channel_finish_initialization(
- exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base);
+ exec_ctx, grpc_channel_get_channel_stack(channel), resolver, cc_factory);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel");
return channel;
@@ -237,6 +216,9 @@ static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
+static grpc_client_channel_factory client_channel_factory = {
+ &client_channel_factory_vtable};
+
/* Create a client channel:
Asynchronously: - resolve target
- connect to it (trying alternatives as presented)
@@ -250,16 +232,12 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
(target, args, reserved));
GPR_ASSERT(!reserved);
- client_channel_factory *f = gpr_malloc(sizeof(*f));
- memset(f, 0, sizeof(*f));
- f->base.vtable = &client_channel_factory_vtable;
- gpr_ref_init(&f->refs, 1);
- f->merge_args = grpc_channel_args_copy(args);
-
+ grpc_client_channel_factory *factory =
+ (grpc_client_channel_factory *)&client_channel_factory;
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, NULL);
+ &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args);
- grpc_client_channel_factory_unref(&exec_ctx, &f->base);
+ grpc_client_channel_factory_unref(&exec_ctx, factory);
grpc_exec_ctx_finish(&exec_ctx);
return channel != NULL ? channel : grpc_lame_client_channel_create(
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
index b2c5e5b088..1e5b1c22e3 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -44,6 +44,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
@@ -65,9 +66,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
int flags = fcntl(fd, F_GETFL, 0);
GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
- grpc_endpoint *client =
- grpc_tcp_create(grpc_fd_create(fd, "client"),
- GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "fd-client");
+ grpc_endpoint *client = grpc_tcp_client_create_from_fd(
+ &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
grpc_transport *transport =
grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index d71d9f2d52..d0ac72a011 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -40,9 +40,9 @@
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@@ -54,6 +54,10 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/tsi/transport_security_interface.h"
+//
+// connector
+//
+
typedef struct {
grpc_connector base;
gpr_refcount refs;
@@ -200,7 +204,6 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
grpc_connect_out_args *result,
grpc_closure *notify) {
connector *c = (connector *)con;
- grpc_tcp_client_connect_args tcp_client_connect_args;
GPR_ASSERT(c->notify == NULL);
c->notify = notify;
c->args = *args;
@@ -209,23 +212,21 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
GPR_ASSERT(c->connecting_endpoint == NULL);
gpr_mu_unlock(&c->mu);
grpc_closure_init(&c->connected_closure, connected, c);
- tcp_client_connect_args.interested_parties = args->interested_parties;
- tcp_client_connect_args.addr = args->addr;
- tcp_client_connect_args.addr_len = args->addr_len;
- tcp_client_connect_args.deadline = args->deadline;
- tcp_client_connect_args.channel_args = args->channel_args;
- grpc_tcp_client_connect(exec_ctx, &c->connected_closure,
- &c->newly_connecting_endpoint,
- &tcp_client_connect_args);
+ grpc_tcp_client_connect(
+ exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
+ args->interested_parties, args->channel_args, args->addr, args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
connector_ref, connector_unref, connector_shutdown, connector_connect};
+//
+// client_channel_factory
+//
+
typedef struct {
grpc_client_channel_factory base;
gpr_refcount refs;
- grpc_channel_args *merge_args;
grpc_channel_security_connector *security_connector;
} client_channel_factory;
@@ -241,19 +242,15 @@ static void client_channel_factory_unref(
if (gpr_unref(&f->refs)) {
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
"client_channel_factory");
- grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
}
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- grpc_subchannel_args *args) {
+ const grpc_subchannel_args *args) {
client_channel_factory *f = (client_channel_factory *)cc_factory;
connector *c = gpr_malloc(sizeof(*c));
- grpc_channel_args *final_args =
- grpc_channel_args_merge(args->args, f->merge_args);
- grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
c->security_connector = f->security_connector;
@@ -267,25 +264,19 @@ static grpc_subchannel *client_channel_factory_create_subchannel(
}
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
- args->args = final_args;
- s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
- grpc_channel_args_destroy(final_args);
return s;
}
static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
- grpc_channel_args *args) {
+ const grpc_channel_args *args) {
client_channel_factory *f = (client_channel_factory *)cc_factory;
-
- grpc_channel_args *final_args = grpc_channel_args_merge(args, f->merge_args);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, final_args,
- GRPC_CLIENT_CHANNEL, NULL);
- grpc_channel_args_destroy(final_args);
-
- grpc_resolver *resolver = grpc_resolver_create(target);
+ grpc_channel *channel =
+ grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+ grpc_resolver *resolver = grpc_resolver_create(target, args);
if (resolver != NULL) {
grpc_client_channel_finish_initialization(
exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base);
@@ -295,9 +286,6 @@ static grpc_channel *client_channel_factory_create_channel(
"client_channel_factory_create_channel");
channel = NULL;
}
-
- GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
- "client_channel_factory_create_channel");
return channel;
}
@@ -314,19 +302,13 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
const char *target,
const grpc_channel_args *args,
void *reserved) {
- grpc_arg connector_arg;
- grpc_channel_args *args_copy;
- grpc_channel_args *new_args_from_connector;
- grpc_channel_security_connector *security_connector;
- client_channel_factory *f;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
GRPC_API_TRACE(
"grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
"reserved=%p)",
4, (creds, target, args, reserved));
GPR_ASSERT(reserved == NULL);
-
+ // Make sure security connector does not already exist in args.
if (grpc_find_security_connector_in_args(args) != NULL) {
gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
grpc_exec_ctx_finish(&exec_ctx);
@@ -334,7 +316,9 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
target, GRPC_STATUS_INTERNAL,
"Security connector exists in channel args.");
}
-
+ // Create security connector and construct new channel args.
+ grpc_channel_security_connector *security_connector;
+ grpc_channel_args *new_args_from_connector;
if (grpc_channel_credentials_create_security_connector(
creds, target, args, &security_connector, &new_args_from_connector) !=
GRPC_SECURITY_OK) {
@@ -342,32 +326,30 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
return grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL, "Failed to create security connector.");
}
-
- connector_arg = grpc_security_connector_to_arg(&security_connector->base);
- args_copy = grpc_channel_args_copy_and_add(
+ grpc_arg connector_arg =
+ grpc_security_connector_to_arg(&security_connector->base);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != NULL ? new_args_from_connector : args,
&connector_arg, 1);
-
- f = gpr_malloc(sizeof(*f));
- memset(f, 0, sizeof(*f));
- f->base.vtable = &client_channel_factory_vtable;
- gpr_ref_init(&f->refs, 1);
-
- f->merge_args = grpc_channel_args_copy(args_copy);
- grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
-
+ // Create client channel factory.
+ client_channel_factory *f = gpr_malloc(sizeof(*f));
+ memset(f, 0, sizeof(*f));
+ f->base.vtable = &client_channel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
GRPC_SECURITY_CONNECTOR_REF(&security_connector->base,
"grpc_secure_channel_create");
f->security_connector = security_connector;
-
+ // Create channel.
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, NULL);
-
+ &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+ // Clean up.
+ GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
+ "secure_client_channel_factory_create_channel");
+ grpc_channel_args_destroy(new_args);
grpc_client_channel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);
-
return channel; /* may be NULL */
}
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index f0e07429fa..d42611b863 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -139,8 +139,8 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
goto error;
}
- err =
- grpc_tcp_server_create(NULL, grpc_server_get_channel_args(server), &tcp);
+ err = grpc_tcp_server_create(&exec_ctx, NULL,
+ grpc_server_get_channel_args(server), &tcp);
if (err != GRPC_ERROR_NONE) {
goto error;
}
@@ -148,9 +148,7 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
const size_t naddrs = resolved->naddrs;
errors = gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(
- tcp, (struct sockaddr *)&resolved->addrs[i].addr,
- resolved->addrs[i].len, &port_temp);
+ errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
if (errors[i] == GRPC_ERROR_NONE) {
if (port_num == -1) {
port_num = port_temp;
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
index 9af17fb5ae..aa2ecf5743 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
@@ -57,8 +57,12 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
char *name;
gpr_asprintf(&name, "fd:%d", fd);
- grpc_endpoint *server_endpoint = grpc_tcp_create(
- grpc_fd_create(fd, name), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota *resource_quota = grpc_resource_quota_from_channel_args(
+ grpc_server_get_channel_args(server));
+ grpc_endpoint *server_endpoint =
+ grpc_tcp_create(grpc_fd_create(fd, name), resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_free(name);
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index da3e284fcf..7ad687042d 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -61,13 +61,12 @@ typedef struct server_secure_state {
grpc_server_credentials *creds;
bool is_shutdown;
gpr_mu mu;
- gpr_refcount refcount;
- grpc_closure destroy_closure;
- grpc_closure *destroy_callback;
+ grpc_closure tcp_server_shutdown_complete;
+ grpc_closure *server_destroy_listener_done;
} server_secure_state;
typedef struct server_secure_connect {
- server_secure_state *state;
+ server_secure_state *server_state;
grpc_pollset *accepting_pollset;
grpc_tcp_server_acceptor *acceptor;
grpc_handshake_manager *handshake_mgr;
@@ -77,39 +76,28 @@ typedef struct server_secure_connect {
grpc_channel_args *args;
} server_secure_connect;
-static void state_ref(server_secure_state *state) { gpr_ref(&state->refcount); }
-
-static void state_unref(server_secure_state *state) {
- if (gpr_unref(&state->refcount)) {
- /* ensure all threads have unlocked */
- gpr_mu_lock(&state->mu);
- gpr_mu_unlock(&state->mu);
- /* clean up */
- GRPC_SECURITY_CONNECTOR_UNREF(&state->sc->base, "server");
- grpc_server_credentials_unref(state->creds);
- gpr_free(state);
- }
-}
-
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
grpc_security_status status,
grpc_endpoint *secure_endpoint,
grpc_auth_context *auth_context) {
- server_secure_connect *state = statep;
+ server_secure_connect *connection_state = statep;
if (status == GRPC_SECURITY_OK) {
if (secure_endpoint) {
- gpr_mu_lock(&state->state->mu);
- if (!state->state->is_shutdown) {
+ gpr_mu_lock(&connection_state->server_state->mu);
+ if (!connection_state->server_state->is_shutdown) {
grpc_transport *transport = grpc_create_chttp2_transport(
- exec_ctx, grpc_server_get_channel_args(state->state->server),
+ exec_ctx, grpc_server_get_channel_args(
+ connection_state->server_state->server),
secure_endpoint, 0);
grpc_arg args_to_add[2];
- args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds);
+ args_to_add[0] = grpc_server_credentials_to_arg(
+ connection_state->server_state->creds);
args_to_add[1] = grpc_auth_context_to_arg(auth_context);
grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
- state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
- grpc_server_setup_transport(exec_ctx, state->state->server, transport,
- state->accepting_pollset, args_copy);
+ connection_state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
+ grpc_server_setup_transport(
+ exec_ctx, connection_state->server_state->server, transport,
+ connection_state->accepting_pollset, args_copy);
grpc_channel_args_destroy(args_copy);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
} else {
@@ -117,21 +105,21 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
* gone away. */
grpc_endpoint_destroy(exec_ctx, secure_endpoint);
}
- gpr_mu_unlock(&state->state->mu);
+ gpr_mu_unlock(&connection_state->server_state->mu);
}
} else {
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
- grpc_channel_args_destroy(state->args);
- state_unref(state->state);
- gpr_free(state);
+ grpc_channel_args_destroy(connection_state->args);
+ grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+ gpr_free(connection_state);
}
static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
grpc_channel_args *args,
gpr_slice_buffer *read_buffer, void *user_data,
grpc_error *error) {
- server_secure_connect *state = user_data;
+ server_secure_connect *connection_state = user_data;
if (error != GRPC_ERROR_NONE) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
@@ -139,81 +127,107 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
GRPC_ERROR_UNREF(error);
grpc_channel_args_destroy(args);
gpr_free(read_buffer);
- grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr);
- grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
- state_unref(state->state);
- gpr_free(state);
+ grpc_handshake_manager_shutdown(exec_ctx, connection_state->handshake_mgr);
+ grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+ grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+ gpr_free(connection_state);
return;
}
- grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
- state->handshake_mgr = NULL;
+ grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+ connection_state->handshake_mgr = NULL;
// TODO(roth, jboeuf): Convert security connector handshaking to use new
// handshake API, and then move the code from on_secure_handshake_done()
// into this function.
- state->args = args;
+ connection_state->args = args;
grpc_server_security_connector_do_handshake(
- exec_ctx, state->state->sc, state->acceptor, endpoint, read_buffer,
- state->deadline, on_secure_handshake_done, state);
+ exec_ctx, connection_state->server_state->sc, connection_state->acceptor,
+ endpoint, read_buffer, connection_state->deadline,
+ on_secure_handshake_done, connection_state);
}
static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
- server_secure_connect *state = gpr_malloc(sizeof(*state));
- state->state = statep;
- state_ref(state->state);
- state->accepting_pollset = accepting_pollset;
- state->acceptor = acceptor;
- state->handshake_mgr = grpc_handshake_manager_create();
+ server_secure_state *server_state = statep;
+ server_secure_connect *connection_state = NULL;
+ gpr_mu_lock(&server_state->mu);
+ if (server_state->is_shutdown) {
+ gpr_mu_unlock(&server_state->mu);
+ grpc_endpoint_destroy(exec_ctx, tcp);
+ return;
+ }
+ gpr_mu_unlock(&server_state->mu);
+ grpc_tcp_server_ref(server_state->tcp);
+ connection_state = gpr_malloc(sizeof(*connection_state));
+ connection_state->server_state = server_state;
+ connection_state->accepting_pollset = accepting_pollset;
+ connection_state->acceptor = acceptor;
+ connection_state->handshake_mgr = grpc_handshake_manager_create();
// TODO(roth): We should really get this timeout value from channel
// args instead of hard-coding it.
- state->deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(120, GPR_TIMESPAN));
+ connection_state->deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
grpc_handshake_manager_do_handshake(
- exec_ctx, state->handshake_mgr, tcp,
- grpc_server_get_channel_args(state->state->server), state->deadline,
- acceptor, on_handshake_done, state);
+ exec_ctx, connection_state->handshake_mgr, tcp,
+ grpc_server_get_channel_args(connection_state->server_state->server),
+ connection_state->deadline, acceptor, on_handshake_done,
+ connection_state);
}
/* Server callback: start listening on our ports */
-static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
- grpc_pollset **pollsets, size_t pollset_count) {
- server_secure_state *state = statep;
- grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
- on_accept, state);
+static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ void *statep, grpc_pollset **pollsets,
+ size_t pollset_count) {
+ server_secure_state *server_state = statep;
+ gpr_mu_lock(&server_state->mu);
+ server_state->is_shutdown = false;
+ gpr_mu_unlock(&server_state->mu);
+ grpc_tcp_server_start(exec_ctx, server_state->tcp, pollsets, pollset_count,
+ on_accept, server_state);
}
-static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep,
- grpc_error *error) {
- server_secure_state *state = statep;
- if (state->destroy_callback != NULL) {
- state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
- GRPC_ERROR_REF(error));
+static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_error *error) {
+ server_secure_state *server_state = statep;
+ /* ensure all threads have unlocked */
+ gpr_mu_lock(&server_state->mu);
+ grpc_closure *destroy_done = server_state->server_destroy_listener_done;
+ GPR_ASSERT(server_state->is_shutdown);
+ gpr_mu_unlock(&server_state->mu);
+ /* clean up */
+ grpc_server_security_connector_shutdown(exec_ctx, server_state->sc);
+
+ /* Flush queued work before a synchronous unref. */
+ grpc_exec_ctx_flush(exec_ctx);
+ GRPC_SECURITY_CONNECTOR_UNREF(&server_state->sc->base, "server");
+ grpc_server_credentials_unref(server_state->creds);
+
+ if (destroy_done != NULL) {
+ destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
+ grpc_exec_ctx_flush(exec_ctx);
}
- grpc_server_security_connector_shutdown(exec_ctx, state->sc);
- state_unref(state);
+ gpr_free(server_state);
}
-/* Server callback: destroy the tcp listener (so we don't generate further
- callbacks) */
-static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
- grpc_closure *callback) {
- server_secure_state *state = statep;
+static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
+ grpc_server *server, void *statep,
+ grpc_closure *callback) {
+ server_secure_state *server_state = statep;
grpc_tcp_server *tcp;
- gpr_mu_lock(&state->mu);
- state->is_shutdown = true;
- state->destroy_callback = callback;
- tcp = state->tcp;
- gpr_mu_unlock(&state->mu);
+ gpr_mu_lock(&server_state->mu);
+ server_state->is_shutdown = true;
+ server_state->server_destroy_listener_done = callback;
+ tcp = server_state->tcp;
+ gpr_mu_unlock(&server_state->mu);
grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
- grpc_tcp_server_unref(exec_ctx, tcp);
+ grpc_tcp_server_unref(exec_ctx, server_state->tcp);
}
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_server_credentials *creds) {
grpc_resolved_addresses *resolved = NULL;
grpc_tcp_server *tcp = NULL;
- server_secure_state *state = NULL;
+ server_secure_state *server_state = NULL;
size_t i;
size_t count = 0;
int port_num = -1;
@@ -253,28 +267,27 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = gpr_malloc(sizeof(*state));
- memset(state, 0, sizeof(*state));
- grpc_closure_init(&state->destroy_closure, destroy_done, state);
- err = grpc_tcp_server_create(&state->destroy_closure,
+ server_state = gpr_malloc(sizeof(*server_state));
+ memset(server_state, 0, sizeof(*server_state));
+ grpc_closure_init(&server_state->tcp_server_shutdown_complete,
+ tcp_server_shutdown_complete, server_state);
+ err = grpc_tcp_server_create(&exec_ctx,
+ &server_state->tcp_server_shutdown_complete,
grpc_server_get_channel_args(server), &tcp);
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state->server = server;
- state->tcp = tcp;
- state->sc = sc;
- state->creds = grpc_server_credentials_ref(creds);
- state->is_shutdown = false;
- gpr_mu_init(&state->mu);
- gpr_ref_init(&state->refcount, 1);
+ server_state->server = server;
+ server_state->tcp = tcp;
+ server_state->sc = sc;
+ server_state->creds = grpc_server_credentials_ref(creds);
+ server_state->is_shutdown = true;
+ gpr_mu_init(&server_state->mu);
errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
for (i = 0; i < resolved->naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(
- tcp, (struct sockaddr *)&resolved->addrs[i].addr,
- resolved->addrs[i].len, &port_temp);
+ errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
if (errors[i] == GRPC_ERROR_NONE) {
if (port_num == -1) {
port_num = port_temp;
@@ -313,7 +326,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_resolved_addresses_destroy(resolved);
/* Register with the server only upon success */
- grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
+ grpc_server_add_listener(&exec_ctx, server, server_state,
+ server_start_listener, server_destroy_listener);
grpc_exec_ctx_finish(&exec_ctx);
return port_num;
@@ -334,10 +348,11 @@ error:
grpc_tcp_server_unref(&exec_ctx, tcp);
} else {
if (sc) {
+ grpc_exec_ctx_flush(&exec_ctx);
GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
}
- if (state) {
- gpr_free(state);
+ if (server_state) {
+ gpr_free(server_state);
}
}
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index 7d5279b9da..bd87253ed3 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
-extern int grpc_http_write_state_trace;
-
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
- grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 1dd7fef76f..4a9f806354 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -65,91 +65,73 @@
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
int grpc_http_trace = 0;
int grpc_flowctl_trace = 0;
-int grpc_http_write_state_trace = 0;
-
-#define TRANSPORT_FROM_WRITING(tw) \
- ((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
- writing)))
-
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-#define TRANSPORT_FROM_GLOBAL(tg) \
- ((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
- global)))
-
-#define STREAM_FROM_GLOBAL(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
-
-#define STREAM_FROM_PARSING(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, parsing)))
static const grpc_transport_vtable vtable;
/* forward declarations of various callbacks that we'll build closures around */
-static void writing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void reading_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void initiate_writing_locked(grpc_exec_ctx *exec_ctx, void *t,
+static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
+static void write_action_end(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
-static void initiate_read_flush_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
-static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_error *error);
+static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error);
+static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_setting_id id, uint32_t value);
-/** Start disconnection chain */
-static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error);
-
-/** Cancel a stream: coming from the transport API */
-static void cancel_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error);
-
-static void close_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error);
+static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error);
/** Start new streams that have been created if we can */
-static void maybe_start_some_streams(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global);
-
-static void connectivity_state_set(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_connectivity_state state, grpc_error *error, const char *reason);
+static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
-static void check_read_ops(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global);
-
-static void incoming_byte_stream_update_flow_control(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
- size_t have_already);
+static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_connectivity_state state,
+ grpc_error *error, const char *reason);
+
+static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ size_t max_size_hint,
+ size_t have_already);
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored);
static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *error);
-static void set_write_state(grpc_chttp2_transport *t,
- grpc_chttp2_write_state state, const char *reason);
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+
+static void close_transport_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_error *error);
+static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error);
/*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
@@ -161,34 +143,31 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
grpc_endpoint_destroy(exec_ctx, t->ep);
- gpr_slice_buffer_destroy(&t->global.qbuf);
+ gpr_slice_buffer_destroy(&t->qbuf);
- gpr_slice_buffer_destroy(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_destroy(&t->writing.hpack_compressor);
+ gpr_slice_buffer_destroy(&t->outbuf);
+ grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
- gpr_slice_buffer_destroy(&t->parsing.qbuf);
gpr_slice_buffer_destroy(&t->read_buffer);
- grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser);
- grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser);
+ grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
+ grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
for (i = 0; i < STREAM_LIST_COUNT; i++) {
GPR_ASSERT(t->lists[i].head == NULL);
GPR_ASSERT(t->lists[i].tail == NULL);
}
- GPR_ASSERT(grpc_chttp2_stream_map_size(&t->parsing_stream_map) == 0);
- GPR_ASSERT(grpc_chttp2_stream_map_size(&t->new_stream_map) == 0);
+ GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
- grpc_chttp2_stream_map_destroy(&t->parsing_stream_map);
- grpc_chttp2_stream_map_destroy(&t->new_stream_map);
+ grpc_chttp2_stream_map_destroy(&t->stream_map);
grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker);
- grpc_combiner_destroy(exec_ctx, t->executor.combiner);
+ grpc_combiner_destroy(exec_ctx, t->combiner);
/* callback remaining pings: they're not allowed to call into the transport,
and maybe they hold resources that need to be freed */
- while (t->global.pings.next != &t->global.pings) {
- grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
+ while (t->pings.next != &t->pings) {
+ grpc_chttp2_outstanding_ping *ping = t->pings.next;
grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
GRPC_ERROR_CREATE("Transport closed"), NULL);
ping->next->prev = ping->prev;
@@ -196,37 +175,40 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(ping);
}
+ while (t->write_cb_pool) {
+ grpc_chttp2_write_cb *next = t->write_cb_pool->next;
+ gpr_free(t->write_cb_pool);
+ t->write_cb_pool = next;
+ }
+
gpr_free(t->peer_string);
gpr_free(t);
}
-/*#define REFCOUNTING_DEBUG 1*/
-#ifdef REFCOUNTING_DEBUG
-#define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__)
-#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__)
-static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- const char *reason, const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2:unref:%p %d->%d %s [%s:%d]", t, t->refs.count,
- t->refs.count - 1, reason, file, line);
+#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line) {
+ gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
+ t->refs.count, t->refs.count - 1, reason, file, line);
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
-static void ref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2: ref:%p %d->%d %s [%s:%d]", t, t->refs.count,
- t->refs.count + 1, reason, file, line);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line) {
+ gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
+ t->refs.count, t->refs.count + 1, reason, file, line);
gpr_ref(&t->refs);
}
#else
-#define REF_TRANSPORT(t, r) ref_transport(t)
-#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t)
-static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
-static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -241,51 +223,46 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
memset(t, 0, sizeof(*t));
t->base.vtable = &vtable;
- t->executor.write_state = GRPC_CHTTP2_WRITES_CORKED;
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
- /* ref is dropped at transport close() */
- gpr_ref_init(&t->shutdown_ep_refs, 1);
- t->executor.combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
+ t->combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
- t->global.next_stream_id = is_client ? 1 : 2;
- t->global.is_client = is_client;
- t->writing.outgoing_window = DEFAULT_WINDOW;
- t->parsing.incoming_window = DEFAULT_WINDOW;
- t->global.stream_lookahead = DEFAULT_WINDOW;
- t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
- t->global.ping_counter = 1;
- t->global.pings.next = t->global.pings.prev = &t->global.pings;
- t->parsing.is_client = is_client;
- t->parsing.deframe_state =
- is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
- t->parsing.is_first_frame = true;
- t->writing.is_client = is_client;
+ t->next_stream_id = is_client ? 1 : 2;
+ t->is_client = is_client;
+ t->outgoing_window = DEFAULT_WINDOW;
+ t->incoming_window = DEFAULT_WINDOW;
+ t->stream_lookahead = DEFAULT_WINDOW;
+ t->connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
+ t->ping_counter = 1;
+ t->pings.next = t->pings.prev = &t->pings;
+ t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
+ t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
- gpr_slice_buffer_init(&t->global.qbuf);
-
- gpr_slice_buffer_init(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor);
- grpc_closure_init(&t->writing_action, writing_action, t);
- grpc_closure_init(&t->reading_action, reading_action, t);
- grpc_closure_init(&t->reading_action_locked, reading_action_locked, t);
- grpc_closure_init(&t->parsing_action, parsing_action, t);
- grpc_closure_init(&t->post_parse_locked, post_parse_locked, t);
- grpc_closure_init(&t->initiate_writing, initiate_writing_locked, t);
- grpc_closure_init(&t->terminate_writing, terminate_writing_with_lock, t);
- grpc_closure_init(&t->initiate_read_flush_locked, initiate_read_flush_locked,
- t);
- grpc_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
- &t->writing);
+ gpr_slice_buffer_init(&t->qbuf);
- gpr_slice_buffer_init(&t->parsing.qbuf);
- grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
- grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser);
+ gpr_slice_buffer_init(&t->outbuf);
+ grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
+
+ grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
+ t);
+ grpc_closure_init(&t->write_action, write_action, t);
+ grpc_closure_init(&t->write_action_end, write_action_end, t);
+ grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
+ grpc_closure_init(&t->read_action_begin, read_action_begin, t);
+ grpc_closure_init(&t->read_action_locked, read_action_locked, t);
+ grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
+ grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
+ grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+ grpc_closure_init(&t->destructive_reclaimer_locked,
+ destructive_reclaimer_locked, t);
+
+ grpc_chttp2_goaway_parser_init(&t->goaway_parser);
+ grpc_chttp2_hpack_parser_init(&t->hpack_parser);
gpr_slice_buffer_init(&t->read_buffer);
@@ -294,28 +271,24 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
large enough that the exponential growth should happen nicely when it's
needed.
TODO(ctiller): tune this */
- grpc_chttp2_stream_map_init(&t->parsing_stream_map, 8);
- grpc_chttp2_stream_map_init(&t->new_stream_map, 8);
+ grpc_chttp2_stream_map_init(&t->stream_map, 8);
/* copy in initial settings to all setting sets */
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
- t->parsing.settings[i] = grpc_chttp2_settings_parameters[i].default_value;
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
- t->global.settings[j][i] =
- grpc_chttp2_settings_parameters[i].default_value;
+ t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
- t->global.dirtied_local_settings = 1;
+ t->dirtied_local_settings = 1;
/* Hack: it's common for implementations to assume 65536 bytes initial send
window -- this should by rights be 0 */
- t->global.force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- t->global.sent_local_settings = 0;
+ t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ t->sent_local_settings = 0;
if (is_client) {
- gpr_slice_buffer_add(
- &t->writing.outbuf,
- gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "initial_write");
+ gpr_slice_buffer_add(&t->outbuf, gpr_slice_from_copied_string(
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "initial_write");
}
/* configure http2 the way we like it */
@@ -330,34 +303,18 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
- if (0 ==
- strcmp(channel_args->args[i].key, GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
- if (is_client) {
- gpr_log(GPR_ERROR, "%s: is ignored on the client",
- GRPC_ARG_MAX_CONCURRENT_STREAMS);
- } else {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
- (uint32_t)value);
- }
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
+ if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- if ((t->global.next_stream_id & 1) != (value & 1)) {
+ if ((t->next_stream_id & 1) != (value & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
- t->global.next_stream_id & 1,
- is_client ? "client" : "server");
+ t->next_stream_id & 1, is_client ? "client" : "server");
} else {
- t->global.next_stream_id = (uint32_t)value;
+ t->next_stream_id = (uint32_t)value;
}
}
} else if (0 == strcmp(channel_args->args[i].key,
@@ -366,16 +323,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- t->global.stream_lookahead = (uint32_t)value;
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER)) {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
- (uint32_t)value);
+ t->stream_lookahead = (uint32_t)value;
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
@@ -383,118 +331,126 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- grpc_chttp2_hpack_compressor_set_max_usable_size(
- &t->writing.hpack_compressor, (uint32_t)value);
+ grpc_chttp2_hpack_compressor_set_max_usable_size(&t->hpack_compressor,
+ (uint32_t)value);
}
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_MAX_METADATA_SIZE)) {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
- (uint32_t)value);
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE)) {
- if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
- gpr_log(GPR_ERROR, "%s: must be an integer",
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE);
- } else if (channel_args->args[i].value.integer < 16384 ||
- channel_args->args[i].value.integer > 16777215) {
- gpr_log(GPR_ERROR, "%s: must be between 16384 and 16777215",
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE);
- } else {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)channel_args->args[i].value.integer);
+ } else {
+ static const struct {
+ const char *channel_arg_name;
+ grpc_chttp2_setting_id setting_id;
+ grpc_integer_options integer_options;
+ bool availability[2] /* server, client */;
+ } settings_map[] = {
+ {GRPC_ARG_MAX_CONCURRENT_STREAMS,
+ GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
+ {-1, 0, INT_MAX},
+ {true, false}},
+ {GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
+ GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+ {-1, 0, INT_MAX},
+ {true, true}},
+ {GRPC_ARG_MAX_METADATA_SIZE,
+ GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ {-1, 0, INT_MAX},
+ {true, true}},
+ {GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
+ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ {-1, 16384, 16777215},
+ {true, true}},
+ };
+ for (j = 0; j < (int)GPR_ARRAY_SIZE(settings_map); j++) {
+ if (0 == strcmp(channel_args->args[i].key,
+ settings_map[j].channel_arg_name)) {
+ if (!settings_map[j].availability[is_client]) {
+ gpr_log(GPR_DEBUG, "%s is not available on %s",
+ settings_map[j].channel_arg_name,
+ is_client ? "clients" : "servers");
+ } else {
+ int value = grpc_channel_arg_get_integer(
+ &channel_args->args[i], settings_map[j].integer_options);
+ if (value >= 0) {
+ push_setting(exec_ctx, t, settings_map[j].setting_id,
+ (uint32_t)value);
+ }
+ }
+ break;
+ }
}
}
}
}
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "uncork");
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "init");
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
+ post_benign_reclaimer(exec_ctx, t);
}
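
For illustration, the table-driven settings_map above is exercised whenever a matching channel arg is supplied at channel creation; a minimal caller-side sketch (hypothetical values, assuming the standard grpc_arg/grpc_channel_args types from <grpc/grpc.h>):

    grpc_arg arg;
    arg.type = GRPC_ARG_INTEGER;
    arg.key = GRPC_ARG_HTTP2_MAX_FRAME_SIZE; /* validated against [16384, 16777215] */
    arg.value.integer = 65536;
    grpc_channel_args args = {1, &arg};
    /* init_transport() above maps this to
       push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, 65536). */
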
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
t->destroying = 1;
- drop_connection(exec_ctx, t, GRPC_ERROR_CREATE("Transport destroyed"));
- UNREF_TRANSPORT(exec_ctx, t, "destroy");
+ close_transport_locked(
+ exec_ctx, t,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Transport destroyed"),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
+ grpc_combiner_execute(exec_ctx, t->combiner,
grpc_closure_create(destroy_transport_locked, t),
- GRPC_ERROR_NONE);
-}
-
-/** block grpc_endpoint_shutdown being called until a paired
- allow_endpoint_shutdown is made */
-static void prevent_endpoint_shutdown(grpc_chttp2_transport *t) {
- gpr_ref(&t->shutdown_ep_refs);
-}
-
-static void allow_endpoint_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- if (gpr_unref(&t->shutdown_ep_refs)) {
- grpc_endpoint_shutdown(exec_ctx, t->ep);
- }
+ GRPC_ERROR_NONE, false);
}
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_error *error) {
if (!t->closed) {
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "W:%p close transport", t);
+ if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
+ if (t->close_transport_on_writes_finished == NULL) {
+ t->close_transport_on_writes_finished =
+ GRPC_ERROR_CREATE("Delayed close due to in-progress write");
+ }
+ t->close_transport_on_writes_finished =
+ grpc_error_add_child(t->close_transport_on_writes_finished, error);
+ return;
+ }
+ if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE);
}
t->closed = 1;
- connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_SHUTDOWN,
+ connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
- allow_endpoint_shutdown_locked(exec_ctx, t);
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
/* flush writable stream list to avoid dangling references */
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_writing *stream_writing;
- while (grpc_chttp2_list_pop_writable_stream(
- &t->global, &t->writing, &stream_global, &stream_writing)) {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ grpc_chttp2_stream *s;
+ while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close");
}
+ end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
- const char *reason) {
- grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount, reason);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) {
+ grpc_stream_ref(s->refcount, reason);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global,
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason) {
- grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount,
- reason);
+ grpc_stream_unref(exec_ctx, s->refcount, reason);
}
#else
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global) {
- grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s) {
+ grpc_stream_ref(s->refcount);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global) {
- grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) {
+ grpc_stream_unref(exec_ctx, s->refcount);
}
#endif
-static void finish_init_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
- grpc_error *error) {
- grpc_chttp2_stream *s = sp;
- grpc_chttp2_register_stream(s->t, s);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "init");
-}
-
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
const void *server_data) {
@@ -509,41 +465,31 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
/* We reserve one 'active stream' that's dropped when the stream is
read-closed. The others are for incoming_byte_streams that are actively
reading */
- gpr_ref_init(&s->global.active_streams, 1);
- GRPC_CHTTP2_STREAM_REF(&s->global, "chttp2");
+ gpr_ref_init(&s->active_streams, 1);
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2");
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[1]);
- grpc_chttp2_incoming_metadata_buffer_init(
- &s->global.received_initial_metadata);
- grpc_chttp2_incoming_metadata_buffer_init(
- &s->global.received_trailing_metadata);
- grpc_chttp2_data_parser_init(&s->parsing.data_parser);
- gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
- s->global.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]);
+ grpc_chttp2_data_parser_init(&s->data_parser);
+ gpr_slice_buffer_init(&s->flow_controlled_buffer);
+ s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_closure_init(&s->complete_fetch, complete_fetch, s);
+ grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
- REF_TRANSPORT(t, "stream");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
if (server_data) {
- GPR_ASSERT(t->executor.parsing_active);
- s->global.id = (uint32_t)(uintptr_t)server_data;
- s->parsing.id = s->global.id;
- s->global.outgoing_window =
- t->global.settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- s->parsing.incoming_window = s->global.max_recv_bytes =
- t->global.settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->id = (uint32_t)(uintptr_t)server_data;
+ s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->incoming_window = s->max_recv_bytes =
+ t->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
- grpc_chttp2_stream_map_add(&t->parsing_stream_map, s->global.id, s);
- s->global.in_stream_map = true;
+ grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
}
- grpc_closure_init(&s->init_stream, finish_init_stream_locked, s);
- GRPC_CHTTP2_STREAM_REF(&s->global, "init");
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &s->init_stream,
- GRPC_ERROR_NONE);
-
GPR_TIMER_END("init_stream", 0);
return 0;
@@ -557,55 +503,39 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_BEGIN("destroy_stream", 0);
- GPR_ASSERT((s->global.write_closed && s->global.read_closed) ||
- s->global.id == 0);
- GPR_ASSERT(!s->global.in_stream_map);
- if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(
- exec_ctx, t,
- GRPC_ERROR_CREATE("Last stream closed after sending goaway"));
- }
- if (!t->executor.parsing_active && s->global.id) {
- GPR_ASSERT(grpc_chttp2_stream_map_find(&t->parsing_stream_map,
- s->global.id) == NULL);
+ GPR_ASSERT((s->write_closed && s->read_closed) || s->id == 0);
+ if (s->id != 0) {
+ GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == NULL);
}
- while (
- (bs = grpc_chttp2_incoming_frame_queue_pop(&s->global.incoming_frames))) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames))) {
incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- grpc_chttp2_list_remove_unannounced_incoming_window_available(&t->global,
- &s->global);
- grpc_chttp2_list_remove_stalled_by_transport(&t->global, &s->global);
- grpc_chttp2_list_remove_check_read_ops(&t->global, &s->global);
+ grpc_chttp2_list_remove_stalled_by_transport(t, s);
for (int i = 0; i < STREAM_LIST_COUNT; i++) {
if (s->included[i]) {
gpr_log(GPR_ERROR, "%s stream %d still included in list %d",
- t->global.is_client ? "client" : "server", s->global.id, i);
+ t->is_client ? "client" : "server", s->id, i);
abort();
}
}
- GPR_ASSERT(s->global.send_initial_metadata_finished == NULL);
- GPR_ASSERT(s->global.send_message_finished == NULL);
- GPR_ASSERT(s->global.send_trailing_metadata_finished == NULL);
- GPR_ASSERT(s->global.recv_initial_metadata_ready == NULL);
- GPR_ASSERT(s->global.recv_message_ready == NULL);
- GPR_ASSERT(s->global.recv_trailing_metadata_finished == NULL);
- grpc_chttp2_data_parser_destroy(exec_ctx, &s->parsing.data_parser);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[1]);
- grpc_chttp2_incoming_metadata_buffer_destroy(
- &s->global.received_initial_metadata);
- grpc_chttp2_incoming_metadata_buffer_destroy(
- &s->global.received_trailing_metadata);
- gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
- GRPC_ERROR_UNREF(s->global.read_closed_error);
- GRPC_ERROR_UNREF(s->global.write_closed_error);
-
- UNREF_TRANSPORT(exec_ctx, t, "stream");
+ GPR_ASSERT(s->send_initial_metadata_finished == NULL);
+ GPR_ASSERT(s->fetching_send_message == NULL);
+ GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
+ GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
+ GPR_ASSERT(s->recv_message_ready == NULL);
+ GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
+ grpc_chttp2_data_parser_destroy(exec_ctx, &s->data_parser);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
+ gpr_slice_buffer_destroy(&s->flow_controlled_buffer);
+ GRPC_ERROR_UNREF(s->read_closed_error);
+ GRPC_ERROR_UNREF(s->write_closed_error);
+
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
GPR_TIMER_END("destroy_stream", 0);
@@ -620,280 +550,222 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = and_free_memory;
grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s);
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &s->destroy_stream,
- GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream,
+ GRPC_ERROR_NONE, false);
GPR_TIMER_END("destroy_stream", 0);
}
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, uint32_t id) {
- grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
- grpc_chttp2_stream *s =
- grpc_chttp2_stream_map_find(&t->parsing_stream_map, id);
- return s ? &s->parsing : NULL;
+grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+ uint32_t id) {
+ return grpc_chttp2_stream_map_find(&t->stream_map, id);
}
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- uint32_t id) {
+grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t id) {
+ if (t->channel_callback.accept_stream == NULL) {
+ return NULL;
+ }
grpc_chttp2_stream *accepting;
- grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
GPR_ASSERT(t->accepting_stream == NULL);
t->accepting_stream = &accepting;
t->channel_callback.accept_stream(exec_ctx,
t->channel_callback.accept_stream_user_data,
&t->base, (void *)(uintptr_t)id);
t->accepting_stream = NULL;
- return &accepting->parsing;
+ return accepting;
}
/*******************************************************************************
- * LOCK MANAGEMENT
+ * OUTPUT PROCESSING
*/
-static const char *write_state_name(grpc_chttp2_write_state state) {
- switch (state) {
- case GRPC_CHTTP2_WRITES_CORKED:
- return "CORKED";
- case GRPC_CHTTP2_WRITING_INACTIVE:
- return "INACTIVE";
- case GRPC_CHTTP2_WRITE_SCHEDULED:
- return "SCHEDULED";
- case GRPC_CHTTP2_WRITING:
+static const char *write_state_name(grpc_chttp2_write_state st) {
+ switch (st) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
+ return "IDLE";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
return "WRITING";
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- return "WRITING[p=1]";
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
- return "WRITING[p=0]";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
+ return "WRITING+MORE";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
+ return "WRITING+MORE+COVERED";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-static void set_write_state(grpc_chttp2_transport *t,
- grpc_chttp2_write_state state, const char *reason) {
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "W:%p %s -> %s because %s", t,
- write_state_name(t->executor.write_state), write_state_name(state),
- reason);
+static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_write_state st, const char *reason) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
+ t->is_client ? "CLIENT" : "SERVER",
+ write_state_name(t->write_state),
+ write_state_name(st), reason));
+ t->write_state = st;
+ if (st == GRPC_CHTTP2_WRITE_STATE_IDLE &&
+ t->close_transport_on_writes_finished != NULL) {
+ grpc_error *err = t->close_transport_on_writes_finished;
+ t->close_transport_on_writes_finished = NULL;
+ close_transport_locked(exec_ctx, t, err);
}
- t->executor.write_state = state;
-}
-
-static void initiate_writing_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = tp;
- GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED);
- start_writing(exec_ctx, t);
}
-static void initiate_read_flush_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = tp;
- t->executor.check_read_ops_scheduled = false;
- check_read_ops(exec_ctx, &t->global);
-}
-
-/*******************************************************************************
- * OUTPUT PROCESSING
- */
-
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
- /* Perform state checks, and transition to a scheduled state if appropriate.
- If we are inactive, schedule a write chain to begin once the transport
- combiner finishes any executions in its current batch (which may be
- scheduled AFTER this code executes). The write chain will:
- - call start_writing, which verifies (under the global lock) that there
- are things that need to be written by calling
- grpc_chttp2_unlocking_check_writes, and if so schedules writing_action
- against the current exec_ctx, to be executed OUTSIDE of the global lock
- - eventually writing_action results in grpc_chttp2_terminate_writing being
- called, which re-takes the global lock, updates state, checks if we need
- to do *another* write immediately, and if so loops back to
- start_writing.
-
- Current problems:
- - too much lock entry/exiting
- - the writing thread can become stuck indefinitely (punt through the
- workqueue periodically to fix) */
-
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- switch (t->executor.write_state) {
- case GRPC_CHTTP2_WRITES_CORKED:
+ switch (t->write_state) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, covered_by_poller);
break;
- case GRPC_CHTTP2_WRITING_INACTIVE:
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, reason);
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- covered_by_poller);
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
+ set_write_state(
+ exec_ctx, t,
+ covered_by_poller
+ ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER
+ : GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+ reason);
break;
- case GRPC_CHTTP2_WRITE_SCHEDULED:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
if (covered_by_poller) {
- /* upgrade to note poller is available to cover the write */
- grpc_combiner_force_async_finally(t->executor.combiner);
+ set_write_state(
+ exec_ctx, t,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
+ reason);
}
break;
- case GRPC_CHTTP2_WRITING:
- set_write_state(t,
- covered_by_poller ? GRPC_CHTTP2_WRITING_STALE_WITH_POLLER
- : GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
- reason);
- break;
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- /* nothing to do: write already requested */
- break;
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
- if (covered_by_poller) {
- /* upgrade to note poller is available to cover the write */
- set_write_state(t, GRPC_CHTTP2_WRITING_STALE_WITH_POLLER, reason);
- }
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
break;
}
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
-static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
- GPR_TIMER_BEGIN("start_writing", 0);
- GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED);
- if (!t->closed &&
- grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) {
- set_write_state(t, GRPC_CHTTP2_WRITING, "start_writing");
- prevent_endpoint_shutdown(t);
- grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
- } else {
- if (t->closed) {
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
- "start_writing:transport_closed");
- } else {
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
- "start_writing:nothing_to_write");
- }
- end_waiting_for_write(exec_ctx, t, GRPC_ERROR_NONE);
- UNREF_TRANSPORT(exec_ctx, t, "writing");
+void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, bool covered_by_poller,
+ const char *reason) {
+ if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
+ grpc_chttp2_initiate_write(exec_ctx, t, covered_by_poller, reason);
}
- GPR_TIMER_END("start_writing", 0);
}
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- bool covered_by_poller, const char *reason) {
- if (!TRANSPORT_FROM_GLOBAL(transport_global)->closed &&
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global)) {
- GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
- grpc_chttp2_initiate_write(exec_ctx, transport_global, covered_by_poller,
- reason);
+static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("write_action_begin_locked", 0);
+ grpc_chttp2_transport *t = gt;
+ GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
+ if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "begin writing");
+ grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL);
+ } else {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "begin writing nothing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
+ GPR_TIMER_END("write_action_begin_locked", 0);
}
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value) {
- const grpc_chttp2_setting_parameters *sp =
- &grpc_chttp2_settings_parameters[id];
- uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
- if (use_value != value) {
- gpr_log(GPR_INFO, "Requested parameter %s clamped from %d to %d", sp->name,
- value, use_value);
- }
- if (use_value != t->global.settings[GRPC_LOCAL_SETTINGS][id]) {
- t->global.settings[GRPC_LOCAL_SETTINGS][id] = use_value;
- t->global.dirtied_local_settings = 1;
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "push_setting");
- }
+static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
+ grpc_chttp2_transport *t = gt;
+ GPR_TIMER_BEGIN("write_action", 0);
+ grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end);
+ GPR_TIMER_END("write_action", 0);
}
-/* error may be GRPC_ERROR_NONE if there is no error allocated yet.
- In that case, use "reason" as the text for a new error. */
-static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_error *error) {
- grpc_chttp2_stream_global *stream_global;
- while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global,
- &stream_global)) {
- fail_pending_writes(exec_ctx, &t->global, stream_global,
- GRPC_ERROR_REF(error));
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
- }
- GRPC_ERROR_UNREF(error);
+static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = gt;
+ GPR_TIMER_BEGIN("write_action_end", 0);
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked,
+ GRPC_ERROR_REF(error), false);
+ GPR_TIMER_END("write_action_end", 0);
}
-static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
grpc_chttp2_transport *t = tp;
- allow_endpoint_shutdown_locked(exec_ctx, t);
if (error != GRPC_ERROR_NONE) {
- drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
}
- grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+ }
+ }
- end_waiting_for_write(exec_ctx, t, GRPC_ERROR_REF(error));
+ grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));
- switch (t->executor.write_state) {
- case GRPC_CHTTP2_WRITES_CORKED:
- case GRPC_CHTTP2_WRITING_INACTIVE:
- case GRPC_CHTTP2_WRITE_SCHEDULED:
+ switch (t->write_state) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
GPR_UNREACHABLE_CODE(break);
- case GRPC_CHTTP2_WRITING:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
GPR_TIMER_MARK("state=writing", 0);
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "terminate_writing");
- break;
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "terminate_writing");
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- true);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "finish writing");
break;
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "terminate_writing");
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- false);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "continue writing [!covered]");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, false);
+ break;
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
+ GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "continue writing [covered]");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, true);
break;
}
- UNREF_TRANSPORT(exec_ctx, t, "writing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
GPR_TIMER_END("terminate_writing_with_lock", 0);
}
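
Taken together, the switch in grpc_chttp2_initiate_write and the one directly above define the new write-state machine; a rough sketch of the transitions (state names as introduced in this change, behaviour inferred from the two switches):

    /* Sketch of the simplified write-state machine:
       IDLE    --initiate_write-->  WRITING
               (ref transport as "writing", schedule write_action_begin_locked)
       WRITING --initiate_write-->  WRITING_WITH_MORE
               (or WRITING_WITH_MORE_AND_COVERED_BY_POLLER if covered_by_poller)
       WRITING --write_action_end_locked, nothing more queued-->  IDLE
               (also where a delayed close_transport_on_writes_finished fires)
       WRITING_WITH_MORE[_AND_COVERED_BY_POLLER]
               --write_action_end_locked-->  WRITING
               (loops back into write_action_begin_locked via the combiner) */
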
-void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
- void *transport_writing, grpc_error *error) {
- GPR_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
- grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &t->terminate_writing,
- GRPC_ERROR_REF(error));
- GPR_TIMER_END("grpc_chttp2_terminate_writing", 0);
-}
-
-static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
- grpc_error *error) {
- grpc_chttp2_transport *t = gt;
- GPR_TIMER_BEGIN("writing_action", 0);
- grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
- GPR_TIMER_END("writing_action", 0);
+static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value) {
+ const grpc_chttp2_setting_parameters *sp =
+ &grpc_chttp2_settings_parameters[id];
+ uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
+ if (use_value != value) {
+ gpr_log(GPR_INFO, "Requested parameter %s clamped from %d to %d", sp->name,
+ value, use_value);
+ }
+ if (use_value != t->settings[GRPC_LOCAL_SETTINGS][id]) {
+ t->settings[GRPC_LOCAL_SETTINGS][id] = use_value;
+ t->dirtied_local_settings = 1;
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "push_setting");
+ }
}
-void grpc_chttp2_add_incoming_goaway(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- uint32_t goaway_error, gpr_slice goaway_text) {
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t goaway_error,
+ gpr_slice goaway_text) {
char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
gpr_slice_unref(goaway_text);
- transport_global->seen_goaway = 1;
+ t->seen_goaway = 1;
/* lie: use transient failure from the transport to indicate goaway has been
* received */
connectivity_state_set(
- exec_ctx, transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_str(
grpc_error_set_int(GRPC_ERROR_CREATE("GOAWAY received"),
GRPC_ERROR_INT_HTTP2_ERROR,
@@ -903,61 +775,50 @@ void grpc_chttp2_add_incoming_goaway(
gpr_free(msg);
}
-static void maybe_start_some_streams(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
- grpc_chttp2_stream_global *stream_global;
+static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s;
uint32_t stream_incoming_window;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
- while (transport_global->next_stream_id <= MAX_CLIENT_STREAM_ID &&
- transport_global->concurrent_stream_count <
- transport_global
- ->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
- grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
- &stream_global)) {
+ while (t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
+ grpc_chttp2_stream_map_size(&t->stream_map) <
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
+ grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
/* safe since we can't (legally) be parsing this stream yet */
- grpc_chttp2_stream_parsing *stream_parsing =
- &STREAM_FROM_GLOBAL(stream_global)->parsing;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
- transport_global->is_client ? "CLI" : "SVR", stream_global,
- transport_global->next_stream_id));
+ t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
- GPR_ASSERT(stream_global->id == 0);
- stream_global->id = stream_parsing->id = transport_global->next_stream_id;
- transport_global->next_stream_id += 2;
+ GPR_ASSERT(s->id == 0);
+ s->id = t->next_stream_id;
+ t->next_stream_id += 2;
- if (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- connectivity_state_set(
- exec_ctx, transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE("Stream IDs exhausted"), "no_more_stream_ids");
+ if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) {
+ connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE("Stream IDs exhausted"),
+ "no_more_stream_ids");
}
- stream_global->outgoing_window =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_parsing->incoming_window = stream_incoming_window =
- transport_global->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_global->max_recv_bytes =
- GPR_MAX(stream_incoming_window, stream_global->max_recv_bytes);
- grpc_chttp2_stream_map_add(
- &TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
- stream_global->id, STREAM_FROM_GLOBAL(stream_global));
- stream_global->in_stream_map = true;
- transport_global->concurrent_stream_count++;
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, true,
- "new_stream");
+ s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->incoming_window = stream_incoming_window =
+ t->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->max_recv_bytes = GPR_MAX(stream_incoming_window, s->max_recv_bytes);
+ grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
- while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
- grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
- &stream_global)) {
- cancel_from_api(exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE("Stream IDs exhausted"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+ while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
+ grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Stream IDs exhausted"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE));
}
}
@@ -969,48 +830,125 @@ static grpc_closure *add_closure_barrier(grpc_closure *closure) {
return closure;
}
-void grpc_chttp2_complete_closure_step(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
- grpc_error *error) {
+static void null_then_run_closure(grpc_exec_ctx *exec_ctx,
+ grpc_closure **closure, grpc_error *error) {
+ grpc_closure *c = *closure;
+ *closure = NULL;
+ grpc_closure_run(exec_ctx, c, error);
+}
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_closure **pclosure,
+ grpc_error *error, const char *desc) {
grpc_closure *closure = *pclosure;
+ *pclosure = NULL;
if (closure == NULL) {
GRPC_ERROR_UNREF(error);
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (error != GRPC_ERROR_NONE) {
- if (closure->error == GRPC_ERROR_NONE) {
- closure->error =
+ if (closure->error_data.error == GRPC_ERROR_NONE) {
+ closure->error_data.error =
GRPC_ERROR_CREATE("Error in HTTP transport completing operation");
- closure->error = grpc_error_set_str(
- closure->error, GRPC_ERROR_STR_TARGET_ADDRESS,
- TRANSPORT_FROM_GLOBAL(transport_global)->peer_string);
+ closure->error_data.error =
+ grpc_error_set_str(closure->error_data.error,
+ GRPC_ERROR_STR_TARGET_ADDRESS, t->peer_string);
}
- closure->error = grpc_error_add_child(closure->error, error);
+ closure->error_data.error =
+ grpc_error_add_child(closure->error_data.error, error);
}
if (closure->next_data.scratch < CLOSURE_BARRIER_FIRST_REF_BIT) {
if (closure->next_data.scratch & CLOSURE_BARRIER_STATS_BIT) {
- grpc_transport_move_stats(&stream_global->stats,
- stream_global->collecting_stats);
- stream_global->collecting_stats = NULL;
+ grpc_transport_move_stats(&s->stats, s->collecting_stats);
+ s->collecting_stats = NULL;
}
- grpc_exec_ctx_sched(exec_ctx, closure, closure->error, NULL);
+ grpc_closure_run(exec_ctx, closure, closure->error_data.error);
}
- *pclosure = NULL;
}
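
As a reading aid, the barrier arithmetic used above can be summarised as follows (constants as defined earlier in this file; their exact bit values are not shown in this hunk):

    /* next_data.scratch acts as a refcount in units of
       CLOSURE_BARRIER_FIRST_REF_BIT, with the low bits reserved for flags
       such as CLOSURE_BARRIER_STATS_BIT:
         on_complete->next_data.scratch = CLOSURE_BARRIER_FIRST_REF_BIT; // initial ref
         add_closure_barrier(on_complete);   // +FIRST_REF_BIT per pending sub-op
         grpc_chttp2_complete_closure_step() // -FIRST_REF_BIT; runs the closure
                                             // once the count drops below it */
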
-static int contains_non_ok_status(
- grpc_chttp2_transport_global *transport_global,
- grpc_metadata_batch *batch) {
+static bool contains_non_ok_status(grpc_metadata_batch *batch) {
grpc_linked_mdelem *l;
for (l = batch->list.head; l; l = l->next) {
if (l->md->key == GRPC_MDSTR_GRPC_STATUS &&
l->md != GRPC_MDELEM_GRPC_STATUS_0) {
- return 1;
+ return true;
}
}
- return 0;
+ return false;
+}
+
+static void add_fetched_slice_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ s->fetched_send_message_length +=
+ (uint32_t)GPR_SLICE_LENGTH(s->fetching_slice);
+ gpr_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
+ if (s->id != 0) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
+ }
+}
+
+static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ for (;;) {
+ if (s->fetching_send_message == NULL) {
+ /* Stream was cancelled before message fetch completed */
+ abort(); /* TODO(ctiller): what cleanup here? */
+ return; /* early out */
+ }
+ if (s->fetched_send_message_length == s->fetching_send_message->length) {
+ int64_t notify_offset = s->next_message_end_offset;
+ if (notify_offset <= s->flow_controlled_bytes_written) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE,
+ "fetching_send_message_finished");
+ } else {
+ grpc_chttp2_write_cb *cb = t->write_cb_pool;
+ if (cb == NULL) {
+ cb = gpr_malloc(sizeof(*cb));
+ } else {
+ t->write_cb_pool = cb->next;
+ }
+ cb->call_at_byte = notify_offset;
+ cb->closure = s->fetching_send_message_finished;
+ s->fetching_send_message_finished = NULL;
+ cb->next = s->on_write_finished_cbs;
+ s->on_write_finished_cbs = cb;
+ }
+ s->fetching_send_message = NULL;
+ return; /* early out */
+ } else if (grpc_byte_stream_next(exec_ctx, s->fetching_send_message,
+ &s->fetching_slice, UINT32_MAX,
+ &s->complete_fetch)) {
+ add_fetched_slice_locked(exec_ctx, t, s);
+ }
+ }
+}
+
+static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = gs;
+ grpc_chttp2_transport *t = s->t;
+ if (error == GRPC_ERROR_NONE) {
+ add_fetched_slice_locked(exec_ctx, t, s);
+ continue_fetching_send_locked(exec_ctx, t, s);
+ } else {
+ /* TODO(ctiller): what to do here */
+ abort();
+ }
+}
+
+static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = gs;
+ grpc_chttp2_transport *t = s->t;
+ grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked,
+ GRPC_ERROR_REF(error),
+ s->complete_fetch_covered_by_poller);
}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
@@ -1022,12 +960,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_transport_stream_op *op = stream_op;
grpc_chttp2_transport *t = op->transport_private.args[0];
grpc_chttp2_stream *s = op->transport_private.args[1];
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global = &s->global;
if (grpc_http_trace) {
char *str = grpc_transport_stream_op_string(op);
- gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s", str);
+ gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
+ op->on_complete);
gpr_free(str);
}
@@ -1035,45 +972,42 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (on_complete == NULL) {
on_complete = grpc_closure_create(do_nothing, NULL);
}
+
/* use final_data as a barrier until enqueue time; the initial counter is
dropped at the end of this function */
on_complete->next_data.scratch = CLOSURE_BARRIER_FIRST_REF_BIT;
- on_complete->error = GRPC_ERROR_NONE;
+ on_complete->error_data.error = GRPC_ERROR_NONE;
if (op->collect_stats != NULL) {
- GPR_ASSERT(stream_global->collecting_stats == NULL);
- stream_global->collecting_stats = op->collect_stats;
+ GPR_ASSERT(s->collecting_stats == NULL);
+ s->collecting_stats = op->collect_stats;
on_complete->next_data.scratch |= CLOSURE_BARRIER_STATS_BIT;
}
if (op->cancel_error != GRPC_ERROR_NONE) {
- cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(op->cancel_error));
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, GRPC_ERROR_REF(op->cancel_error));
}
if (op->close_error != GRPC_ERROR_NONE) {
- close_from_api(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(op->close_error));
+ close_from_api(exec_ctx, t, s, GRPC_ERROR_REF(op->close_error));
}
if (op->send_initial_metadata != NULL) {
- GPR_ASSERT(stream_global->send_initial_metadata_finished == NULL);
- stream_global->send_initial_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->send_initial_metadata = op->send_initial_metadata;
+ GPR_ASSERT(s->send_initial_metadata_finished == NULL);
+ s->send_initial_metadata_finished = add_closure_barrier(on_complete);
+ s->send_initial_metadata = op->send_initial_metadata;
const size_t metadata_size =
grpc_metadata_batch_size(op->send_initial_metadata);
const size_t metadata_peer_limit =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
- if (transport_global->is_client) {
- stream_global->deadline =
- gpr_time_min(stream_global->deadline,
- stream_global->send_initial_metadata->deadline);
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ if (t->is_client) {
+ s->deadline =
+ gpr_time_min(s->deadline, s->send_initial_metadata->deadline);
}
if (metadata_size > metadata_peer_limit) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
grpc_error_set_int(
@@ -1083,64 +1017,83 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
- if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (contains_non_ok_status(op->send_initial_metadata)) {
+ s->seen_error = true;
}
- if (!stream_global->write_closed) {
- if (transport_global->is_client) {
- GPR_ASSERT(stream_global->id == 0);
- grpc_chttp2_list_add_waiting_for_concurrency(transport_global,
- stream_global);
- maybe_start_some_streams(exec_ctx, transport_global);
+ if (!s->write_closed) {
+ if (t->is_client) {
+ if (!t->closed) {
+ GPR_ASSERT(s->id == 0);
+ grpc_chttp2_list_add_waiting_for_concurrency(t, s);
+ maybe_start_some_streams(exec_ctx, t);
+ } else {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s,
+ GRPC_ERROR_CREATE("Transport closed"));
+ }
} else {
- GPR_ASSERT(stream_global->id != 0);
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_initial_metadata");
+ GPR_ASSERT(s->id != 0);
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
+ "op.send_initial_metadata");
}
} else {
- stream_global->send_trailing_metadata = NULL;
+ s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished,
+ exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_CREATE(
- "Attempt to send initial metadata after stream was closed"));
+ "Attempt to send initial metadata after stream was closed"),
+ "send_initial_metadata_finished");
}
}
}
if (op->send_message != NULL) {
- GPR_ASSERT(stream_global->send_message_finished == NULL);
- GPR_ASSERT(stream_global->send_message == NULL);
- stream_global->send_message_finished = add_closure_barrier(on_complete);
- if (stream_global->write_closed) {
+ s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
+ if (s->write_closed) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished,
- GRPC_ERROR_CREATE("Attempt to send message after stream was closed"));
+ exec_ctx, t, s, &s->fetching_send_message_finished,
+ GRPC_ERROR_CREATE("Attempt to send message after stream was closed"),
+ "fetching_send_message_finished");
} else {
- stream_global->send_message = op->send_message;
- if (stream_global->id != 0) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_message");
+ GPR_ASSERT(s->fetching_send_message == NULL);
+ uint8_t *frame_hdr =
+ gpr_slice_buffer_tiny_add(&s->flow_controlled_buffer, 5);
+ uint32_t flags = op->send_message->flags;
+ frame_hdr[0] = (flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
+ size_t len = op->send_message->length;
+ frame_hdr[1] = (uint8_t)(len >> 24);
+ frame_hdr[2] = (uint8_t)(len >> 16);
+ frame_hdr[3] = (uint8_t)(len >> 8);
+ frame_hdr[4] = (uint8_t)(len);
+ s->fetching_send_message = op->send_message;
+ s->fetched_send_message_length = 0;
+ s->next_message_end_offset = s->flow_controlled_bytes_written +
+ (int64_t)s->flow_controlled_buffer.length +
+ (int64_t)len;
+ s->complete_fetch_covered_by_poller = op->covered_by_poller;
+ if (flags & GRPC_WRITE_BUFFER_HINT) {
+ /* allow up to 64kb to be buffered */
+ /* TODO(ctiller): make this configurable */
+ s->next_message_end_offset -= 65536;
+ }
+ continue_fetching_send_locked(exec_ctx, t, s);
+ if (s->id != 0) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
}
}
if (op->send_trailing_metadata != NULL) {
- GPR_ASSERT(stream_global->send_trailing_metadata_finished == NULL);
- stream_global->send_trailing_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->send_trailing_metadata = op->send_trailing_metadata;
+ GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
+ s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
+ s->send_trailing_metadata = op->send_trailing_metadata;
const size_t metadata_size =
grpc_metadata_batch_size(op->send_trailing_metadata);
const size_t metadata_peer_limit =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (metadata_size > metadata_peer_limit) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
grpc_error_set_int(
@@ -1150,69 +1103,59 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
- if (contains_non_ok_status(transport_global,
- op->send_trailing_metadata)) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (contains_non_ok_status(op->send_trailing_metadata)) {
+ s->seen_error = true;
}
- if (stream_global->write_closed) {
- stream_global->send_trailing_metadata = NULL;
+ if (s->write_closed) {
+ s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished,
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
grpc_metadata_batch_is_empty(op->send_trailing_metadata)
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE("Attempt to send trailing metadata after "
- "stream was closed"));
- } else if (stream_global->id != 0) {
+ "stream was closed"),
+ "send_trailing_metadata_finished");
+ } else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_trailing_metadata");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
+ "op.send_trailing_metadata");
}
}
}
if (op->recv_initial_metadata != NULL) {
- GPR_ASSERT(stream_global->recv_initial_metadata_ready == NULL);
- stream_global->recv_initial_metadata_ready =
- op->recv_initial_metadata_ready;
- stream_global->recv_initial_metadata = op->recv_initial_metadata;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
+ s->recv_initial_metadata_ready = op->recv_initial_metadata_ready;
+ s->recv_initial_metadata = op->recv_initial_metadata;
+ grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
if (op->recv_message != NULL) {
- GPR_ASSERT(stream_global->recv_message_ready == NULL);
- stream_global->recv_message_ready = op->recv_message_ready;
- stream_global->recv_message = op->recv_message;
- if (stream_global->id != 0 &&
- (stream_global->incoming_frames.head == NULL ||
- stream_global->incoming_frames.head->is_tail)) {
- incoming_byte_stream_update_flow_control(
- exec_ctx, transport_global, stream_global,
- transport_global->stream_lookahead, 0);
+ GPR_ASSERT(s->recv_message_ready == NULL);
+ s->recv_message_ready = op->recv_message_ready;
+ s->recv_message = op->recv_message;
+ if (s->id != 0 &&
+ (s->incoming_frames.head == NULL || s->incoming_frames.head->is_tail)) {
+ incoming_byte_stream_update_flow_control(exec_ctx, t, s,
+ t->stream_lookahead, 0);
}
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
}
if (op->recv_trailing_metadata != NULL) {
- GPR_ASSERT(stream_global->recv_trailing_metadata_finished == NULL);
- stream_global->recv_trailing_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->recv_trailing_metadata = op->recv_trailing_metadata;
- stream_global->final_metadata_requested = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
+ s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
+ s->recv_trailing_metadata = op->recv_trailing_metadata;
+ s->final_metadata_requested = true;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
- grpc_chttp2_complete_closure_step(exec_ctx, transport_global, stream_global,
- &on_complete, GRPC_ERROR_NONE);
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &on_complete,
+ GRPC_ERROR_NONE, "op->on_complete");
GPR_TIMER_END("perform_stream_op_locked", 0);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "perform_stream_op");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "perform_stream_op");
}
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1220,70 +1163,68 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GPR_TIMER_BEGIN("perform_stream_op", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+
+ if (grpc_http_trace) {
+ char *str = grpc_transport_stream_op_string(op);
+ gpr_log(GPR_DEBUG, "perform_stream_op[s=%p/%d]: %s", s, s->id, str);
+ gpr_free(str);
+ }
+
grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked,
op);
op->transport_private.args[0] = gt;
op->transport_private.args[1] = gs;
- GRPC_CHTTP2_STREAM_REF(&s->global, "perform_stream_op");
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &op->transport_private.closure, GRPC_ERROR_NONE);
+ GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
+ grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
+ GRPC_ERROR_NONE, op->covered_by_poller);
GPR_TIMER_END("perform_stream_op", 0);
}
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure *on_recv) {
grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p));
- p->next = &t->global.pings;
+ p->next = &t->pings;
p->prev = p->next->prev;
p->prev->next = p->next->prev = p;
- p->id[0] = (uint8_t)((t->global.ping_counter >> 56) & 0xff);
- p->id[1] = (uint8_t)((t->global.ping_counter >> 48) & 0xff);
- p->id[2] = (uint8_t)((t->global.ping_counter >> 40) & 0xff);
- p->id[3] = (uint8_t)((t->global.ping_counter >> 32) & 0xff);
- p->id[4] = (uint8_t)((t->global.ping_counter >> 24) & 0xff);
- p->id[5] = (uint8_t)((t->global.ping_counter >> 16) & 0xff);
- p->id[6] = (uint8_t)((t->global.ping_counter >> 8) & 0xff);
- p->id[7] = (uint8_t)(t->global.ping_counter & 0xff);
+ p->id[0] = (uint8_t)((t->ping_counter >> 56) & 0xff);
+ p->id[1] = (uint8_t)((t->ping_counter >> 48) & 0xff);
+ p->id[2] = (uint8_t)((t->ping_counter >> 40) & 0xff);
+ p->id[3] = (uint8_t)((t->ping_counter >> 32) & 0xff);
+ p->id[4] = (uint8_t)((t->ping_counter >> 24) & 0xff);
+ p->id[5] = (uint8_t)((t->ping_counter >> 16) & 0xff);
+ p->id[6] = (uint8_t)((t->ping_counter >> 8) & 0xff);
+ p->id[7] = (uint8_t)(t->ping_counter & 0xff);
+ t->ping_counter++;
p->on_recv = on_recv;
- gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
- grpc_chttp2_initiate_write(exec_ctx, &t->global, true, "send_ping");
+ gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_ping_create(0, p->id));
+ grpc_chttp2_initiate_write(exec_ctx, t, true, "send_ping");
}
-typedef struct ack_ping_args {
- grpc_closure closure;
- grpc_chttp2_transport *t;
- uint8_t opaque_8bytes[8];
-} ack_ping_args;
-
-static void ack_ping_locked(grpc_exec_ctx *exec_ctx, void *a,
- grpc_error *error_ignored) {
- ack_ping_args *args = a;
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ const uint8_t *opaque_8bytes) {
grpc_chttp2_outstanding_ping *ping;
- grpc_chttp2_transport_global *transport_global = &args->t->global;
- for (ping = transport_global->pings.next; ping != &transport_global->pings;
- ping = ping->next) {
- if (0 == memcmp(args->opaque_8bytes, ping->id, 8)) {
+ for (ping = t->pings.next; ping != &t->pings; ping = ping->next) {
+ if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
- break;
+ return;
}
}
- UNREF_TRANSPORT(exec_ctx, args->t, "ack_ping");
- gpr_free(args);
+ char *msg = gpr_dump((const char *)opaque_8bytes, 8, GPR_DUMP_HEX);
+ char *from = grpc_endpoint_get_peer(t->ep);
+ gpr_log(GPR_DEBUG, "Unknown ping response from %s: %s", from, msg);
+ gpr_free(from);
+ gpr_free(msg);
}
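
send_ping_locked and grpc_chttp2_ack_ping above keep outstanding pings in a doubly-linked list anchored at t->pings, identify each ping by its counter serialized into 8 big-endian opaque bytes, and match an ack by memcmp against those bytes (an unmatched ack is only logged). A rough standalone sketch of the same bookkeeping, using hypothetical names rather than the transport's actual types:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the transport's outstanding-ping list. */
typedef struct ping_node {
  struct ping_node *next, *prev;
  uint8_t id[8];
} ping_node;

typedef struct { ping_node pings; uint64_t counter; } ping_list;

static void ping_list_init(ping_list *l) {
  l->pings.next = l->pings.prev = &l->pings; /* sentinel points at itself */
  l->counter = 0;
}

static void ping_send(ping_list *l) {
  ping_node *p = malloc(sizeof(*p));
  /* insert just before the sentinel, as the patch does */
  p->next = &l->pings;
  p->prev = p->next->prev;
  p->prev->next = p->next->prev = p;
  for (int i = 0; i < 8; i++) {
    p->id[i] = (uint8_t)(l->counter >> (56 - 8 * i)); /* big-endian id */
  }
  l->counter++;
}

static int ping_ack(ping_list *l, const uint8_t opaque_8bytes[8]) {
  for (ping_node *p = l->pings.next; p != &l->pings; p = p->next) {
    if (memcmp(opaque_8bytes, p->id, 8) == 0) {
      p->next->prev = p->prev;
      p->prev->next = p->next;
      free(p);
      return 1; /* known ping: acknowledged */
    }
  }
  return 0; /* unknown ping: the transport logs it */
}
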
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_parsing *transport_parsing,
- const uint8_t *opaque_8bytes) {
- ack_ping_args *args = gpr_malloc(sizeof(*args));
- args->t = TRANSPORT_FROM_PARSING(transport_parsing);
- memcpy(args->opaque_8bytes, opaque_8bytes, sizeof(args->opaque_8bytes));
- grpc_closure_init(&args->closure, ack_ping_locked, args);
- REF_TRANSPORT(args->t, "ack_ping");
- grpc_combiner_execute(exec_ctx, args->t->executor.combiner, &args->closure,
- GRPC_ERROR_NONE);
+static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_error_code error, gpr_slice data) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
+ grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)error, data,
+ &t->qbuf);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
}
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
@@ -1293,15 +1234,6 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t = op->transport_private.args[0];
grpc_error *close_transport = op->disconnect_with_error;
- /* If there's a set_accept_stream ensure that we're not parsing
- to avoid changing things out from underneath */
- if (t->executor.parsing_active && op->set_accept_stream) {
- GPR_ASSERT(t->post_parsing_op == NULL);
- t->post_parsing_op = gpr_malloc(sizeof(*op));
- memcpy(t->post_parsing_op, op, sizeof(*op));
- return;
- }
-
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
@@ -1309,15 +1241,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_goaway) {
- t->global.sent_goaway = 1;
- grpc_chttp2_goaway_append(
- t->global.last_incoming_stream_id,
- (uint32_t)grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
- gpr_slice_ref(*op->goaway_message), &t->global.qbuf);
- close_transport = grpc_chttp2_has_streams(t)
- ? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("GOAWAY sent");
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "goaway_sent");
+ send_goaway(exec_ctx, t,
+ grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
+ gpr_slice_ref(*op->goaway_message));
}
if (op->set_accept_stream) {
@@ -1342,154 +1268,131 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
close_transport_locked(exec_ctx, t, close_transport);
}
- grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_closure_run(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
- UNREF_TRANSPORT(exec_ctx, t, "transport_op");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
}
static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+ char *msg = grpc_transport_op_string(op);
+ gpr_free(msg);
op->transport_private.args[0] = gt;
grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked,
op);
- REF_TRANSPORT(t, "transport_op");
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &op->transport_private.closure, GRPC_ERROR_NONE);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
+ grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
+ GRPC_ERROR_NONE, false);
}
/*******************************************************************************
* INPUT PROCESSING - GENERAL
*/
-static void check_read_ops(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global) {
- GPR_TIMER_BEGIN("check_read_ops", 0);
- grpc_chttp2_stream_global *stream_global;
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
grpc_byte_stream *bs;
- while (
- grpc_chttp2_list_pop_check_read_ops(transport_global, &stream_global)) {
- if (stream_global->recv_initial_metadata_ready != NULL &&
- stream_global->published_initial_metadata) {
- if (stream_global->seen_error) {
- while ((bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
- incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
- }
- if (stream_global->exceeded_metadata_size) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE(
- "received initial metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
- }
+ if (s->recv_initial_metadata_ready != NULL &&
+ s->published_metadata[0] != GRPC_METADATA_NOT_PUBLISHED) {
+ if (s->seen_error) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
+ incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- grpc_chttp2_incoming_metadata_buffer_publish(
- &stream_global->received_initial_metadata,
- stream_global->recv_initial_metadata);
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_initial_metadata_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_initial_metadata_ready = NULL;
}
- if (stream_global->recv_message_ready != NULL) {
- while (stream_global->final_metadata_requested &&
- stream_global->seen_error &&
- (bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[0],
+ s->recv_initial_metadata);
+ null_then_run_closure(exec_ctx, &s->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
+ }
+}
+
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ grpc_byte_stream *bs;
+ if (s->recv_message_ready != NULL) {
+ while (s->final_metadata_requested && s->seen_error &&
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
+ incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
+ }
+ if (s->incoming_frames.head != NULL) {
+ *s->recv_message =
+ grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames);
+ GPR_ASSERT(*s->recv_message != NULL);
+ null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+ } else if (s->published_metadata[1] != GRPC_METADATA_NOT_PUBLISHED) {
+ *s->recv_message = NULL;
+ null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+ }
+ }
+}
+
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ grpc_byte_stream *bs;
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+ if (s->recv_trailing_metadata_finished != NULL && s->read_closed &&
+ s->write_closed) {
+ if (s->seen_error) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- if (stream_global->incoming_frames.head != NULL) {
- *stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames);
- GPR_ASSERT(*stream_global->recv_message != NULL);
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_message_ready = NULL;
- } else if (stream_global->published_trailing_metadata) {
- *stream_global->recv_message = NULL;
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_message_ready = NULL;
- }
}
- if (stream_global->recv_trailing_metadata_finished != NULL &&
- stream_global->read_closed && stream_global->write_closed) {
- if (stream_global->seen_error) {
- while ((bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
- incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
- }
- if (stream_global->exceeded_metadata_size) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE(
- "received trailing metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
- }
- }
- if (stream_global->all_incoming_byte_streams_finished) {
- grpc_chttp2_incoming_metadata_buffer_publish(
- &stream_global->received_trailing_metadata,
- stream_global->recv_trailing_metadata);
- grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->recv_trailing_metadata_finished, GRPC_ERROR_NONE);
- }
+ if (s->all_incoming_byte_streams_finished &&
+ s->recv_trailing_metadata_finished != NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1],
+ s->recv_trailing_metadata);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
+ "recv_trailing_metadata_finished");
}
}
- GPR_TIMER_END("check_read_ops", 0);
}
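
These grpc_chttp2_maybe_complete_recv_* helpers replace the old check_read_ops sweep: instead of queuing streams onto a list for later processing, every state change re-runs the completion check directly. They rely on a clear-then-run idiom (null_then_run_closure, defined elsewhere in this file) so a closure slot cannot fire twice even if the helper is re-entered. A generic sketch of that idiom, with illustrative types that are not the transport's own:

/* Illustrative clear-then-run idiom: empty the slot before invoking the
   callback, so repeated "maybe complete" checks run it exactly once. */
typedef void (*cb_fn)(void *arg, int error);
typedef struct { cb_fn fn; void *arg; } closure;

static void null_then_run(closure **slot, int error) {
  closure *c = *slot;
  *slot = (void *)0;                    /* clear the slot first ... */
  if (c != (void *)0) c->fn(c->arg, error); /* ... then run exactly once */
}
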
-static void decrement_active_streams_locked(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- if ((stream_global->all_incoming_byte_streams_finished =
- gpr_unref(&stream_global->active_streams))) {
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+static void decrement_active_streams_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ if ((s->all_incoming_byte_streams_finished = gpr_unref(&s->active_streams))) {
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
}
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
- size_t new_stream_count;
- grpc_chttp2_stream *s =
- grpc_chttp2_stream_map_delete(&t->parsing_stream_map, id);
- if (!s) {
- s = grpc_chttp2_stream_map_delete(&t->new_stream_map, id);
- }
+ grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
- s->global.in_stream_map = false;
- if (t->parsing.incoming_stream == &s->parsing) {
- t->parsing.incoming_stream = NULL;
- grpc_chttp2_parsing_become_skip_parser(exec_ctx, &t->parsing);
+ if (t->incoming_stream == s) {
+ t->incoming_stream = NULL;
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
}
- if (s->parsing.data_parser.parsing_frame != NULL) {
+ if (s->data_parser.parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, s->parsing.data_parser.parsing_frame, GRPC_ERROR_REF(error),
- 0);
- s->parsing.data_parser.parsing_frame = NULL;
+ exec_ctx, s->data_parser.parsing_frame, GRPC_ERROR_REF(error));
+ s->data_parser.parsing_frame = NULL;
}
- if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(
- exec_ctx, t, GRPC_ERROR_CREATE_REFERENCING(
- "Last stream closed after sending GOAWAY", &error, 1));
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ post_benign_reclaimer(exec_ctx, t);
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
+ close_transport_locked(
+ exec_ctx, t,
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Last stream closed after sending GOAWAY", &error, 1));
+ }
}
- if (grpc_chttp2_list_remove_writable_stream(&t->global, &s->global)) {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "chttp2_writing");
+ if (grpc_chttp2_list_remove_writable_stream(t, s)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream");
}
- new_stream_count = grpc_chttp2_stream_map_size(&t->parsing_stream_map) +
- grpc_chttp2_stream_map_size(&t->new_stream_map);
- GPR_ASSERT(new_stream_count <= UINT32_MAX);
- if (new_stream_count != t->global.concurrent_stream_count) {
- t->global.concurrent_stream_count = (uint32_t)new_stream_count;
- maybe_start_some_streams(exec_ctx, &t->global);
- }
GRPC_ERROR_UNREF(error);
+
+ maybe_start_some_streams(exec_ctx, t);
}
static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
@@ -1519,23 +1422,20 @@ static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
}
}
-static void cancel_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *due_to_error) {
- if (!stream_global->read_closed || !stream_global->write_closed) {
+void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ grpc_error *due_to_error) {
+ if (!s->read_closed || !s->write_closed) {
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(due_to_error, stream_global->deadline, &http_error,
+ status_codes_from_error(due_to_error, s->deadline, &http_error,
&grpc_status);
- if (stream_global->id != 0) {
+ if (s->id != 0) {
gpr_slice_buffer_add(
- &transport_global->qbuf,
- grpc_chttp2_rst_stream_create(stream_global->id, (uint32_t)http_error,
- &stream_global->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "rst_stream");
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
+ &s->stats.outgoing));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
}
const char *msg =
@@ -1546,27 +1446,21 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx,
msg = grpc_error_string(due_to_error);
}
gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- grpc_status, &msg_slice);
+ grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
}
- if (due_to_error != GRPC_ERROR_NONE && !stream_global->seen_error) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
+ s->seen_error = true;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
- 1, due_to_error);
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error);
}
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_status_code status, gpr_slice *slice) {
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_status_code status,
+ gpr_slice *slice) {
if (status != GRPC_STATUS_OK) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ s->seen_error = true;
}
/* stream_global->recv_trailing_metadata_finished gives us a
last chance replacement: we've received trailing metadata,
@@ -1574,24 +1468,23 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
to the upper layers - drop what we've got, and then publish
what we want - which is safe because we haven't told anyone
about the metadata yet */
- if (!stream_global->published_trailing_metadata ||
- stream_global->recv_trailing_metadata_finished != NULL) {
+ if (s->published_metadata[1] == GRPC_METADATA_NOT_PUBLISHED ||
+ s->recv_trailing_metadata_finished != NULL) {
char status_string[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status, status_string);
grpc_chttp2_incoming_metadata_buffer_add(
- &stream_global->received_trailing_metadata,
+ &s->metadata_buffer[1],
grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_STATUS, grpc_mdstr_from_string(status_string)));
if (slice) {
grpc_chttp2_incoming_metadata_buffer_add(
- &stream_global->received_trailing_metadata,
+ &s->metadata_buffer[1],
grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_MESSAGE,
grpc_mdstr_from_slice(gpr_slice_ref(*slice))));
}
- stream_global->published_trailing_metadata = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
if (slice) {
gpr_slice_unref(*slice);
@@ -1609,91 +1502,88 @@ static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
++*nrefs;
}
-static grpc_error *removal_error(grpc_error *extra_error,
- grpc_chttp2_stream_global *stream_global) {
+static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
+ const char *master_error_msg) {
grpc_error *refs[3];
size_t nrefs = 0;
- add_error(stream_global->read_closed_error, refs, &nrefs);
- add_error(stream_global->write_closed_error, refs, &nrefs);
+ add_error(s->read_closed_error, refs, &nrefs);
+ add_error(s->write_closed_error, refs, &nrefs);
add_error(extra_error, refs, &nrefs);
grpc_error *error = GRPC_ERROR_NONE;
if (nrefs > 0) {
- error = GRPC_ERROR_CREATE_REFERENCING("Failed due to stream removal", refs,
- nrefs);
+ error = GRPC_ERROR_CREATE_REFERENCING(master_error_msg, refs, nrefs);
}
GRPC_ERROR_UNREF(extra_error);
return error;
}
static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *error) {
- error = removal_error(error, stream_global);
- stream_global->send_message = NULL;
+ error =
+ removal_error(error, s, "Pending writes failed due to stream closure");
+ s->fetching_send_message = NULL;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error),
+ "send_initial_metadata_finished");
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished, GRPC_ERROR_REF(error));
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished, GRPC_ERROR_REF(error));
- grpc_chttp2_complete_closure_step(exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished,
- error);
+ exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
+ "fetching_send_message_finished");
+ while (s->on_write_finished_cbs) {
+ grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
+ s->on_write_finished_cbs = cb->next;
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
+ GRPC_ERROR_REF(error),
+ "on_write_finished_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+ }
+ GRPC_ERROR_UNREF(error);
}
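
fail_pending_writes above drains s->on_write_finished_cbs and returns each node to t->write_cb_pool instead of freeing it: an intrusive singly-linked free list that avoids one allocation per write callback. A standalone sketch of that pool pattern (per-transport in the real code, a single static pool here for brevity; names are illustrative):

#include <stdlib.h>

/* Illustrative intrusive free-list pool mirroring write_cb_pool above. */
typedef struct write_cb {
  struct write_cb *next;
  /* ... callback payload fields ... */
} write_cb;

static write_cb *pool = NULL;

static write_cb *write_cb_get(void) {
  if (pool != NULL) {       /* reuse a recycled node when one exists */
    write_cb *cb = pool;
    pool = cb->next;
    return cb;
  }
  return malloc(sizeof(write_cb));
}

static void write_cb_put(write_cb *cb) {
  cb->next = pool;          /* push back onto the free list, no free() */
  pool = cb;
}
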
-void grpc_chttp2_mark_stream_closed(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
- grpc_error *error) {
- if (stream_global->read_closed && stream_global->write_closed) {
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int close_reads,
+ int close_writes, grpc_error *error) {
+ if (s->read_closed && s->write_closed) {
/* already closed */
GRPC_ERROR_UNREF(error);
return;
}
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- if (close_reads && !stream_global->read_closed) {
- stream_global->read_closed_error = GRPC_ERROR_REF(error);
- stream_global->read_closed = true;
- stream_global->published_initial_metadata = true;
- stream_global->published_trailing_metadata = true;
- decrement_active_streams_locked(exec_ctx, transport_global, stream_global);
- }
- if (close_writes && !stream_global->write_closed) {
- stream_global->write_closed_error = GRPC_ERROR_REF(error);
- stream_global->write_closed = true;
- if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.write_state !=
- GRPC_CHTTP2_WRITING_INACTIVE) {
- GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes");
- grpc_chttp2_list_add_closed_waiting_for_writing(transport_global,
- stream_global);
- } else {
- fail_pending_writes(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(error));
- }
- }
- if (stream_global->read_closed && stream_global->write_closed) {
- if (stream_global->id != 0 &&
- TRANSPORT_FROM_GLOBAL(transport_global)->executor.parsing_active) {
- grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
- stream_global);
- } else {
- if (stream_global->id != 0) {
- remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
- stream_global->id,
- removal_error(GRPC_ERROR_REF(error), stream_global));
+ if (close_reads && !s->read_closed) {
+ s->read_closed_error = GRPC_ERROR_REF(error);
+ s->read_closed = true;
+ for (int i = 0; i < 2; i++) {
+ if (s->published_metadata[i] == GRPC_METADATA_NOT_PUBLISHED) {
+ s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE;
}
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
+ decrement_active_streams_locked(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+ }
+ if (close_writes && !s->write_closed) {
+ s->write_closed_error = GRPC_ERROR_REF(error);
+ s->write_closed = true;
+ fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+ }
+ if (s->read_closed && s->write_closed) {
+ if (s->id != 0) {
+ remove_stream(exec_ctx, t, s->id,
+ removal_error(GRPC_ERROR_REF(error), s, "Stream removed"));
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2");
}
GRPC_ERROR_UNREF(error);
}
-static void close_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error) {
+static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error) {
gpr_slice hdr;
gpr_slice status_hdr;
gpr_slice message_pfx;
@@ -1701,16 +1591,17 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(error, stream_global->deadline, &http_error,
- &grpc_status);
+ status_codes_from_error(error, s->deadline, &http_error, &grpc_status);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
- if (stream_global->id != 0 && !transport_global->is_client) {
+ if (s->id != 0 && !t->is_client) {
/* Hand roll a header block.
- This is unnecessarily ugly - at some point we should find a more elegant
+ This is unnecessarily ugly - at some point we should find a more
+ elegant
solution.
- It's complicated by the fact that our send machinery would be dead by the
+ It's complicated by the fact that our send machinery would be dead by
+ the
time we got around to sending this, so instead we ignore HPACK
compression
and just write the uncompressed bytes onto the wire. */
@@ -1775,23 +1666,22 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
*p++ = (uint8_t)(len);
*p++ = GRPC_CHTTP2_FRAME_HEADER;
*p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
- *p++ = (uint8_t)(stream_global->id >> 24);
- *p++ = (uint8_t)(stream_global->id >> 16);
- *p++ = (uint8_t)(stream_global->id >> 8);
- *p++ = (uint8_t)(stream_global->id);
+ *p++ = (uint8_t)(s->id >> 24);
+ *p++ = (uint8_t)(s->id >> 16);
+ *p++ = (uint8_t)(s->id >> 8);
+ *p++ = (uint8_t)(s->id);
GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
- gpr_slice_buffer_add(&transport_global->qbuf, hdr);
- gpr_slice_buffer_add(&transport_global->qbuf, status_hdr);
+ gpr_slice_buffer_add(&t->qbuf, hdr);
+ gpr_slice_buffer_add(&t->qbuf, status_hdr);
if (optional_message) {
- gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
- gpr_slice_buffer_add(&transport_global->qbuf,
+ gpr_slice_buffer_add(&t->qbuf, message_pfx);
+ gpr_slice_buffer_add(&t->qbuf,
gpr_slice_from_copied_string(optional_message));
}
gpr_slice_buffer_add(
- &transport_global->qbuf,
- grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR,
- &stream_global->stats.outgoing));
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
}
const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
@@ -1801,46 +1691,33 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
msg = grpc_error_string(error);
}
gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- grpc_status, &msg_slice);
+ grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
- 1, error);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "close_from_api");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "close_from_api");
}
typedef struct {
grpc_exec_ctx *exec_ctx;
grpc_error *error;
+ grpc_chttp2_transport *t;
} cancel_stream_cb_args;
-static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
- void *user_data,
- grpc_chttp2_stream_global *stream_global) {
+static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
cancel_stream_cb_args *args = user_data;
- cancel_from_api(args->exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(args->error));
+ grpc_chttp2_stream *s = stream;
+ grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
+ GRPC_ERROR_REF(args->error));
}
static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
- cancel_stream_cb_args args = {exec_ctx, error};
- grpc_chttp2_for_all_streams(&t->global, &args, cancel_stream_cb);
+ cancel_stream_cb_args args = {exec_ctx, error, t};
+ grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args);
GRPC_ERROR_UNREF(error);
}
-static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
- if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
- error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE);
- }
- close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
- end_all_the_calls(exec_ctx, t, error);
-}
-
/** update window from a settings change */
typedef struct {
grpc_chttp2_transport *t;
@@ -1851,20 +1728,23 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
update_global_window_args *a = args;
grpc_chttp2_transport *t = a->t;
grpc_chttp2_stream *s = stream;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global = &s->global;
int was_zero;
int is_zero;
- int64_t initial_window_update = t->parsing.initial_window_update;
+ int64_t initial_window_update = t->initial_window_update;
- was_zero = stream_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", transport_global, stream_global,
- outgoing_window, initial_window_update);
- is_zero = stream_global->outgoing_window <= 0;
+ if (initial_window_update > 0) {
+ was_zero = s->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", t, s, outgoing_window,
+ initial_window_update);
+ is_zero = s->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(a->exec_ctx, transport_global, stream_global,
- true, "update_global_window");
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(a->exec_ctx, t, s, true,
+ "update_global_window");
+ }
+ } else {
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("settings", t, s, outgoing_window,
+ -initial_window_update);
}
}
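
update_global_window applies a change in the peer's SETTINGS_INITIAL_WINDOW_SIZE to every stream: a positive delta credits each stream's outgoing_window and wakes streams that were blocked at zero, while a negative delta debits it. The arithmetic in isolation, outside the GRPC_CHTTP2_FLOW_* tracing macros (illustrative helper, not part of this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-stream window adjustment on a SETTINGS change.
   Returns true when the stream went from blocked (<= 0) to writable. */
static bool apply_initial_window_delta(int64_t *outgoing_window,
                                       int64_t delta) {
  bool was_zero = *outgoing_window <= 0;
  *outgoing_window += delta;  /* credit if delta > 0, debit if delta < 0 */
  bool is_zero = *outgoing_window <= 0;
  return was_zero && !is_zero;
}

In the patch the credit/debit goes through GRPC_CHTTP2_FLOW_CREDIT_STREAM / GRPC_CHTTP2_FLOW_DEBIT_STREAM so the adjustment also shows up in flow-control tracing.
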
@@ -1872,50 +1752,19 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
* INPUT PROCESSING - PARSING
*/
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static void reading_action(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
/* Control flow:
reading_action_locked ->
(parse_unlocked -> post_parse_locked)? ->
post_reading_action_locked */
GPR_TIMER_BEGIN("reading_action", 0);
grpc_chttp2_transport *t = tp;
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &t->reading_action_locked, GRPC_ERROR_REF(error));
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
+ GRPC_ERROR_REF(error), false);
GPR_TIMER_END("reading_action", 0);
}
-static void reading_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- GPR_TIMER_BEGIN("reading_action_locked", 0);
-
- grpc_chttp2_transport *t = tp;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
-
- GPR_ASSERT(!t->executor.parsing_active);
- if (!t->closed) {
- t->executor.parsing_active = 1;
- /* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map,
- &t->parsing_stream_map);
- grpc_chttp2_prepare_to_read(transport_global, transport_parsing);
- grpc_exec_ctx_sched(exec_ctx, &t->parsing_action, GRPC_ERROR_REF(error),
- NULL);
- } else {
- post_reading_action_locked(exec_ctx, t, error);
- }
-
- GPR_TIMER_END("reading_action_locked", 0);
-}
-
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_http_parser parser;
@@ -1944,131 +1793,102 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
return error;
}
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = arg;
- grpc_error *err = GRPC_ERROR_NONE;
- GPR_TIMER_BEGIN("reading_action.parse", 0);
- size_t i = 0;
- grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
- GRPC_ERROR_NONE};
- for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
- errors[1] = grpc_chttp2_perform_read(exec_ctx, &t->parsing,
- t->read_buffer.slices[i]);
- };
- if (errors[1] == GRPC_ERROR_NONE) {
- err = GRPC_ERROR_REF(error);
- } else {
- errors[2] = try_http_parsing(exec_ctx, t);
- err = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
- GPR_ARRAY_SIZE(errors));
+static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("reading_action_locked", 0);
+
+ grpc_chttp2_transport *t = tp;
+
+ GRPC_ERROR_REF(error);
+
+ grpc_error *err = error;
+ if (err != GRPC_ERROR_NONE) {
+ err = grpc_error_set_int(
+ GRPC_ERROR_CREATE_REFERENCING("Endpoint read failed", &err, 1),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state);
}
- for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
- GRPC_ERROR_UNREF(errors[i]);
+ GPR_SWAP(grpc_error *, err, error);
+ GRPC_ERROR_UNREF(err);
+ if (!t->closed) {
+ GPR_TIMER_BEGIN("reading_action.parse", 0);
+ size_t i = 0;
+ grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
+ GRPC_ERROR_NONE};
+ for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
+ errors[1] =
+ grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
+ };
+ if (errors[1] != GRPC_ERROR_NONE) {
+ errors[2] = try_http_parsing(exec_ctx, t);
+ GRPC_ERROR_UNREF(error);
+ error = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
+ GPR_ARRAY_SIZE(errors));
+ }
+ for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
+ GRPC_ERROR_UNREF(errors[i]);
+ }
+ GPR_TIMER_END("reading_action.parse", 0);
+
+ GPR_TIMER_BEGIN("post_parse_locked", 0);
+ if (t->initial_window_update != 0) {
+ update_global_window_args args = {t, exec_ctx};
+ grpc_chttp2_stream_map_for_each(&t->stream_map, update_global_window,
+ &args);
+ t->initial_window_update = 0;
+ }
+ /* handle higher level things */
+ if (t->incoming_window < t->connection_window_target * 3 / 4) {
+ int64_t announce_bytes = t->connection_window_target - t->incoming_window;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, announce_incoming_window,
+ announce_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, incoming_window,
+ announce_bytes);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "global incoming window");
+ }
+
+ GPR_TIMER_END("post_parse_locked", 0);
}
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &t->post_parse_locked,
- err);
- GPR_TIMER_END("reading_action.parse", 0);
-}
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("post_parse_locked", 0);
- grpc_chttp2_transport *t = arg;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
- /* copy parsing qbuf to global qbuf */
- if (t->parsing.qbuf.count > 0) {
- gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "parsing_qbuf");
- }
- /* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map, &t->parsing_stream_map);
- transport_global->concurrent_stream_count =
- (uint32_t)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
- if (transport_parsing->initial_window_update != 0) {
- update_global_window_args args = {t, exec_ctx};
- grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
- update_global_window, &args);
- transport_parsing->initial_window_update = 0;
- }
- /* handle higher level things */
- grpc_chttp2_publish_reads(exec_ctx, transport_global, transport_parsing);
- t->executor.parsing_active = 0;
- /* handle delayed transport ops (if there is one) */
- if (t->post_parsing_op) {
- grpc_transport_op *op = t->post_parsing_op;
- t->post_parsing_op = NULL;
- perform_transport_op_locked(exec_ctx, op, GRPC_ERROR_NONE);
- gpr_free(op);
- }
- /* if a stream is in the stream map, and gets cancelled, we need to
- * ensure we are not parsing before continuing the cancellation to keep
- * things in a sane state */
- grpc_chttp2_stream_global *stream_global;
- while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
- &stream_global)) {
- GPR_ASSERT(stream_global->in_stream_map);
- GPR_ASSERT(stream_global->write_closed);
- GPR_ASSERT(stream_global->read_closed);
- remove_stream(exec_ctx, t, stream_global->id,
- removal_error(GRPC_ERROR_NONE, stream_global));
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
- }
-
- post_reading_action_locked(exec_ctx, t, error);
- GPR_TIMER_END("post_parse_locked", 0);
-}
-
-static void post_reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
GPR_TIMER_BEGIN("post_reading_action_locked", 0);
- grpc_chttp2_transport *t = arg;
bool keep_reading = false;
- GRPC_ERROR_REF(error);
if (error == GRPC_ERROR_NONE && t->closed) {
error = GRPC_ERROR_CREATE("Transport closed");
}
if (error != GRPC_ERROR_NONE) {
- drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
t->endpoint_reading = 0;
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "R:%p -> 0 ws=%s", t,
- write_state_name(t->executor.write_state));
- }
} else if (!t->closed) {
keep_reading = true;
- REF_TRANSPORT(t, "keep_reading");
- prevent_endpoint_shutdown(t);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading");
}
gpr_slice_buffer_reset_and_unref(&t->read_buffer);
if (keep_reading) {
- grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->reading_action);
- allow_endpoint_shutdown_locked(exec_ctx, t);
- UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
+ grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
- UNREF_TRANSPORT(exec_ctx, t, "reading_action");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
}
- GRPC_ERROR_UNREF(error);
GPR_TIMER_END("post_reading_action_locked", 0);
+
+ GRPC_ERROR_UNREF(error);
+
+ GPR_TIMER_END("reading_action_locked", 0);
}
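
After parsing, read_action_locked tops up the connection-level flow-control window whenever incoming_window has fallen below three quarters of connection_window_target, crediting the difference so the writing path can emit a connection-level WINDOW_UPDATE. The refill rule in isolation (illustrative helper, not part of this patch):

#include <stdint.h>

/* Illustrative refill rule: once the incoming window drops below 3/4 of
   the target, announce enough credit to bring it back to the target. */
static int64_t connection_window_refill(int64_t incoming_window,
                                        int64_t window_target) {
  if (incoming_window < window_target * 3 / 4) {
    return window_target - incoming_window; /* bytes to announce */
  }
  return 0; /* still above the threshold: no update needed */
}
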
/*******************************************************************************
* CALLBACK LOOP
*/
-static void connectivity_state_set(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_connectivity_state state, grpc_error *error, const char *reason) {
+static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_connectivity_state state,
+ grpc_error *error, const char *reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
- grpc_connectivity_state_set(
- exec_ctx,
- &TRANSPORT_FROM_GLOBAL(transport_global)->channel_callback.state_tracker,
- state, error, reason);
+ grpc_connectivity_state_set(exec_ctx, &t->channel_callback.state_tracker,
+ state, error, reason);
}
/*******************************************************************************
@@ -2101,15 +1921,16 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
}
}
-static void incoming_byte_stream_update_flow_control(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
- size_t have_already) {
+static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ size_t max_size_hint,
+ size_t have_already) {
uint32_t max_recv_bytes;
/* clamp max recv hint to an allowable size */
- if (max_size_hint >= UINT32_MAX - transport_global->stream_lookahead) {
- max_recv_bytes = UINT32_MAX - transport_global->stream_lookahead;
+ if (max_size_hint >= UINT32_MAX - t->stream_lookahead) {
+ max_recv_bytes = UINT32_MAX - t->stream_lookahead;
} else {
max_recv_bytes = (uint32_t)max_size_hint;
}
@@ -2122,23 +1943,21 @@ static void incoming_byte_stream_update_flow_control(
}
/* add some small lookahead to keep pipelines flowing */
- GPR_ASSERT(max_recv_bytes <= UINT32_MAX - transport_global->stream_lookahead);
- max_recv_bytes += transport_global->stream_lookahead;
- if (stream_global->max_recv_bytes < max_recv_bytes) {
- uint32_t add_max_recv_bytes =
- max_recv_bytes - stream_global->max_recv_bytes;
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- max_recv_bytes, add_max_recv_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- unannounced_incoming_window_for_parse,
+ GPR_ASSERT(max_recv_bytes <= UINT32_MAX - t->stream_lookahead);
+ max_recv_bytes += t->stream_lookahead;
+ if (s->max_recv_bytes < max_recv_bytes) {
+ uint32_t add_max_recv_bytes = max_recv_bytes - s->max_recv_bytes;
+ bool new_window_write_is_covered_by_poller =
+ s->max_recv_bytes < have_already;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, max_recv_bytes,
+ add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, incoming_window,
add_max_recv_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- unannounced_incoming_window_for_writing,
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, announce_window,
add_max_recv_bytes);
- grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global,
- stream_global);
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "read_incoming_stream");
+ grpc_chttp2_become_writable(exec_ctx, t, s,
+ new_window_write_is_covered_by_poller,
+ "read_incoming_stream");
}
}
@@ -2146,25 +1965,23 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = argp;
- grpc_chttp2_transport_global *transport_global = &bs->transport->global;
- grpc_chttp2_stream_global *stream_global = &bs->stream->global;
+ grpc_chttp2_transport *t = bs->transport;
+ grpc_chttp2_stream *s = bs->stream;
if (bs->is_tail) {
gpr_mu_lock(&bs->slice_mu);
size_t cur_length = bs->slices.length;
gpr_mu_unlock(&bs->slice_mu);
incoming_byte_stream_update_flow_control(
- exec_ctx, transport_global, stream_global,
- bs->next_action.max_size_hint, cur_length);
+ exec_ctx, t, s, bs->next_action.max_size_hint, cur_length);
}
gpr_mu_lock(&bs->slice_mu);
if (bs->slices.count > 0) {
*bs->next_action.slice = gpr_slice_buffer_take_first(&bs->slices);
- grpc_exec_ctx_sched(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_run(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
} else if (bs->error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, bs->next_action.on_complete,
- GRPC_ERROR_REF(bs->error), NULL);
+ grpc_closure_run(exec_ctx, bs->next_action.on_complete,
+ GRPC_ERROR_REF(bs->error));
} else {
bs->on_next = bs->next_action.on_complete;
bs->next = bs->next_action.slice;
@@ -2186,8 +2003,8 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
bs->next_action.on_complete = on_complete;
grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked,
bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->next_action.closure, GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, bs->transport->combiner,
+ &bs->next_action.closure, GRPC_ERROR_NONE, false);
GPR_TIMER_END("incoming_byte_stream_next", 0);
return 0;
}
@@ -2200,8 +2017,7 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = byte_stream;
GPR_ASSERT(bs->base.destroy == incoming_byte_stream_destroy);
- decrement_active_streams_locked(exec_ctx, &bs->transport->global,
- &bs->stream->global);
+ decrement_active_streams_locked(exec_ctx, bs->transport, bs->stream);
incoming_byte_stream_unref(exec_ctx, bs);
}
@@ -2212,126 +2028,215 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked,
bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->destroy_action, GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action,
+ GRPC_ERROR_NONE, false);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}
-typedef struct {
- grpc_chttp2_incoming_byte_stream *byte_stream;
- gpr_slice slice;
-} incoming_byte_stream_push_arg;
+static void incoming_byte_stream_publish_error(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
+ grpc_error *error) {
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
+ bs->on_next = NULL;
+ GRPC_ERROR_UNREF(bs->error);
+ bs->error = error;
+}
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice) {
gpr_mu_lock(&bs->slice_mu);
- if (bs->on_next != NULL) {
- *bs->next = slice;
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
- bs->on_next = NULL;
+ if (bs->remaining_bytes < GPR_SLICE_LENGTH(slice)) {
+ incoming_byte_stream_publish_error(
+ exec_ctx, bs, GRPC_ERROR_CREATE("Too many bytes in stream"));
} else {
- gpr_slice_buffer_add(&bs->slices, slice);
+ bs->remaining_bytes -= (uint32_t)GPR_SLICE_LENGTH(slice);
+ if (bs->on_next != NULL) {
+ *bs->next = slice;
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
+ bs->on_next = NULL;
+ } else {
+ gpr_slice_buffer_add(&bs->slices, slice);
+ }
}
gpr_mu_unlock(&bs->slice_mu);
}
-static void incoming_byte_stream_finished_locked(grpc_exec_ctx *exec_ctx,
- void *bsp, grpc_error *error) {
- grpc_chttp2_incoming_byte_stream *bs = bsp;
- if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
- bs->on_next = NULL;
- GRPC_ERROR_UNREF(bs->error);
- bs->error = error;
- }
- incoming_byte_stream_unref(exec_ctx, bs);
-}
-
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, int from_parsing_thread) {
- GPR_TIMER_BEGIN("grpc_chttp2_incoming_byte_stream_finished", 0);
- if (from_parsing_thread) {
- grpc_closure_init(&bs->finished_action,
- incoming_byte_stream_finished_locked, bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->finished_action, GRPC_ERROR_REF(error));
- } else {
- incoming_byte_stream_finished_locked(exec_ctx, bs, error);
+ grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) {
+ gpr_mu_lock(&bs->slice_mu);
+ if (bs->remaining_bytes != 0) {
+ error = GRPC_ERROR_CREATE("Truncated message");
+ }
+ gpr_mu_unlock(&bs->slice_mu);
+ }
+ if (error != GRPC_ERROR_NONE) {
+ incoming_byte_stream_publish_error(exec_ctx, bs, error);
}
- GPR_TIMER_END("grpc_chttp2_incoming_byte_stream_finished", 0);
+ incoming_byte_stream_unref(exec_ctx, bs);
}
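
The new remaining_bytes field lets the incoming byte stream validate the declared frame size: each pushed slice decrements it, an overrun publishes "Too many bytes in stream", and a non-zero remainder at finish publishes "Truncated message". The accounting in isolation (hypothetical helper names, not part of this patch):

#include <stddef.h>
#include <stdint.h>

/* Illustrative byte accounting for a length-declared stream. Returns 0 on
   success, -1 if the peer sent more than the declared frame size. */
static int stream_push(uint32_t *remaining_bytes, size_t slice_len) {
  if (*remaining_bytes < slice_len) return -1; /* "Too many bytes in stream" */
  *remaining_bytes -= (uint32_t)slice_len;
  return 0;
}

/* Returns 0 if the stream ended exactly at the declared length,
   -1 if bytes are missing ("Truncated message"). */
static int stream_finish(uint32_t remaining_bytes) {
  return remaining_bytes == 0 ? 0 : -1;
}
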
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
- uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ uint32_t frame_size, uint32_t flags) {
grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
gpr_malloc(sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
+ incoming_byte_stream->remaining_bytes = frame_size;
incoming_byte_stream->base.flags = flags;
incoming_byte_stream->base.next = incoming_byte_stream_next;
incoming_byte_stream->base.destroy = incoming_byte_stream_destroy;
gpr_mu_init(&incoming_byte_stream->slice_mu);
gpr_ref_init(&incoming_byte_stream->refs, 2);
incoming_byte_stream->next_message = NULL;
- incoming_byte_stream->transport = TRANSPORT_FROM_PARSING(transport_parsing);
- incoming_byte_stream->stream = STREAM_FROM_PARSING(stream_parsing);
- gpr_ref(&incoming_byte_stream->stream->global.active_streams);
+ incoming_byte_stream->transport = t;
+ incoming_byte_stream->stream = s;
+ gpr_ref(&incoming_byte_stream->stream->active_streams);
gpr_slice_buffer_init(&incoming_byte_stream->slices);
incoming_byte_stream->on_next = NULL;
incoming_byte_stream->is_tail = 1;
incoming_byte_stream->error = GRPC_ERROR_NONE;
- if (add_to_queue->head == NULL) {
- add_to_queue->head = incoming_byte_stream;
+ grpc_chttp2_incoming_frame_queue *q = &s->incoming_frames;
+ if (q->head == NULL) {
+ q->head = incoming_byte_stream;
} else {
- add_to_queue->tail->is_tail = 0;
- add_to_queue->tail->next_message = incoming_byte_stream;
+ q->tail->is_tail = 0;
+ q->tail->next_message = incoming_byte_stream;
}
- add_to_queue->tail = incoming_byte_stream;
+ q->tail = incoming_byte_stream;
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
return incoming_byte_stream;
}
/*******************************************************************************
- * TRACING
+ * RESOURCE QUOTAS
*/
-static char *format_flowctl_context_var(const char *context, const char *var,
- int64_t val, uint32_t id,
- char **scope) {
- char *underscore_pos;
- char *buf;
- char *result;
- if (context == NULL) {
- *scope = NULL;
- gpr_asprintf(&buf, "%s(%" PRId64 ")", var, val);
- result = gpr_leftpad(buf, ' ', 60);
- gpr_free(buf);
- return result;
- }
- underscore_pos = strchr(context, '_');
- *scope = gpr_strdup(context);
- (*scope)[underscore_pos - context] = 0;
- if (id != 0) {
- char *tmp = *scope;
- gpr_asprintf(scope, "%s[%d]", tmp, id);
- gpr_free(tmp);
- }
- gpr_asprintf(&buf, "%s.%s(%" PRId64 ")", underscore_pos + 1, var, val);
- result = gpr_leftpad(buf, ' ', 60);
- gpr_free(buf);
- return result;
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->benign_reclaimer_registered) {
+ t->benign_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ false, &t->benign_reclaimer);
+ }
+}
+
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->destructive_reclaimer_registered) {
+ t->destructive_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ true, &t->destructive_reclaimer);
+ }
+}
+
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ if (error == GRPC_ERROR_NONE &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ /* Channel with no active streams: send a goaway to try and make it
+ * disconnect cleanly */
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+ t->peer_string);
+ }
+ send_goaway(exec_ctx, t, GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+ gpr_slice_from_static_string("Buffers full"));
+ } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG,
+ "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
+ " streams",
+ t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
+ }
+ t->benign_reclaimer_registered = false;
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
}
-static int samestr(char *a, char *b) {
- if (a == NULL) {
- return b == NULL;
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
+ t->destructive_reclaimer_registered = false;
+ if (error == GRPC_ERROR_NONE && n > 0) {
+ grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+ s->id);
+ }
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_CHTTP2_ENHANCE_YOUR_CALM));
+ if (n > 1) {
+ /* Since we cancel one stream per destructive reclamation, if
+ there are more streams left, we can immediately post a new
+ reclaimer in case the resource quota needs to free more
+ memory */
+ post_destructive_reclaimer(exec_ctx, t);
+ }
}
- if (b == NULL) {
- return 0;
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
+}
+
+/*******************************************************************************
+ * TRACING
+ */
+
+static char *format_flowctl_context_var(const char *context, const char *var,
+ int64_t val, uint32_t id) {
+ char *name;
+ if (context == NULL) {
+ name = gpr_strdup(var);
+ } else if (0 == strcmp(context, "t")) {
+ GPR_ASSERT(id == 0);
+ gpr_asprintf(&name, "TRANSPORT:%s", var);
+ } else if (0 == strcmp(context, "s")) {
+ GPR_ASSERT(id != 0);
+ gpr_asprintf(&name, "STREAM[%d]:%s", id, var);
+ } else {
+ gpr_asprintf(&name, "BAD_CONTEXT[%s][%d]:%s", context, id, var);
}
- return 0 == strcmp(a, b);
+ char *name_fld = gpr_leftpad(name, ' ', 64);
+ char *value;
+ gpr_asprintf(&value, "%" PRId64, val);
+ char *value_fld = gpr_leftpad(value, ' ', 8);
+ char *result;
+ gpr_asprintf(&result, "%s %s", name_fld, value_fld);
+ gpr_free(name);
+ gpr_free(name_fld);
+ gpr_free(value);
+ gpr_free(value_fld);
+ return result;
}
void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
@@ -2339,26 +2244,18 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var1, const char *context2,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2) {
- char *scope1;
- char *scope2;
char *tmp_phase;
- char *tmp_scope1;
- char *label1 =
- format_flowctl_context_var(context1, var1, val1, stream_id, &scope1);
- char *label2 =
- format_flowctl_context_var(context2, var2, val2, stream_id, &scope2);
+ char *label1 = format_flowctl_context_var(context1, var1, val1, stream_id);
+ char *label2 = format_flowctl_context_var(context2, var2, val2, stream_id);
char *clisvr = is_client ? "client" : "server";
char *prefix;
tmp_phase = gpr_leftpad(phase, ' ', 8);
- tmp_scope1 = gpr_leftpad(scope1, ' ', 11);
- gpr_asprintf(&prefix, "FLOW %s: %s %s ", tmp_phase, clisvr, scope1);
+ gpr_asprintf(&prefix, "FLOW %s: %s ", tmp_phase, clisvr);
gpr_free(tmp_phase);
- gpr_free(tmp_scope1);
switch (op) {
case GRPC_CHTTP2_FLOWCTL_MOVE:
- GPR_ASSERT(samestr(scope1, scope2));
if (val2 != 0) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"%sMOVE %s <- %s giving %" PRId64, prefix, label1, label2,
@@ -2383,8 +2280,6 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
break;
}
- gpr_free(scope1);
- gpr_free(scope2);
gpr_free(label1);
gpr_free(label2);
gpr_free(prefix);
@@ -2421,10 +2316,11 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
gpr_slice_buffer *read_buffer) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
- REF_TRANSPORT(t, "reading_action"); /* matches unref inside reading_action */
+ GRPC_CHTTP2_REF_TRANSPORT(
+ t, "reading_action"); /* matches unref inside reading_action */
if (read_buffer != NULL) {
gpr_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
- reading_action(exec_ctx, t, GRPC_ERROR_NONE);
+ read_action_begin(exec_ctx, t, GRPC_ERROR_NONE);
}
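
The benign/destructive reclaimer pair added to chttp2_transport.c above is a two-tier memory-pressure response: the cheap pass asks an idle connection to wind down with a GOAWAY, and the destructive pass cancels a live stream, re-posting itself while streams remain so further pressure can keep reclaiming. Below is a minimal, standalone sketch of that two-tier idea in plain C; the types and names (conn_t, benign_reclaim, destructive_reclaim) are invented for illustration and are not the grpc_resource_user API used in the diff.

/* Illustrative sketch only: a two-tier reclaimer with made-up types. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int active_streams;
  bool benign_posted;
  bool destructive_posted;
} conn_t;

/* Benign: if the connection is idle, ask the peer to go away. */
static void benign_reclaim(conn_t *c) {
  c->benign_posted = false;
  if (c->active_streams == 0) {
    printf("send GOAWAY: connection idle, buffers full\n");
  } else {
    printf("skip benign reclaim: %d streams still active\n",
           c->active_streams);
  }
}

/* Destructive: cancel one stream; stay registered while streams remain. */
static void destructive_reclaim(conn_t *c) {
  c->destructive_posted = false;
  if (c->active_streams > 0) {
    printf("cancel one stream (ENHANCE_YOUR_CALM)\n");
    c->active_streams--;
    if (c->active_streams > 0) {
      c->destructive_posted = true; /* more pressure may follow */
    }
  }
}

int main(void) {
  conn_t c = {.active_streams = 2};
  benign_reclaim(&c);      /* skipped: streams active */
  destructive_reclaim(&c); /* cancels one, re-posts */
  destructive_reclaim(&c); /* cancels the last one */
  benign_reclaim(&c);      /* now idle: GOAWAY */
  return 0;
}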
diff --git a/src/core/ext/transport/chttp2/transport/frame.h b/src/core/ext/transport/chttp2/transport/frame.h
index 507aae4100..1e444a91fd 100644
--- a/src/core/ext/transport/chttp2/transport/frame.h
+++ b/src/core/ext/transport/chttp2/transport/frame.h
@@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
-typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
-typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
+typedef struct grpc_chttp2_stream grpc_chttp2_stream;
+typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index 9046fbc453..8668816930 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
- grpc_byte_stream *bs;
- if (parser->parsing_frame) {
+ if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
- 1);
- }
- while (
- (bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
- grpc_byte_stream_destroy(exec_ctx, bs);
+ exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
+ GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@@ -145,22 +140,17 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
-grpc_error *grpc_chttp2_data_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *p,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ gpr_slice slice) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_data_parser *p = parser;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
char *msg;
- if (is_last && p->is_last_frame) {
- stream_parsing->received_close = 1;
- }
-
if (cur == end) {
return GRPC_ERROR_NONE;
}
@@ -171,7 +161,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@@ -184,7 +174,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
- (intptr_t)stream_parsing->id);
+ (intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@@ -201,7 +191,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@@ -209,7 +199,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@@ -217,7 +207,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@@ -225,7 +215,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@@ -234,35 +224,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
- grpc_chttp2_incoming_byte_stream_create(
- exec_ctx, transport_parsing, stream_parsing, p->frame_size,
- message_flags, &p->incoming_frames);
+ grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
+ message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
- stream_parsing->stats.incoming.data_bytes += p->frame_size;
+ s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
- GRPC_ERROR_NONE, 1);
+ GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
- stream_parsing->stats.incoming.data_bytes += p->frame_size;
+ s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
- GRPC_ERROR_NONE, 1);
+ GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@@ -272,10 +259,25 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
- stream_parsing->stats.incoming.data_bytes += remaining;
+ s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
+
+grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
+ grpc_chttp2_data_parser *p = parser;
+ grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
+
+ if (is_last && p->is_last_frame) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
+ GRPC_ERROR_NONE);
+ }
+
+ return error;
+}
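
The data-parser refactor above splits the byte-at-a-time work into parse_inner and moves end-of-stream handling into the wrapper. The GRPC_CHTTP2_DATA_FH_0 through FH_4 states accumulate the 5-byte gRPC message prefix: one compressed-flag byte followed by a 4-byte big-endian payload length. Here is a self-contained sketch of that prefix layout, assuming the whole prefix is already in one contiguous buffer (the real parser cannot assume that, which is why it keeps per-byte states); all names are illustrative.

/* Sketch only: decode the 5-byte gRPC message prefix in one shot. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint8_t compressed; /* 0 = plain, 1 = compressed */
  uint32_t length;    /* payload length that follows */
} message_prefix;

/* Returns 0 on success, -1 if fewer than 5 bytes are available. */
static int parse_message_prefix(const uint8_t *buf, size_t len,
                                message_prefix *out) {
  if (len < 5) return -1;
  out->compressed = buf[0];
  out->length = ((uint32_t)buf[1] << 24) | ((uint32_t)buf[2] << 16) |
                ((uint32_t)buf[3] << 8) | (uint32_t)buf[4];
  return 0;
}

int main(void) {
  const uint8_t wire[] = {0x00, 0x00, 0x00, 0x00, 0x05,
                          'h',  'e',  'l',  'l',  'o'};
  message_prefix h;
  if (parse_message_prefix(wire, sizeof(wire), &h) == 0) {
    printf("compressed=%u length=%u\n", (unsigned)h.compressed,
           (unsigned)h.length);
  }
  return 0;
}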
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index a21a7942b9..eb2d97d898 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
- grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
-grpc_error *grpc_chttp2_data_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index 299e27ad70..33d2269169 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_goaway_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
- transport_parsing->goaway_received = 1;
- transport_parsing->goaway_last_stream_index = p->last_stream_id;
- gpr_slice_unref(transport_parsing->goaway_text);
- transport_parsing->goaway_error = (grpc_status_code)p->error_code;
- transport_parsing->goaway_text =
- gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
+ grpc_chttp2_add_incoming_goaway(
+ exec_ctx, t, (uint32_t)p->error_code,
+ gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index eb4303405a..355104a5a7 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_goaway_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 1f814ab1bd..624f42649d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_ping_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
- grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
+ grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
- gpr_slice_buffer_add(&transport_parsing->qbuf,
+ gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}
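
The PING change above adds an explicit grpc_chttp2_initiate_write after queuing a ping acknowledgement, so the response does not sit in qbuf until some unrelated event happens to trigger a write. A rough sketch of the two branches once all eight opaque bytes have arrived; the helper names are made up for illustration.

/* Sketch only: PING handling with invented helper names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void complete_pending_ping(const uint8_t id[8]) {
  printf("ping %02x... acknowledged by peer\n", (unsigned)id[0]);
}

static void queue_ping_ack(const uint8_t id[8]) {
  printf("queue PING frame with ACK flag echoing %02x...\n", (unsigned)id[0]);
}

static void on_ping_received(bool is_ack, const uint8_t opaque[8]) {
  if (is_ack) {
    complete_pending_ping(opaque); /* peer answered our ping */
  } else {
    queue_ping_ack(opaque);        /* answer the peer's ping... */
    /* ...and make sure a write actually goes out, as the diff now does
       explicitly with grpc_chttp2_initiate_write. */
    printf("initiate write: ping response\n");
  }
}

int main(void) {
  uint8_t opaque[8] = {0xde, 0xad, 0xbe, 0xef, 0, 0, 0, 0};
  on_ping_received(false, opaque);
  on_ping_received(true, opaque);
  return 0;
}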
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index 5a8723421c..2071f647fb 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_ping_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index e3a3c9e4a7..9eac050797 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
+#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
+#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_rst_stream_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
- stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
+ s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
- stream_parsing->received_close = 1;
- if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
- stream_parsing->forced_close_error = grpc_error_set_int(
- GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
- (intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
- (((uint32_t)p->reason_bytes[1]) << 16) |
- (((uint32_t)p->reason_bytes[2]) << 8) |
- (((uint32_t)p->reason_bytes[3]))));
+ uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
+ (((uint32_t)p->reason_bytes[1]) << 16) |
+ (((uint32_t)p->reason_bytes[2]) << 8) |
+ (((uint32_t)p->reason_bytes[3]));
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (reason != GRPC_CHTTP2_NO_ERROR) {
+ error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
+ GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
+ grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)reason, s->deadline);
+ char *status_details;
+ gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
+ reason);
+ gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
+ gpr_free(status_details);
+ grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;
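
The RST_STREAM parser now reassembles the 4-byte big-endian error code once, treats NO_ERROR (0) as a clean close, and otherwise both synthesizes a status for the application (via grpc_chttp2_fake_status) and records the HTTP/2 error on the close. A small sketch of the decode-and-branch step with invented helper names; the HTTP/2-to-gRPC status mapping is omitted.

/* Sketch only: decode and branch on an RST_STREAM error code. */
#include <stdint.h>
#include <stdio.h>

#define HTTP2_NO_ERROR 0u

static uint32_t read_u32_be(const uint8_t b[4]) {
  return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
         ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

static void on_rst_stream(const uint8_t reason_bytes[4]) {
  uint32_t reason = read_u32_be(reason_bytes);
  if (reason != HTTP2_NO_ERROR) {
    printf("Received RST_STREAM with error code %u\n", (unsigned)reason);
  }
  printf("mark stream closed for reads and writes\n");
}

int main(void) {
  uint8_t enhance_your_calm[4] = {0, 0, 0, 0x0b}; /* ENHANCE_YOUR_CALM */
  on_rst_stream(enhance_your_calm);
  return 0;
}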
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index 11cf94f3ea..5a1f578a29 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_rst_stream_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 04b96c4cd9..92022f90c9 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
-grpc_error *grpc_chttp2_settings_parser_parse(
- grpc_exec_ctx *exec_ctx, void *p,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
- transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
- gpr_slice_buffer_add(&transport_parsing->qbuf,
- grpc_chttp2_settings_ack_create());
+ gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
- transport_parsing->last_incoming_stream_id, sp->error_value,
+ t->last_new_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
- &transport_parsing->qbuf);
+ &t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
- transport_parsing->initial_window_update =
+ t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
- (int)transport_parsing->initial_window_update);
+ (int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
- transport_parsing->is_client ? "CLI" : "SVR", parser->id,
- parser->value);
+ t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",
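
The settings parser above records the signed difference between the new and old SETTINGS_INITIAL_WINDOW_SIZE in t->initial_window_update rather than on a separate parsing struct. Per RFC 7540 section 6.9.2 that delta must later be applied to every open stream's flow-control window, and the result may legitimately go negative. A tiny arithmetic sketch with arbitrary values:

/* Sketch only: applying an initial-window-size delta to a stream window. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint32_t old_initial_window = 65535;  /* previous SETTINGS value */
  uint32_t new_initial_window = 131072; /* value just parsed */
  int64_t delta =
      (int64_t)new_initial_window - (int64_t)old_initial_window;

  int64_t stream_window = 10000; /* one open stream's current window */
  stream_window += delta;        /* may legally go negative */
  printf("delta=%lld new stream window=%lld\n", (long long)delta,
         (long long)stream_window);
  return 0;
}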
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index f654c598c8..4bfa944cf1 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
-grpc_error *grpc_chttp2_settings_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 3cf848fd5c..418166a6df 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+ grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
- if (stream_parsing != NULL) {
- stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
+ if (s != NULL) {
+ s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
- if (transport_parsing->incoming_stream_id != 0) {
- if (stream_parsing != NULL) {
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
- stream_parsing, outgoing_window,
+ if (t->incoming_stream_id != 0) {
+ if (s != NULL) {
+ bool was_zero = s->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
+ bool is_zero = s->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, false,
+ "stream.read_flow_control");
+ }
}
} else {
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
- outgoing_window, received_update);
+ bool was_zero = t->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
+ received_update);
+ bool is_zero = t->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_initiate_write(exec_ctx, t, false,
+ "new_global_flow_control");
+ }
}
}
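
The WINDOW_UPDATE handler above wraps the flow-control credit in a was_zero/is_zero pair so that only the transition from an exhausted window to a usable one wakes the writer (grpc_chttp2_become_writable for a stream, grpc_chttp2_initiate_write for the transport); credits that arrive while the window is already open do not schedule redundant write work. A standalone sketch of that edge-trigger pattern with invented names:

/* Sketch only: edge-triggered wakeup on window reopen. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t outgoing_window;
} flow_t;

static void credit(flow_t *f, int64_t received_update, const char *who) {
  bool was_zero = f->outgoing_window <= 0;
  f->outgoing_window += received_update;
  bool is_zero = f->outgoing_window <= 0;
  if (was_zero && !is_zero) {
    printf("%s: window reopened, initiate write\n", who);
  } else {
    printf("%s: window now %lld, no wakeup needed\n", who,
           (long long)f->outgoing_window);
  }
}

int main(void) {
  flow_t t = {.outgoing_window = 0};
  credit(&t, 1024, "transport"); /* 0 -> 1024: wake the writer */
  credit(&t, 1024, "transport"); /* already open: no wakeup */
  return 0;
}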
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index 1bcbbf9247..6e62f31872 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+ grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 522455f7dc..8180f78fc0 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -78,69 +78,96 @@ typedef enum {
a set of indirect jumps, and so not waste stack space. */
/* forward declarations for parsing states */
-static grpc_error *parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end, grpc_error *error);
-static grpc_error *still_parse_error(grpc_chttp2_hpack_parser *p,
+static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_illegal_op(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_key_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
static grpc_error *parse_value_string_with_indexed_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static grpc_error *parse_value_string_with_literal_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
-static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
/* we translate the first byte of a hpack field into one of these decoding
@@ -639,8 +666,8 @@ static const uint8_t inverse_base64[256] = {
};
/* emission helpers */
-static grpc_error *on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
- int add_to_table) {
+static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
+ grpc_mdelem *md, int add_to_table) {
if (add_to_table) {
grpc_error *err = grpc_chttp2_hptbl_add(&p->table, md);
if (err != GRPC_ERROR_NONE) return err;
@@ -649,7 +676,7 @@ static grpc_error *on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
GRPC_MDELEM_UNREF(md);
return GRPC_ERROR_CREATE("on_header callback not set");
}
- p->on_header(p->on_header_user_data, md);
+ p->on_header(exec_ctx, p->on_header_user_data, md);
return GRPC_ERROR_NONE;
}
@@ -661,78 +688,86 @@ static grpc_mdstr *take_string(grpc_chttp2_hpack_parser *p,
}
/* jump to the next state */
-static grpc_error *parse_next(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_next(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
p->state = *p->next_state++;
- return p->state(p, cur, end);
+ return p->state(exec_ctx, p, cur, end);
}
/* begin parsing a header: all functionality is encoded into lookup tables
above */
-static grpc_error *parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_begin;
return GRPC_ERROR_NONE;
}
- return first_byte_action[first_byte_lut[*cur]](p, cur, end);
+ return first_byte_action[first_byte_lut[*cur]](exec_ctx, p, cur, end);
}
/* stream dependency and prioritization data: we just skip it */
-static grpc_error *parse_stream_weight(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_weight(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_weight;
return GRPC_ERROR_NONE;
}
- return p->after_prioritization(p, cur + 1, end);
+ return p->after_prioritization(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep3(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep3;
return GRPC_ERROR_NONE;
}
- return parse_stream_weight(p, cur + 1, end);
+ return parse_stream_weight(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep2(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep2;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep3(p, cur + 1, end);
+ return parse_stream_dep3(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep1(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep1;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep2(p, cur + 1, end);
+ return parse_stream_dep2(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep0(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep0;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep1(p, cur + 1, end);
+ return parse_stream_dep1(exec_ctx, p, cur + 1, end);
}
/* emit an indexed field; for now just logs it to console; jumps to
begin the next field on completion */
-static grpc_error *finish_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
@@ -743,21 +778,23 @@ static grpc_error *finish_indexed_field(grpc_chttp2_hpack_parser *p,
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
GRPC_MDELEM_REF(md);
- grpc_error *err = on_hdr(p, md, 0);
+ grpc_error *err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
- return parse_begin(p, cur, end);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse an indexed field with index < 127 */
-static grpc_error *parse_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
- return finish_indexed_field(p, cur + 1, end);
+ return finish_indexed_field(exec_ctx, p, cur + 1, end);
}
/* parse an indexed field with index >= 127 */
-static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -766,49 +803,53 @@ static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x7f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* finish a literal header with incremental indexing: just log, and jump to
begin */
-static grpc_error *finish_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 1);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 1);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header with incremental indexing with no index */
-static grpc_error *finish_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 1);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 1);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header with incremental indexing; index < 63 */
-static grpc_error *parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0x3f;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header with incremental indexing; index >= 63 */
-static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -818,11 +859,12 @@ static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x3f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header with incremental indexing; index = 0 */
-static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -830,48 +872,52 @@ static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* finish a literal header without incremental indexing */
-static grpc_error *finish_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header without incremental indexing with index = 0 */
-static grpc_error *finish_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header without incremental indexing; index < 15 */
-static grpc_error *parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header without incremental indexing; index >= 15 */
-static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -881,11 +927,12 @@ static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header without incremental indexing; index == 0 */
-static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -893,48 +940,52 @@ static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* finish a literal header that is never indexed */
-static grpc_error *finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header that is never indexed with an extra value */
-static grpc_error *finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header that is never indexed; index < 15 */
-static grpc_error *parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header that is never indexed; index >= 15 */
-static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -944,11 +995,12 @@ static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header that is never indexed; index == 0 */
-static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -956,44 +1008,47 @@ static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* finish parsing a max table size change */
-static grpc_error *finish_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (grpc_http_trace) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
}
grpc_error *err =
grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a max table size change, max size < 15 */
-static grpc_error *parse_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
p->index = (*cur) & 0x1f;
- return finish_max_tbl_size(p, cur + 1, end);
+ return finish_max_tbl_size(exec_ctx, p, cur + 1, end);
}
/* parse a max table size change, max size >= 15 */
-static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE(
"More than two max table size changes in a single frame"));
}
@@ -1001,11 +1056,12 @@ static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x1f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* a parse error: jam the parse state into parse_error, and return error */
-static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end, grpc_error *err) {
GPR_ASSERT(err != GRPC_ERROR_NONE);
if (p->last_error == GRPC_ERROR_NONE) {
@@ -1015,24 +1071,27 @@ static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
return err;
}
-static grpc_error *still_parse_error(grpc_chttp2_hpack_parser *p,
+static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
return GRPC_ERROR_REF(p->last_error);
}
-static grpc_error *parse_illegal_op(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
GPR_ASSERT(cur != end);
char *msg;
gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
/* parse the 1st byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value0;
@@ -1042,15 +1101,16 @@ static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (*cur) & 0x7f;
if ((*cur) & 0x80) {
- return parse_value1(p, cur + 1, end);
+ return parse_value1(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 2nd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value1;
@@ -1060,15 +1120,16 @@ static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 7;
if ((*cur) & 0x80) {
- return parse_value2(p, cur + 1, end);
+ return parse_value2(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 3rd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value2;
@@ -1078,15 +1139,16 @@ static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 14;
if ((*cur) & 0x80) {
- return parse_value3(p, cur + 1, end);
+ return parse_value3(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 4th byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value3;
@@ -1096,15 +1158,16 @@ static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 21;
if ((*cur) & 0x80) {
- return parse_value4(p, cur + 1, end);
+ return parse_value4(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 5th byte of a varint into p->parsing.value
depending on the byte, we may overflow, and care must be taken */
-static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
uint8_t c;
uint32_t cur_value;
@@ -1130,9 +1193,9 @@ static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value = cur_value + add_value;
if ((*cur) & 0x80) {
- return parse_value5up(p, cur + 1, end);
+ return parse_value5up(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
error:
@@ -1142,13 +1205,14 @@ error:
*p->parsing.value, *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
/* parse any trailing bytes in a varint: it's possible to append an arbitrary
number of 0x80's and not affect the value - a zero will terminate - and
anything else will overflow */
-static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
while (cur != end && *cur == 0x80) {
++cur;
@@ -1160,7 +1224,7 @@ static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
}
if (*cur == 0) {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
char *msg;
@@ -1170,11 +1234,12 @@ static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
*p->parsing.value, *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
/* parse a string prefix */
-static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_string_prefix;
@@ -1185,9 +1250,9 @@ static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
p->huff = (*cur) >> 7;
if (p->strlen == 0x7f) {
p->parsing.value = &p->strlen;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
@@ -1205,7 +1270,8 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
str->length += (uint32_t)length;
}
-static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
grpc_chttp2_hpack_parser_string *str = p->parsing.str;
uint32_t bits;
@@ -1223,7 +1289,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte0;
@@ -1238,7 +1304,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte1;
@@ -1253,7 +1319,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte2;
@@ -1268,7 +1334,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte3;
@@ -1281,11 +1347,12 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
goto b64_byte0;
}
GPR_UNREACHABLE_CODE(return parse_error(
- p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
+ exec_ctx, p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
}
/* append a null terminator to a string */
-static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
uint8_t terminator = 0;
uint8_t decoded[2];
@@ -1298,7 +1365,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
break;
case B64_BYTE1:
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("illegal base64 encoding")); /* illegal encoding */
case B64_BYTE2:
bits = p->base64_buffer;
@@ -1308,7 +1375,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
bits & 0xffff);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
decoded[0] = (uint8_t)(bits >> 16);
append_bytes(str, decoded, 1);
@@ -1321,7 +1388,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
bits & 0xff);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
decoded[0] = (uint8_t)(bits >> 16);
decoded[1] = (uint8_t)(bits >> 8);
@@ -1334,13 +1401,14 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
}
/* decode a nibble from a huffman encoded stream */
-static grpc_error *huff_nibble(grpc_chttp2_hpack_parser *p, uint8_t nibble) {
+static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, uint8_t nibble) {
int16_t emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
if (emit != -1) {
if (emit >= 0 && emit < 256) {
uint8_t c = (uint8_t)emit;
- grpc_error *err = append_string(p, &c, (&c) + 1);
+ grpc_error *err = append_string(exec_ctx, p, &c, (&c) + 1);
if (err != GRPC_ERROR_NONE) return err;
} else {
assert(emit == 256);
@@ -1351,42 +1419,45 @@ static grpc_error *huff_nibble(grpc_chttp2_hpack_parser *p, uint8_t nibble) {
}
/* decode full bytes from a huffman encoded stream */
-static grpc_error *add_huff_bytes(grpc_chttp2_hpack_parser *p,
+static grpc_error *add_huff_bytes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
for (; cur != end; ++cur) {
- grpc_error *err = huff_nibble(p, *cur >> 4);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- err = huff_nibble(p, *cur & 0xf);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+ grpc_error *err = huff_nibble(exec_ctx, p, *cur >> 4);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ err = huff_nibble(exec_ctx, p, *cur & 0xf);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
}
return GRPC_ERROR_NONE;
}
/* decode some string bytes based on the current decoding mode
(huffman or not) */
-static grpc_error *add_str_bytes(grpc_chttp2_hpack_parser *p,
+static grpc_error *add_str_bytes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (p->huff) {
- return add_huff_bytes(p, cur, end);
+ return add_huff_bytes(exec_ctx, p, cur, end);
} else {
- return append_string(p, cur, end);
+ return append_string(exec_ctx, p, cur, end);
}
}
/* parse a string - tries to do large chunks at a time */
-static grpc_error *parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
size_t remaining = p->strlen - p->strgot;
size_t given = (size_t)(end - cur);
if (remaining <= given) {
- grpc_error *err = add_str_bytes(p, cur, cur + remaining);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- err = finish_str(p, cur + remaining, end);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_next(p, cur + remaining, end);
+ grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + remaining);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ err = finish_str(exec_ctx, p, cur + remaining, end);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_next(exec_ctx, p, cur + remaining, end);
} else {
- grpc_error *err = add_str_bytes(p, cur, cur + given);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+ grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + given);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
GPR_ASSERT(given <= UINT32_MAX - p->strgot);
p->strgot += (uint32_t)given;
p->state = parse_string;
@@ -1395,7 +1466,8 @@ static grpc_error *parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
}
/* begin parsing a string - performs setup, calls parse_string */
-static grpc_error *begin_parse_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end,
uint8_t binary,
grpc_chttp2_hpack_parser_string *str) {
@@ -1404,13 +1476,14 @@ static grpc_error *begin_parse_string(grpc_chttp2_hpack_parser *p,
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
- return parse_string(p, cur, end);
+ return parse_string(exec_ctx, p, cur, end);
}
/* parse the key string */
-static grpc_error *parse_key_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
- return begin_parse_string(p, cur, end, NOT_BINARY, &p->key);
+ return begin_parse_string(exec_ctx, p, cur, end, NOT_BINARY, &p->key);
}
/* check if a key represents a binary header or not */
@@ -1435,24 +1508,27 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
}
/* parse the value string */
-static grpc_error *parse_value_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end,
bool is_binary) {
- return begin_parse_string(p, cur, end, is_binary ? B64_BYTE0 : NOT_BINARY,
- &p->value);
+ return begin_parse_string(exec_ctx, p, cur, end,
+ is_binary ? B64_BYTE0 : NOT_BINARY, &p->value);
}
static grpc_error *parse_value_string_with_indexed_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
bool is_binary = false;
grpc_error *err = is_binary_indexed_header(p, &is_binary);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_value_string(p, cur, end, is_binary);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_value_string(exec_ctx, p, cur, end, is_binary);
}
static grpc_error *parse_value_string_with_literal_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end) {
- return parse_value_string(p, cur, end, is_binary_literal_header(p));
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
+ return parse_value_string(exec_ctx, p, cur, end, is_binary_literal_header(p));
}
/* PUBLIC INTERFACE */
@@ -1484,27 +1560,36 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p) {
gpr_free(p->value.str);
}
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
+grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end) {
/* TODO(ctiller): limit the distance of end from beg, and perform multiple
steps in the event of a large chunk of data to limit
stack space usage when no tail call optimization is
available */
- return p->state(p, beg, end);
+ return p->state(exec_ctx, p, beg, end);
}
-grpc_error *grpc_chttp2_header_parser_parse(
- grpc_exec_ctx *exec_ctx, void *hpack_parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+typedef void (*maybe_complete_func_type)(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+static const maybe_complete_func_type maybe_complete_funcs[] = {
+ grpc_chttp2_maybe_complete_recv_initial_metadata,
+ grpc_chttp2_maybe_complete_recv_trailing_metadata};
+
+grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *hpack_parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
- if (stream_parsing != NULL) {
- stream_parsing->stats.incoming.header_bytes += GPR_SLICE_LENGTH(slice);
+ if (s != NULL) {
+ s->stats.incoming.header_bytes += GPR_SLICE_LENGTH(slice);
}
grpc_error *error = grpc_chttp2_hpack_parser_parse(
- parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
+ exec_ctx, parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
if (error != GRPC_ERROR_NONE) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return error;
@@ -1517,20 +1602,19 @@ grpc_error *grpc_chttp2_header_parser_parse(
}
/* need to check for null stream: this can occur if we receive an invalid
stream id on a header */
- if (stream_parsing != NULL) {
+ if (s != NULL) {
if (parser->is_boundary) {
- if (stream_parsing->header_frames_received ==
- GPR_ARRAY_SIZE(stream_parsing->got_metadata_on_parse)) {
+ if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
return GRPC_ERROR_CREATE("Too many trailer frames");
}
- stream_parsing
- ->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
- stream_parsing->header_frames_received++;
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
+ s->published_metadata[s->header_frames_received] =
+ GRPC_METADATA_PUBLISHED_FROM_WIRE;
+ maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s);
+ s->header_frames_received++;
}
if (parser->is_eof) {
- stream_parsing->received_close = 1;
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
+ GRPC_ERROR_NONE);
}
}
parser->on_header = NULL;
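The hpack_parser.c hunks above are a mechanical refactor: every parser state function gains a leading grpc_exec_ctx *exec_ctx parameter and forwards it on each tail call (parse_value*, parse_next, parse_error, append_string, ...), so work that needs the execution context no longer has to reach for a global. A minimal, self-contained sketch of that calling pattern follows; the types below are simplified stand-ins, not the real gRPC definitions.

/* sketch: threading an execution context through resumable parser states */
#include <stdint.h>
#include <stdio.h>

typedef struct exec_ctx { int closures_scheduled; } exec_ctx;
typedef struct parser parser;

/* state functions take the exec_ctx first, mirroring the patch */
typedef int (*parser_state)(exec_ctx *ctx, parser *p,
                            const uint8_t *cur, const uint8_t *end);

struct parser {
  parser_state state; /* resumption point when input runs out */
  uint32_t value;
};

static int parse_done(exec_ctx *ctx, parser *p, const uint8_t *cur,
                      const uint8_t *end) {
  (void)p; (void)cur; (void)end;
  ctx->closures_scheduled++; /* work that needs the context happens here */
  return 0;
}

static int parse_byte(exec_ctx *ctx, parser *p, const uint8_t *cur,
                      const uint8_t *end) {
  if (cur == end) {   /* suspend: remember where to resume */
    p->state = parse_byte;
    return 0;
  }
  p->value = (p->value << 7) | (uint32_t)(*cur & 0x7f);
  /* forward the context on the tail call, as every call site in the diff now does */
  return parse_done(ctx, p, cur + 1, end);
}

int main(void) {
  exec_ctx ctx = {0};
  parser p = {parse_byte, 0};
  const uint8_t buf[] = {0x05};
  p.state(&ctx, &p, buf, buf + sizeof(buf));
  printf("value=%u scheduled=%d\n", p.value, ctx.closures_scheduled);
  return 0;
}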
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 78eb38db5e..0290c78d5a 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
- grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
+ const uint8_t *end);
typedef struct {
char *str;
@@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
- void (*on_header)(void *user_data, grpc_mdelem *md);
+ void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
+grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
-grpc_error *grpc_chttp2_header_parser_parse(
- grpc_exec_ctx *exec_ctx, void *hpack_parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *hpack_parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
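The maybe_complete_funcs table introduced in hpack_parser.c above exploits the fact that the first HEADERS frame on a stream carries initial metadata and the second carries trailing metadata, so completion is dispatched by indexing a two-entry function table with header_frames_received. A small sketch of that dispatch, with illustrative stand-in names rather than the real gRPC types:

/* sketch: per-frame-count dispatch for header completion */
#include <stdio.h>

typedef struct stream { int header_frames_received; } stream;

static void complete_initial_metadata(stream *s)  { (void)s; printf("initial metadata done\n"); }
static void complete_trailing_metadata(stream *s) { (void)s; printf("trailing metadata done\n"); }

typedef void (*maybe_complete_func)(stream *s);
static const maybe_complete_func maybe_complete_funcs[] = {
    complete_initial_metadata,   /* header_frames_received == 0 */
    complete_trailing_metadata}; /* header_frames_received == 1 */

static void on_header_frame_boundary(stream *s) {
  maybe_complete_funcs[s->header_frames_received](s);
  s->header_frames_received++;
}

int main(void) {
  stream s = {0};
  on_header_frame_boundary(&s); /* initial metadata */
  on_header_frame_boundary(&s); /* trailing metadata */
  return 0;
}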
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 04b788b702..e0c4a1e925 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
-typedef struct grpc_chttp2_transport grpc_chttp2_transport;
-typedef struct grpc_chttp2_stream grpc_chttp2_stream;
-
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
- GRPC_CHTTP2_LIST_ALL_STREAMS,
- GRPC_CHTTP2_LIST_CHECK_READ_OPS,
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
- GRPC_CHTTP2_LIST_WRITTEN,
- GRPC_CHTTP2_LIST_PARSING_SEEN,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
- /* streams waiting for the outgoing window in the writing path, they will be
- * merged to the stalled list or writable list under transport lock. */
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
+typedef enum {
+ GRPC_CHTTP2_WRITE_STATE_IDLE,
+ GRPC_CHTTP2_WRITE_STATE_WRITING,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
+} grpc_chttp2_write_state;
+
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@@ -144,6 +138,12 @@ typedef enum {
GRPC_NUM_SETTING_SETS
} grpc_chttp2_setting_set;
+typedef enum {
+ GRPC_CHTTP2_NO_GOAWAY_SEND,
+ GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED,
+ GRPC_CHTTP2_GOAWAY_SENT,
+} grpc_chttp2_sent_goaway_state;
+
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
uint8_t id[8];
@@ -152,6 +152,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
+typedef struct grpc_chttp2_write_cb {
+ int64_t call_at_byte;
+ grpc_closure *closure;
+ struct grpc_chttp2_write_cb *next;
+} grpc_chttp2_write_cb;
+
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@@ -161,12 +167,13 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
- int is_tail;
+ bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
+ uint32_t remaining_bytes;
struct {
grpc_closure closure;
@@ -178,12 +185,68 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
-typedef struct {
+struct grpc_chttp2_transport {
+ grpc_transport base; /* must be first */
+ gpr_refcount refs;
+ grpc_endpoint *ep;
+ char *peer_string;
+
+ grpc_combiner *combiner;
+
+ /** write execution state of the transport */
+ grpc_chttp2_write_state write_state;
+
+ /** is the transport destroying itself? */
+ uint8_t destroying;
+ /** has the upper layer closed the transport? */
+ uint8_t closed;
+
+ /** is there a read request to the endpoint outstanding? */
+ uint8_t endpoint_reading;
+
+ /** various lists of streams */
+ grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
+
+ /** maps stream id to grpc_chttp2_stream objects */
+ grpc_chttp2_stream_map stream_map;
+
+ grpc_closure write_action_begin_locked;
+ grpc_closure write_action;
+ grpc_closure write_action_end;
+ grpc_closure write_action_end_locked;
+
+ grpc_closure read_action_begin;
+ grpc_closure read_action_locked;
+
+ /** incoming read bytes */
+ gpr_slice_buffer read_buffer;
+
+ /** address to place a newly accepted stream - set and unset by
+ grpc_chttp2_parsing_accept_stream; used by init_stream to
+ publish the accepted server stream */
+ grpc_chttp2_stream **accepting_stream;
+
+ struct {
+ /* accept stream callback */
+ void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport, const void *server_data);
+ void *accept_stream_user_data;
+
+ /** connectivity tracking */
+ grpc_connectivity_state_tracker state_tracker;
+ } channel_callback;
+
+ /** data to write now */
+ gpr_slice_buffer outbuf;
+ /** hpack encoding */
+ grpc_chttp2_hpack_compressor hpack_compressor;
+ int64_t outgoing_window;
+ /** is this a client? */
+ uint8_t is_client;
+
/** data to write next write */
gpr_slice_buffer qbuf;
- /** window available for us to send to peer */
- int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@@ -192,10 +255,8 @@ typedef struct {
/** have we seen a goaway */
uint8_t seen_goaway;
/** have we sent a goaway */
- uint8_t sent_goaway;
+ grpc_chttp2_sent_goaway_state sent_goaway_state;
- /** is this transport a client? */
- uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@@ -212,49 +273,14 @@ typedef struct {
/** how far to lookahead in a stream? */
uint32_t stream_lookahead;
- /** last received stream id */
- uint32_t last_incoming_stream_id;
+ /** last new stream id */
+ uint32_t last_new_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
uint64_t ping_counter;
- /** concurrent stream count: updated when not parsing,
- so this is a strict over-estimation on the client */
- uint32_t concurrent_stream_count;
-} grpc_chttp2_transport_global;
-
-typedef struct {
- /** data to write now */
- gpr_slice_buffer outbuf;
- /** hpack encoding */
- grpc_chttp2_hpack_compressor hpack_compressor;
- int64_t outgoing_window;
- /** is this a client? */
- uint8_t is_client;
- /** callback for when writing is done */
- grpc_closure done_cb;
- /** maximum frame size */
- uint32_t max_frame_size;
-} grpc_chttp2_transport_writing;
-
-struct grpc_chttp2_transport_parsing {
- /** is this transport a client? (boolean) */
- uint8_t is_client;
-
- /** were settings updated? */
- uint8_t settings_updated;
- /** was a settings ack received? */
- uint8_t settings_ack_received;
- /** was a goaway frame received? */
- uint8_t goaway_received;
-
- /** initial window change */
- int64_t initial_window_update;
-
- /** data to write later - after parsing */
- gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -267,13 +293,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
+ /** initial window change */
+ int64_t initial_window_update;
+
/** window available for peer to send to us */
int64_t incoming_window;
- /** next stream id available at the time of beginning parsing */
- uint32_t next_stream_id;
- uint32_t last_incoming_stream_id;
-
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@@ -284,135 +309,54 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
- /* current max frame size */
- uint32_t max_frame_size;
-
/* active parser */
void *parser_data;
- grpc_chttp2_stream_parsing *incoming_stream;
+ grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
- /* received settings */
- uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
- /* last settings that were sent */
- uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
-
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
- int64_t outgoing_window;
+ grpc_chttp2_write_cb *write_cb_pool;
+
+ /* if non-NULL, close the transport with this error when writes are finished
+ */
+ grpc_error *close_transport_on_writes_finished;
+
+ /* buffer pool state */
+ /** have we scheduled a benign cleanup? */
+ bool benign_reclaimer_registered;
+ /** have we scheduled a destructive cleanup? */
+ bool destructive_reclaimer_registered;
+ /** benign cleanup closure */
+ grpc_closure benign_reclaimer;
+ grpc_closure benign_reclaimer_locked;
+ /** destructive cleanup closure */
+ grpc_closure destructive_reclaimer;
+ grpc_closure destructive_reclaimer_locked;
};
typedef enum {
- /** no writing activity allowed */
- GRPC_CHTTP2_WRITES_CORKED,
- /** no writing activity */
- GRPC_CHTTP2_WRITING_INACTIVE,
- /** write has been requested and scheduled against the workqueue */
- GRPC_CHTTP2_WRITE_SCHEDULED,
- /** write has been initiated after being reaped from the workqueue */
- GRPC_CHTTP2_WRITING,
- /** write has been initiated, AND another write needs to be started once it's
- done */
- GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
- GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
-} grpc_chttp2_write_state;
+ GRPC_METADATA_NOT_PUBLISHED,
+ GRPC_METADATA_SYNTHESIZED_FROM_FAKE,
+ GRPC_METADATA_PUBLISHED_FROM_WIRE,
+ GPRC_METADATA_PUBLISHED_AT_CLOSE
+} grpc_published_metadata_method;
-struct grpc_chttp2_transport {
- grpc_transport base; /* must be first */
- gpr_refcount refs;
- grpc_endpoint *ep;
- char *peer_string;
-
- /** when this drops to zero it's safe to shutdown the endpoint */
- gpr_refcount shutdown_ep_refs;
-
- struct {
- grpc_combiner *combiner;
-
- /** is a thread currently in the global lock */
- bool global_active;
- /** is a thread currently parsing */
- bool parsing_active;
- /** write execution state of the transport */
- grpc_chttp2_write_state write_state;
- /** has a check_read_ops been scheduled */
- bool check_read_ops_scheduled;
- } executor;
-
- /** is the transport destroying itself? */
- uint8_t destroying;
- /** has the upper layer closed the transport? */
- uint8_t closed;
-
- /** is there a read request to the endpoint outstanding? */
- uint8_t endpoint_reading;
-
- /** various lists of streams */
- grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
-
- /** global state for reading/writing */
- grpc_chttp2_transport_global global;
- /** state only accessible by the chain of execution that
- set writing_state >= GRPC_WRITING, and only by the writing closure
- chain. */
- grpc_chttp2_transport_writing writing;
- /** state only accessible by the chain of execution that
- set parsing_active=1 */
- grpc_chttp2_transport_parsing parsing;
-
- /** maps stream id to grpc_chttp2_stream objects;
- owned by the parsing thread when parsing */
- grpc_chttp2_stream_map parsing_stream_map;
-
- /** streams created by the client (possibly during parsing);
- merged with parsing_stream_map during unlock when no
- parsing is occurring */
- grpc_chttp2_stream_map new_stream_map;
-
- /** closure to execute writing */
- grpc_closure writing_action;
- /** closure to start reading from the endpoint */
- grpc_closure reading_action;
- grpc_closure reading_action_locked;
- grpc_closure post_parse_locked;
- /** closure to actually do parsing */
- grpc_closure parsing_action;
- /** closure to initiate writing */
- grpc_closure initiate_writing;
- /** closure to finish writing */
- grpc_closure terminate_writing;
- /** closure to flush read state up the stack */
- grpc_closure initiate_read_flush_locked;
-
- /** incoming read bytes */
- gpr_slice_buffer read_buffer;
-
- /** address to place a newly accepted stream - set and unset by
- grpc_chttp2_parsing_accept_stream; used by init_stream to
- publish the accepted server stream */
- grpc_chttp2_stream **accepting_stream;
-
- struct {
- /* accept stream callback */
- void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_transport *transport, const void *server_data);
- void *accept_stream_user_data;
+struct grpc_chttp2_stream {
+ grpc_chttp2_transport *t;
+ grpc_stream_refcount *refcount;
- /** connectivity tracking */
- grpc_connectivity_state_tracker state_tracker;
- } channel_callback;
+ grpc_closure destroy_stream;
+ void *destroy_stream_arg;
- /** Transport op to be applied post-parsing */
- grpc_transport_op *post_parsing_op;
-};
+ grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
+ uint8_t included[STREAM_LIST_COUNT];
-typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@@ -422,20 +366,22 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
- /** The number of bytes the upper layer has offered to read but we have
- not yet announced to HTTP2 flow control.
- As the upper layers offer to read more bytes, this value increases.
- As we advertise incoming flow control window, this value decreases. */
- uint32_t unannounced_incoming_window_for_parse;
- uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
- grpc_byte_stream *send_message;
- grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
+ grpc_byte_stream *fetching_send_message;
+ uint32_t fetched_send_message_length;
+ gpr_slice fetching_slice;
+ int64_t next_message_end_offset;
+ int64_t flow_controlled_bytes_written;
+ bool complete_fetch_covered_by_poller;
+ grpc_closure complete_fetch;
+ grpc_closure complete_fetch_locked;
+ grpc_closure *fetching_send_message_finished;
+
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@@ -455,95 +401,44 @@ typedef struct {
bool read_closed;
/** Are all published incoming byte streams closed. */
bool all_incoming_byte_streams_finished;
- /** Is this stream in the stream map. */
- bool in_stream_map;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
- bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
- bool published_initial_metadata;
- bool published_trailing_metadata;
+ grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
- grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
- grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
+ grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
-} grpc_chttp2_stream_global;
-
-typedef struct {
- /** HTTP2 stream id for this stream, or zero if one has not been assigned */
- uint32_t id;
- uint8_t fetching;
- bool sent_initial_metadata;
- uint8_t sent_message;
- uint8_t sent_trailing_metadata;
- uint8_t read_closed;
- /** send this initial metadata */
- grpc_metadata_batch *send_initial_metadata;
- grpc_byte_stream *send_message;
- grpc_metadata_batch *send_trailing_metadata;
- int64_t outgoing_window;
- /** how much window should we announce? */
- uint32_t announce_window;
- gpr_slice_buffer flow_controlled_buffer;
- gpr_slice fetching_slice;
- size_t stream_fetched;
- grpc_closure finished_fetch;
- /** stats gathered during the write */
- grpc_transport_one_way_stats stats;
-} grpc_chttp2_stream_writing;
-struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
- /** HTTP2 stream id for this stream, or zero if one has not been assigned */
- uint32_t id;
- /** has this stream received a close */
- uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
- /** which metadata did we get (on this parse) */
- uint8_t got_metadata_on_parse[2];
- /** should we raise the seen_error flag in transport_global */
- bool seen_error;
- bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
- /** amount of window given */
- int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
- /** stats gathered during the parse */
- grpc_transport_stream_stats stats;
- /** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
-};
-
-struct grpc_chttp2_stream {
- grpc_chttp2_transport *t;
- grpc_stream_refcount *refcount;
- grpc_chttp2_stream_global global;
- grpc_chttp2_stream_writing writing;
- grpc_chttp2_stream_parsing parsing;
-
- grpc_closure init_stream;
- grpc_closure destroy_stream;
- void *destroy_stream_arg;
+ bool sent_initial_metadata;
+ bool sent_trailing_metadata;
+ /** how much window should we announce? */
+ uint32_t announce_window;
+ gpr_slice_buffer flow_controlled_buffer;
- grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
- uint8_t included[STREAM_LIST_COUNT];
+ grpc_chttp2_write_cb *on_write_finished_cbs;
+ grpc_chttp2_write_cb *finish_after_write;
+ size_t sending_bytes;
};
/** Transport writing call flow:
@@ -559,162 +454,71 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
- are required, and schedule them if so */
-int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
-void grpc_chttp2_perform_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- grpc_endpoint *endpoint);
-void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
- void *transport_writing, grpc_error *error);
-void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
-
-void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_parsing *parsing);
+ are required, and frame them if so */
+bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
+void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error);
+
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
-grpc_error *grpc_chttp2_perform_read(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice);
-void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_parsing *parsing);
-
-bool grpc_chttp2_list_add_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
+grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice);
+
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
-int grpc_chttp2_list_pop_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing);
+int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
-
-void grpc_chttp2_list_add_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_have_writing_streams(
- grpc_chttp2_transport_writing *transport_writing);
-int grpc_chttp2_list_pop_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing **stream_writing);
-
-void grpc_chttp2_list_add_written_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_pop_written_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing);
-
-void grpc_chttp2_list_add_parsing_seen_stream(
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing);
-int grpc_chttp2_list_pop_parsing_seen_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-
-void grpc_chttp2_list_add_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_check_read_ops(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-bool grpc_chttp2_list_remove_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_writing_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-bool grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
-
-void grpc_chttp2_list_add_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_pop_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_remove_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-
-void grpc_chttp2_list_add_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_list_remove_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- uint32_t id);
-
-void grpc_chttp2_add_incoming_goaway(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- uint32_t goaway_error, gpr_slice goaway_text);
-
-void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-/* returns 1 if this is the last stream, 0 otherwise */
-int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
-int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
-void grpc_chttp2_for_all_streams(
- grpc_chttp2_transport_global *transport_global, void *user_data,
- void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
- grpc_chttp2_stream_global *stream_global));
-
-void grpc_chttp2_parsing_become_skip_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-
-void grpc_chttp2_complete_closure_step(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
- grpc_error *error);
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
+
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
+int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+
+grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+ uint32_t id);
+grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t id);
+
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t goaway_error,
+ gpr_slice goaway_text);
+
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_closure **pclosure,
+ grpc_error *error, const char *desc);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -805,57 +609,83 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream,
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
-void grpc_chttp2_mark_stream_closed(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
- grpc_error *error);
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int close_reads,
+ int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global);
+ grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
-#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
- grpc_chttp2_stream_ref(stream_global, reason)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
- grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
- const char *reason);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global,
+#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
+ grpc_chttp2_stream_ref(stream, reason)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream, reason)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
-#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
- grpc_chttp2_stream_ref(stream_global)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
- grpc_chttp2_stream_unref(exec_ctx, stream_global)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global);
+#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
+#endif
+
+//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
+#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
+ grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
+#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
+ grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line);
+#else
+#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
+#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
- uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, int from_parsing_thread);
+ grpc_error *error);
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_parsing *parsing,
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- bool covered_by_poller, const char *reason);
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, bool covered_by_poller,
+ const char *reason);
+
+void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ grpc_error *due_to_error);
+
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
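Among the internal.h changes above, grpc_chttp2_write_cb (call_at_byte, closure, next) together with the transport's write_cb_pool field has the shape of a simple free list of per-write callback records. The pool-management code itself is not part of this hunk, so the sketch below is an illustrative assumption with stand-in types, not the library's implementation.

/* sketch (assumed): free list of write-callback records */
#include <stdlib.h>

typedef struct closure { void (*fn)(void *arg); void *arg; } closure;

typedef struct write_cb {
  long call_at_byte;      /* run the closure once this many bytes are written */
  closure *cb;
  struct write_cb *next;
} write_cb;

typedef struct transport { write_cb *write_cb_pool; } transport;

/* take a record from the pool, or allocate a fresh one */
static write_cb *write_cb_get(transport *t) {
  write_cb *cb = t->write_cb_pool;
  if (cb != NULL) {
    t->write_cb_pool = cb->next;
  } else {
    cb = malloc(sizeof(*cb));
  }
  return cb;
}

/* return a record to the pool once its closure has been scheduled */
static void write_cb_put(transport *t, write_cb *cb) {
  cb->next = t->write_cb_pool;
  t->write_cb_pool = cb;
}

int main(void) {
  transport t = {NULL};
  write_cb *cb = write_cb_get(&t);
  cb->call_at_byte = 1024;
  write_cb_put(&t, cb);          /* recycle the record */
  free(write_cb_get(&t));        /* reuse it once more, then release */
  return 0;
}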
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 0e6d579ba9..8005350ae7 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -45,227 +45,34 @@
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-static grpc_error *init_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_header_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_continuation);
-static grpc_error *init_data_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_rst_stream_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_settings_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_window_update_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_ping_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_goaway_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_skip_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_header);
-
-static grpc_error *parse_frame_slice(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice, int is_last);
-
-void grpc_chttp2_prepare_to_read(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_parsing *stream_parsing;
-
- GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
-
- transport_parsing->next_stream_id = transport_global->next_stream_id;
- memcpy(transport_parsing->last_sent_settings,
- transport_global->settings[GRPC_SENT_SETTINGS],
- sizeof(transport_parsing->last_sent_settings));
- transport_parsing->max_frame_size =
- transport_global->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
-
- /* update the parsing view of incoming window */
- while (grpc_chttp2_list_pop_unannounced_incoming_window_available(
- transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- GRPC_CHTTP2_FLOW_MOVE_STREAM("parse", transport_parsing, stream_parsing,
- incoming_window, stream_global,
- unannounced_incoming_window_for_parse);
- }
-
- GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
-}
-
-void grpc_chttp2_publish_reads(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_parsing *stream_parsing;
- int was_zero;
- int is_zero;
-
- /* transport_parsing->last_incoming_stream_id is used as
- last-grpc_chttp2_stream-id when
- sending GOAWAY frame.
- https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-6.8
- says that last-grpc_chttp2_stream-id is peer-initiated grpc_chttp2_stream
- ID. So,
- since we don't have server pushed streams, client should send
- GOAWAY last-grpc_chttp2_stream-id=0 in this case. */
- if (!transport_parsing->is_client) {
- transport_global->last_incoming_stream_id =
- transport_parsing->last_incoming_stream_id;
- }
-
- /* update global settings */
- if (transport_parsing->settings_updated) {
- memcpy(transport_global->settings[GRPC_PEER_SETTINGS],
- transport_parsing->settings, sizeof(transport_parsing->settings));
- transport_parsing->settings_updated = 0;
- }
-
- /* update settings based on ack if received */
- if (transport_parsing->settings_ack_received) {
- memcpy(transport_global->settings[GRPC_ACKED_SETTINGS],
- transport_global->settings[GRPC_SENT_SETTINGS],
- GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
- transport_parsing->settings_ack_received = 0;
- transport_global->sent_local_settings = 0;
- }
-
- /* move goaway to the global state if we received one (it will be
- published later */
- if (transport_parsing->goaway_received) {
- grpc_chttp2_add_incoming_goaway(exec_ctx, transport_global,
- (uint32_t)transport_parsing->goaway_error,
- transport_parsing->goaway_text);
- transport_parsing->goaway_text = gpr_empty_slice();
- transport_parsing->goaway_received = 0;
- }
-
- /* propagate flow control tokens to global state */
- was_zero = transport_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("parsed", transport_global, outgoing_window,
- transport_parsing, outgoing_window);
- is_zero = transport_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "new_global_flow_control");
- }
-
- if (transport_parsing->incoming_window <
- transport_global->connection_window_target * 3 / 4) {
- int64_t announce_bytes = transport_global->connection_window_target -
- transport_parsing->incoming_window;
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_global,
- announce_incoming_window, announce_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
- incoming_window, announce_bytes);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "global incoming window");
- }
-
- /* for each stream that saw an update, fixup global state */
- while (grpc_chttp2_list_pop_parsing_seen_stream(
- transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- if (stream_parsing->seen_error) {
- stream_global->seen_error = true;
- stream_global->exceeded_metadata_size =
- stream_parsing->exceeded_metadata_size;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
-
- /* flush stats to global stream state */
- grpc_transport_move_stats(&stream_parsing->stats, &stream_global->stats);
-
- /* update outgoing flow control window */
- was_zero = stream_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_MOVE_STREAM("parsed", transport_global, stream_global,
- outgoing_window, stream_parsing,
- outgoing_window);
- is_zero = stream_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "stream.read_flow_control");
- }
-
- stream_global->max_recv_bytes -= (uint32_t)GPR_MIN(
- stream_global->max_recv_bytes, stream_parsing->received_bytes);
- stream_parsing->received_bytes = 0;
-
- /* publish incoming stream ops */
- if (stream_global->incoming_frames.tail != NULL) {
- stream_global->incoming_frames.tail->is_tail = 0;
- }
- if (stream_parsing->data_parser.incoming_frames.head != NULL) {
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
- grpc_chttp2_incoming_frame_queue_merge(
- &stream_global->incoming_frames,
- &stream_parsing->data_parser.incoming_frames);
- if (stream_global->incoming_frames.tail != NULL) {
- stream_global->incoming_frames.tail->is_tail = 1;
- }
-
- if (!stream_global->published_initial_metadata &&
- stream_parsing->got_metadata_on_parse[0]) {
- stream_parsing->got_metadata_on_parse[0] = 0;
- stream_global->published_initial_metadata = 1;
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
- stream_parsing->metadata_buffer[0],
- stream_global->received_initial_metadata);
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
- if (!stream_global->published_trailing_metadata &&
- stream_parsing->got_metadata_on_parse[1]) {
- stream_parsing->got_metadata_on_parse[1] = 0;
- stream_global->published_trailing_metadata = 1;
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
- stream_parsing->metadata_buffer[1],
- stream_global->received_trailing_metadata);
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
-
- if (stream_parsing->forced_close_error != GRPC_ERROR_NONE) {
- intptr_t reason;
- bool has_reason = grpc_error_get_int(stream_parsing->forced_close_error,
- GRPC_ERROR_INT_HTTP2_ERROR, &reason);
- if (has_reason && reason != GRPC_CHTTP2_NO_ERROR) {
- grpc_status_code status_code =
- has_reason
- ? grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)reason, stream_global->deadline)
- : GRPC_STATUS_INTERNAL;
- const char *status_details =
- grpc_error_string(stream_parsing->forced_close_error);
- gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
- grpc_error_free_string(status_details);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- status_code, &slice_details);
- }
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- 1, 1, stream_parsing->forced_close_error);
- }
-
- if (stream_parsing->received_close) {
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- 1, 0, GRPC_ERROR_NONE);
- }
- }
-}
-
-grpc_error *grpc_chttp2_perform_read(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice) {
+static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_continuation);
+static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_header);
+
+static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice,
+ int is_last);
+
+grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ gpr_slice slice) {
uint8_t *beg = GPR_SLICE_START_PTR(slice);
uint8_t *end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -273,7 +80,7 @@ grpc_error *grpc_chttp2_perform_read(
if (cur == end) return GRPC_ERROR_NONE;
- switch (transport_parsing->deframe_state) {
+ switch (t->deframe_state) {
case GRPC_DTS_CLIENT_PREFIX_0:
case GRPC_DTS_CLIENT_PREFIX_1:
case GRPC_DTS_CLIENT_PREFIX_2:
@@ -298,25 +105,22 @@ grpc_error *grpc_chttp2_perform_read(
case GRPC_DTS_CLIENT_PREFIX_21:
case GRPC_DTS_CLIENT_PREFIX_22:
case GRPC_DTS_CLIENT_PREFIX_23:
- while (cur != end && transport_parsing->deframe_state != GRPC_DTS_FH_0) {
- if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[transport_parsing
- ->deframe_state]) {
+ while (cur != end && t->deframe_state != GRPC_DTS_FH_0) {
+ if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state]) {
char *msg;
gpr_asprintf(
&msg,
"Connect string mismatch: expected '%c' (%d) got '%c' (%d) "
"at byte %d",
- GRPC_CHTTP2_CLIENT_CONNECT_STRING[transport_parsing
- ->deframe_state],
- (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING
- [transport_parsing->deframe_state],
- *cur, (int)*cur, transport_parsing->deframe_state);
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
+ (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
+ *cur, (int)*cur, t->deframe_state);
err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
++cur;
- ++transport_parsing->deframe_state;
+ ++t->deframe_state;
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -325,100 +129,95 @@ grpc_error *grpc_chttp2_perform_read(
dts_fh_0:
case GRPC_DTS_FH_0:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size = ((uint32_t)*cur) << 16;
+ t->incoming_frame_size = ((uint32_t)*cur) << 16;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_1;
+ t->deframe_state = GRPC_DTS_FH_1;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_1:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size |= ((uint32_t)*cur) << 8;
+ t->incoming_frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_2;
+ t->deframe_state = GRPC_DTS_FH_2;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_2:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size |= *cur;
+ t->incoming_frame_size |= *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_3;
+ t->deframe_state = GRPC_DTS_FH_3;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_3:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_type = *cur;
+ t->incoming_frame_type = *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_4;
+ t->deframe_state = GRPC_DTS_FH_4;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_4:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_flags = *cur;
+ t->incoming_frame_flags = *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_5;
+ t->deframe_state = GRPC_DTS_FH_5;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_5:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24;
+ t->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_6;
+ t->deframe_state = GRPC_DTS_FH_6;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_6:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 16;
+ t->incoming_stream_id |= ((uint32_t)*cur) << 16;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_7;
+ t->deframe_state = GRPC_DTS_FH_7;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_7:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 8;
+ t->incoming_stream_id |= ((uint32_t)*cur) << 8;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_8;
+ t->deframe_state = GRPC_DTS_FH_8;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_8:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur);
- transport_parsing->deframe_state = GRPC_DTS_FRAME;
- err = init_frame_parser(exec_ctx, transport_parsing);
+ t->incoming_stream_id |= ((uint32_t)*cur);
+ t->deframe_state = GRPC_DTS_FRAME;
+ err = init_frame_parser(exec_ctx, t);
if (err != GRPC_ERROR_NONE) {
return err;
}
- if (transport_parsing->incoming_stream_id != 0 &&
- transport_parsing->incoming_stream_id >
- transport_parsing->last_incoming_stream_id) {
- transport_parsing->last_incoming_stream_id =
- transport_parsing->incoming_stream_id;
- }
- if (transport_parsing->incoming_frame_size == 0) {
- err = parse_frame_slice(exec_ctx, transport_parsing, gpr_empty_slice(),
- 1);
+ if (t->incoming_frame_size == 0) {
+ err = parse_frame_slice(exec_ctx, t, gpr_empty_slice(), 1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->incoming_stream = NULL;
+ t->incoming_stream = NULL;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_0;
+ t->deframe_state = GRPC_DTS_FH_0;
return GRPC_ERROR_NONE;
}
goto dts_fh_0; /* loop */
- } else if (transport_parsing->incoming_frame_size >
- transport_parsing->max_frame_size) {
+ } else if (t->incoming_frame_size >
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) {
char *msg;
gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d",
- transport_parsing->incoming_frame_size,
- transport_parsing->max_frame_size);
+ t->incoming_frame_size,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]);
err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
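The GRPC_DTS_FH_0 through GRPC_DTS_FH_8 cases above accumulate the nine-byte HTTP/2 frame header one byte at a time, so deframing can stop and resume at any byte boundary between reads, and the size check now consults the ACKED max-frame-size setting directly instead of a cached copy. As a standalone sketch (not patch content, all names hypothetical), the same header layout decoded in one shot looks like this:

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode a complete 9-byte HTTP/2 frame header (24-bit length,
   type, flags, 31-bit stream id with the reserved bit masked), assuming
   the whole header is in hand -- unlike the resumable state machine in
   the patch above. */
typedef struct {
  uint32_t size;
  uint8_t type;
  uint8_t flags;
  uint32_t stream_id;
} frame_header;

static frame_header decode_frame_header(const uint8_t *p) {
  frame_header h;
  h.size = ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | (uint32_t)p[2];
  h.type = p[3];
  h.flags = p[4];
  h.stream_id = (((uint32_t)p[5] & 0x7f) << 24) | ((uint32_t)p[6] << 16) |
                ((uint32_t)p[7] << 8) | (uint32_t)p[8];
  return h;
}

int main(void) {
  const uint8_t wire[9] = {0, 0, 0, 0x4, 0x1, 0, 0, 0, 0}; /* SETTINGS ack */
  frame_header h = decode_frame_header(wire);
  printf("size=%u type=%u flags=%u stream=%u\n", h.size, (unsigned)h.type,
         (unsigned)h.flags, h.stream_id);
  return 0;
}

The byte-at-a-time form in the patch trades that simplicity for the ability to handle a header split across reads without extra buffering.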
@@ -429,41 +228,39 @@ grpc_error *grpc_chttp2_perform_read(
/* fallthrough */
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
- if ((uint32_t)(end - cur) == transport_parsing->incoming_frame_size) {
- err = parse_frame_slice(exec_ctx, transport_parsing,
+ if ((uint32_t)(end - cur) == t->incoming_frame_size) {
+ err = parse_frame_slice(exec_ctx, t,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->deframe_state = GRPC_DTS_FH_0;
- transport_parsing->incoming_stream = NULL;
+ t->deframe_state = GRPC_DTS_FH_0;
+ t->incoming_stream = NULL;
return GRPC_ERROR_NONE;
- } else if ((uint32_t)(end - cur) >
- transport_parsing->incoming_frame_size) {
+ } else if ((uint32_t)(end - cur) > t->incoming_frame_size) {
size_t cur_offset = (size_t)(cur - beg);
err = parse_frame_slice(
- exec_ctx, transport_parsing,
- gpr_slice_sub_no_ref(
- slice, cur_offset,
- cur_offset + transport_parsing->incoming_frame_size),
+ exec_ctx, t,
+ gpr_slice_sub_no_ref(slice, cur_offset,
+ cur_offset + t->incoming_frame_size),
1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- cur += transport_parsing->incoming_frame_size;
- transport_parsing->incoming_stream = NULL;
+ cur += t->incoming_frame_size;
+ t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- err = parse_frame_slice(exec_ctx, transport_parsing,
+ err = parse_frame_slice(exec_ctx, t,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
0);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->incoming_frame_size -= (uint32_t)(end - cur);
+ t->incoming_frame_size -= (uint32_t)(end - cur);
return GRPC_ERROR_NONE;
}
GPR_UNREACHABLE_CODE(return 0);
@@ -472,175 +269,172 @@ grpc_error *grpc_chttp2_perform_read(
GPR_UNREACHABLE_CODE(return 0);
}
-static grpc_error *init_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- if (transport_parsing->is_first_frame &&
- transport_parsing->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
+static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (t->is_first_frame &&
+ t->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
char *msg;
gpr_asprintf(
&msg, "Expected SETTINGS frame as the first frame, got frame type %d",
- transport_parsing->incoming_frame_type);
+ t->incoming_frame_type);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- transport_parsing->is_first_frame = false;
- if (transport_parsing->expect_continuation_stream_id != 0) {
- if (transport_parsing->incoming_frame_type !=
- GRPC_CHTTP2_FRAME_CONTINUATION) {
+ t->is_first_frame = false;
+ if (t->expect_continuation_stream_id != 0) {
+ if (t->incoming_frame_type != GRPC_CHTTP2_FRAME_CONTINUATION) {
char *msg;
gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x",
- transport_parsing->incoming_frame_type);
+ t->incoming_frame_type);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- if (transport_parsing->expect_continuation_stream_id !=
- transport_parsing->incoming_stream_id) {
+ if (t->expect_continuation_stream_id != t->incoming_stream_id) {
char *msg;
gpr_asprintf(
&msg,
"Expected CONTINUATION frame for grpc_chttp2_stream %08x, got "
"grpc_chttp2_stream %08x",
- transport_parsing->expect_continuation_stream_id,
- transport_parsing->incoming_stream_id);
+ t->expect_continuation_stream_id, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- return init_header_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_header_frame_parser(exec_ctx, t, 1);
}
- switch (transport_parsing->incoming_frame_type) {
+ switch (t->incoming_frame_type) {
case GRPC_CHTTP2_FRAME_DATA:
- return init_data_frame_parser(exec_ctx, transport_parsing);
+ return init_data_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_HEADER:
- return init_header_frame_parser(exec_ctx, transport_parsing, 0);
+ return init_header_frame_parser(exec_ctx, t, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
return GRPC_ERROR_CREATE("Unexpected CONTINUATION frame");
case GRPC_CHTTP2_FRAME_RST_STREAM:
- return init_rst_stream_parser(exec_ctx, transport_parsing);
+ return init_rst_stream_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_SETTINGS:
- return init_settings_frame_parser(exec_ctx, transport_parsing);
+ return init_settings_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
- return init_window_update_frame_parser(exec_ctx, transport_parsing);
+ return init_window_update_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_PING:
- return init_ping_parser(exec_ctx, transport_parsing);
+ return init_ping_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_GOAWAY:
- return init_goaway_parser(exec_ctx, transport_parsing);
+ return init_goaway_parser(exec_ctx, t);
default:
if (grpc_http_trace) {
- gpr_log(GPR_ERROR, "Unknown frame type %02x",
- transport_parsing->incoming_frame_type);
+ gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
}
static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
return GRPC_ERROR_NONE;
}
-static void skip_header(void *tp, grpc_mdelem *md) { GRPC_MDELEM_UNREF(md); }
+static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem *md) {
+ GRPC_MDELEM_UNREF(md);
+}
-static grpc_error *init_skip_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_header) {
+static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_header) {
if (is_header) {
- uint8_t is_eoh = transport_parsing->expect_continuation_stream_id != 0;
- transport_parsing->parser = grpc_chttp2_header_parser_parse;
- transport_parsing->parser_data = &transport_parsing->hpack_parser;
- transport_parsing->hpack_parser.on_header = skip_header;
- transport_parsing->hpack_parser.on_header_user_data = NULL;
- transport_parsing->hpack_parser.is_boundary = is_eoh;
- transport_parsing->hpack_parser.is_eof =
- (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
+ uint8_t is_eoh = t->expect_continuation_stream_id != 0;
+ t->parser = grpc_chttp2_header_parser_parse;
+ t->parser_data = &t->hpack_parser;
+ t->hpack_parser.on_header = skip_header;
+ t->hpack_parser.on_header_user_data = NULL;
+ t->hpack_parser.is_boundary = is_eoh;
+ t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0);
} else {
- transport_parsing->parser = skip_parser;
+ t->parser = skip_parser;
}
return GRPC_ERROR_NONE;
}
-void grpc_chttp2_parsing_become_skip_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- init_skip_frame_parser(
- exec_ctx, transport_parsing,
- transport_parsing->parser == grpc_chttp2_header_parser_parse);
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ init_skip_frame_parser(exec_ctx, t,
+ t->parser == grpc_chttp2_header_parser_parse);
}
-static grpc_error *update_incoming_window(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing) {
- uint32_t incoming_frame_size = transport_parsing->incoming_frame_size;
- if (incoming_frame_size > transport_parsing->incoming_window) {
- char *msg;
- gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_window);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
- gpr_free(msg);
- return err;
- }
-
- if (incoming_frame_size > stream_parsing->incoming_window) {
+static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ uint32_t incoming_frame_size = t->incoming_frame_size;
+ if (incoming_frame_size > t->incoming_window) {
char *msg;
gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
- transport_parsing->incoming_frame_size,
- stream_parsing->incoming_window);
+ t->incoming_frame_size, t->incoming_window);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", transport_parsing, incoming_window,
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
incoming_frame_size);
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
- incoming_window, incoming_frame_size);
- stream_parsing->received_bytes += incoming_frame_size;
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+ if (s != NULL) {
+ if (incoming_frame_size > s->incoming_window) {
+ char *msg;
+ gpr_asprintf(&msg,
+ "frame of size %d overflows incoming window of %" PRId64,
+ t->incoming_frame_size, s->incoming_window);
+ grpc_error *err = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return err;
+ }
+
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", t, s, incoming_window,
+ incoming_frame_size);
+ s->received_bytes += incoming_frame_size;
+ s->max_recv_bytes -=
+ (uint32_t)GPR_MIN(s->max_recv_bytes, incoming_frame_size);
+ }
return GRPC_ERROR_NONE;
}
-static grpc_error *init_data_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_parsing *stream_parsing =
- grpc_chttp2_parsing_lookup_stream(transport_parsing,
- transport_parsing->incoming_stream_id);
+static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_NONE;
- if (stream_parsing == NULL) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ err = update_incoming_window(exec_ctx, t, s);
+ if (err != GRPC_ERROR_NONE) {
+ goto error_handler;
}
- stream_parsing->stats.incoming.framing_bytes += 9;
- if (stream_parsing->received_close) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ if (s == NULL) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
- if (err == GRPC_ERROR_NONE) {
- err = update_incoming_window(exec_ctx, transport_parsing, stream_parsing);
+ s->stats.incoming.framing_bytes += 9;
+ if (err == GRPC_ERROR_NONE && s->read_closed) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
if (err == GRPC_ERROR_NONE) {
- err = grpc_chttp2_data_parser_begin_frame(
- &stream_parsing->data_parser, transport_parsing->incoming_frame_flags,
- stream_parsing->id);
+ err = grpc_chttp2_data_parser_begin_frame(&s->data_parser,
+ t->incoming_frame_flags, s->id);
}
+error_handler:
if (err == GRPC_ERROR_NONE) {
- transport_parsing->incoming_stream = stream_parsing;
- transport_parsing->parser = grpc_chttp2_data_parser_parse;
- transport_parsing->parser_data = &stream_parsing->data_parser;
+ t->incoming_stream = s;
+ t->parser = grpc_chttp2_data_parser_parse;
+ t->parser_data = &s->data_parser;
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
- stream_parsing->received_close = 1;
- stream_parsing->forced_close_error = err;
+ if (s != NULL) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, err);
+ }
gpr_slice_buffer_add(
- &transport_parsing->qbuf,
- grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
- &stream_parsing->stats.outgoing));
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
+ GRPC_CHTTP2_PROTOCOL_ERROR,
+ &s->stats.outgoing));
+ return init_skip_frame_parser(exec_ctx, t, 0);
} else {
return err;
}
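update_incoming_window above now debits the connection-level window first and only then, when the frame maps to a live stream, checks and debits the stream-level window, folding the max_recv_bytes bookkeeping (previously done in the deleted publish-time block at the top of this diff) into the same place. A compilable sketch of that two-level debit, with stand-in types rather than the gRPC structs, looks roughly like:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: hypothetical stand-ins for the transport/stream structs. */
typedef struct { int64_t incoming_window; } xport_sketch;
typedef struct { int64_t incoming_window; int64_t received_bytes; } strm_sketch;

/* Mirrors the order in the patch: the connection window is debited even
   when the stream-level check subsequently fails, and a NULL stream means
   the frame is connection-only (or the stream is already gone). */
static bool debit_incoming_windows(xport_sketch *t, strm_sketch *s,
                                   uint32_t frame_size) {
  if ((int64_t)frame_size > t->incoming_window) return false;
  t->incoming_window -= frame_size;
  if (s != NULL) {
    if ((int64_t)frame_size > s->incoming_window) return false;
    s->incoming_window -= frame_size;
    s->received_bytes += frame_size;
  }
  return true;
}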
@@ -648,23 +442,22 @@ static grpc_error *init_data_frame_parser(
static void free_timeout(void *p) { gpr_free(p); }
-static void on_initial_header(void *tp, grpc_mdelem *md) {
- grpc_chttp2_transport_parsing *transport_parsing = tp;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
+static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_mdelem *md) {
+ grpc_chttp2_transport *t = tp;
+ grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
- GPR_ASSERT(stream_parsing);
+ GPR_ASSERT(s != NULL);
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", stream_parsing->id,
- transport_parsing->is_client ? "CLI" : "SVR",
+ GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
/* TODO(ctiller): check for a status like " 0" */
- stream_parsing->seen_error = true;
+ s->seen_error = true;
}
if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
@@ -681,306 +474,273 @@ static void on_initial_header(void *tp, grpc_mdelem *md) {
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &stream_parsing->metadata_buffer[0],
+ &s->metadata_buffer[0],
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
GRPC_MDELEM_UNREF(md);
} else {
- const size_t new_size =
- stream_parsing->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
- grpc_chttp2_transport_global *transport_global =
- &TRANSPORT_FROM_PARSING(transport_parsing)->global;
+ const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
const size_t metadata_size_limit =
- transport_global->settings[GRPC_LOCAL_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (new_size > metadata_size_limit) {
- if (!stream_parsing->exceeded_metadata_size) {
- gpr_log(GPR_DEBUG,
- "received initial metadata size exceeds limit (%" PRIuPTR
- " vs. %" PRIuPTR ")",
- new_size, metadata_size_limit);
- stream_parsing->seen_error = true;
- stream_parsing->exceeded_metadata_size = true;
- }
+ gpr_log(GPR_DEBUG,
+ "received initial metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
+ new_size, metadata_size_limit);
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE("received initial metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(
- &stream_parsing->metadata_buffer[0], md);
+ grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
}
}
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
-
GPR_TIMER_END("on_initial_header", 0);
}
-static void on_trailing_header(void *tp, grpc_mdelem *md) {
- grpc_chttp2_transport_parsing *transport_parsing = tp;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
+static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_mdelem *md) {
+ grpc_chttp2_transport *t = tp;
+ grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
- GPR_ASSERT(stream_parsing);
+ GPR_ASSERT(s != NULL);
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", stream_parsing->id,
- transport_parsing->is_client ? "CLI" : "SVR",
+ GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
/* TODO(ctiller): check for a status like " 0" */
- stream_parsing->seen_error = true;
+ s->seen_error = true;
}
- const size_t new_size =
- stream_parsing->metadata_buffer[1].size + GRPC_MDELEM_LENGTH(md);
- grpc_chttp2_transport_global *transport_global =
- &TRANSPORT_FROM_PARSING(transport_parsing)->global;
+ const size_t new_size = s->metadata_buffer[1].size + GRPC_MDELEM_LENGTH(md);
const size_t metadata_size_limit =
- transport_global->settings[GRPC_LOCAL_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (new_size > metadata_size_limit) {
- if (!stream_parsing->exceeded_metadata_size) {
- gpr_log(GPR_DEBUG,
- "received trailing metadata size exceeds limit (%" PRIuPTR
- " vs. %" PRIuPTR ")",
- new_size, metadata_size_limit);
- stream_parsing->seen_error = true;
- stream_parsing->exceeded_metadata_size = true;
- }
+ gpr_log(GPR_DEBUG,
+ "received trailing metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
+ new_size, metadata_size_limit);
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE("received trailing metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(
- &stream_parsing->metadata_buffer[1], md);
+ grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md);
}
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
-
GPR_TIMER_END("on_trailing_header", 0);
}
-static grpc_error *init_header_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_continuation) {
- uint8_t is_eoh = (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
- int via_accept = 0;
- grpc_chttp2_stream_parsing *stream_parsing;
+static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_continuation) {
+ uint8_t is_eoh =
+ (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
+ grpc_chttp2_stream *s;
/* TODO(ctiller): when to increment header_frames_received? */
if (is_eoh) {
- transport_parsing->expect_continuation_stream_id = 0;
+ t->expect_continuation_stream_id = 0;
} else {
- transport_parsing->expect_continuation_stream_id =
- transport_parsing->incoming_stream_id;
+ t->expect_continuation_stream_id = t->incoming_stream_id;
}
if (!is_continuation) {
- transport_parsing->header_eof = (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
+ t->header_eof =
+ (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
- stream_parsing = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
+ s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (s == NULL) {
if (is_continuation) {
- gpr_log(GPR_ERROR,
- "grpc_chttp2_stream disbanded before CONTINUATION received");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_ERROR,
+ "grpc_chttp2_stream disbanded before CONTINUATION received"));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- if (transport_parsing->is_client) {
- if ((transport_parsing->incoming_stream_id & 1) &&
- transport_parsing->incoming_stream_id <
- transport_parsing->next_stream_id) {
+ if (t->is_client) {
+ if ((t->incoming_stream_id & 1) &&
+ t->incoming_stream_id < t->next_stream_id) {
/* this is an old (probably cancelled) grpc_chttp2_stream */
} else {
- gpr_log(GPR_ERROR,
- "ignoring new grpc_chttp2_stream creation on client");
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
}
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- } else if (transport_parsing->last_incoming_stream_id >
- transport_parsing->incoming_stream_id) {
- gpr_log(GPR_ERROR,
- "ignoring out of order new grpc_chttp2_stream request on server; "
- "last grpc_chttp2_stream "
- "id=%d, new grpc_chttp2_stream id=%d",
- transport_parsing->last_incoming_stream_id,
- transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- } else if ((transport_parsing->incoming_stream_id & 1) == 0) {
- gpr_log(GPR_ERROR,
- "ignoring grpc_chttp2_stream with non-client generated index %d",
- transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ } else if (t->last_new_stream_id >= t->incoming_stream_id) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR,
+ "ignoring out of order new grpc_chttp2_stream request on server; "
+ "last grpc_chttp2_stream "
+ "id=%d, new grpc_chttp2_stream id=%d",
+ t->last_new_stream_id, t->incoming_stream_id));
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ } else if ((t->incoming_stream_id & 1) == 0) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR,
+ "ignoring grpc_chttp2_stream with non-client generated index %d",
+ t->incoming_stream_id));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- stream_parsing = transport_parsing->incoming_stream =
- grpc_chttp2_parsing_accept_stream(
- exec_ctx, transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
- gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ t->last_new_stream_id = t->incoming_stream_id;
+ s = t->incoming_stream =
+ grpc_chttp2_parsing_accept_stream(exec_ctx, t, t->incoming_stream_id);
+ if (s == NULL) {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- via_accept = 1;
} else {
- transport_parsing->incoming_stream = stream_parsing;
- }
- GPR_ASSERT(stream_parsing != NULL && (via_accept == 0 || via_accept == 1));
- stream_parsing->stats.incoming.framing_bytes += 9;
- if (stream_parsing->received_close) {
- gpr_log(GPR_ERROR, "skipping already closed grpc_chttp2_stream header");
- transport_parsing->incoming_stream = NULL;
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- }
- transport_parsing->parser = grpc_chttp2_header_parser_parse;
- transport_parsing->parser_data = &transport_parsing->hpack_parser;
- switch (stream_parsing->header_frames_received) {
+ t->incoming_stream = s;
+ }
+ GPR_ASSERT(s != NULL);
+ s->stats.incoming.framing_bytes += 9;
+ if (s->read_closed) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
+ t->incoming_stream = NULL;
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ }
+ t->parser = grpc_chttp2_header_parser_parse;
+ t->parser_data = &t->hpack_parser;
+ switch (s->header_frames_received) {
case 0:
- transport_parsing->hpack_parser.on_header = on_initial_header;
+ t->hpack_parser.on_header = on_initial_header;
break;
case 1:
- transport_parsing->hpack_parser.on_header = on_trailing_header;
+ t->hpack_parser.on_header = on_trailing_header;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- transport_parsing->hpack_parser.on_header_user_data = transport_parsing;
- transport_parsing->hpack_parser.is_boundary = is_eoh;
- transport_parsing->hpack_parser.is_eof =
- (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
- if (!is_continuation && (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
- grpc_chttp2_hpack_parser_set_has_priority(&transport_parsing->hpack_parser);
+ t->hpack_parser.on_header_user_data = t;
+ t->hpack_parser.is_boundary = is_eoh;
+ t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0);
+ if (!is_continuation &&
+ (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
+ grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
}
return GRPC_ERROR_NONE;
}
-static grpc_error *init_window_update_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_window_update_parser_begin_frame(
- &transport_parsing->simple.window_update,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.window_update, t->incoming_frame_size,
+ t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- if (transport_parsing->incoming_stream_id != 0) {
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ if (t->incoming_stream_id != 0) {
+ grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (s == NULL) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
- stream_parsing->stats.incoming.framing_bytes += 9;
+ s->stats.incoming.framing_bytes += 9;
}
- transport_parsing->parser = grpc_chttp2_window_update_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.window_update;
+ t->parser = grpc_chttp2_window_update_parser_parse;
+ t->parser_data = &t->simple.window_update;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_ping_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_ping_parser_begin_frame(
- &transport_parsing->simple.ping, transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- transport_parsing->parser = grpc_chttp2_ping_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.ping;
+ t->parser = grpc_chttp2_ping_parser_parse;
+ t->parser_data = &t->simple.ping;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_rst_stream_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_rst_stream_parser_begin_frame(
- &transport_parsing->simple.rst_stream,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (!transport_parsing->incoming_stream) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
- }
- stream_parsing->stats.incoming.framing_bytes += 9;
- transport_parsing->parser = grpc_chttp2_rst_stream_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.rst_stream;
+ grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (!t->incoming_stream) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
+ }
+ s->stats.incoming.framing_bytes += 9;
+ t->parser = grpc_chttp2_rst_stream_parser_parse;
+ t->parser_data = &t->simple.rst_stream;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_goaway_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_goaway_parser_begin_frame(
- &transport_parsing->goaway_parser, transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- transport_parsing->parser = grpc_chttp2_goaway_parser_parse;
- transport_parsing->parser_data = &transport_parsing->goaway_parser;
+ t->parser = grpc_chttp2_goaway_parser_parse;
+ t->parser_data = &t->goaway_parser;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_settings_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- if (transport_parsing->incoming_stream_id != 0) {
+static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (t->incoming_stream_id != 0) {
return GRPC_ERROR_CREATE("Settings frame received for grpc_chttp2_stream");
}
grpc_error *err = grpc_chttp2_settings_parser_begin_frame(
- &transport_parsing->simple.settings,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags, transport_parsing->settings);
+ &t->simple.settings, t->incoming_frame_size, t->incoming_frame_flags,
+ t->settings[GRPC_PEER_SETTINGS]);
if (err != GRPC_ERROR_NONE) {
return err;
}
- if (transport_parsing->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
- transport_parsing->settings_ack_received = 1;
+ if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
+ memcpy(t->settings[GRPC_ACKED_SETTINGS], t->settings[GRPC_SENT_SETTINGS],
+ GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
grpc_chttp2_hptbl_set_max_bytes(
- &transport_parsing->hpack_parser.table,
- transport_parsing
- ->last_sent_settings[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
- transport_parsing->max_frame_size =
- transport_parsing
- ->last_sent_settings[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
+ &t->hpack_parser.table,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+ t->sent_local_settings = 0;
}
- transport_parsing->parser = grpc_chttp2_settings_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.settings;
+ t->parser = grpc_chttp2_settings_parser_parse;
+ t->parser_data = &t->simple.settings;
return GRPC_ERROR_NONE;
}
-/*
-static int is_window_update_legal(int64_t window_update, int64_t window) {
- return window + window_update < MAX_WINDOW;
-}
-*/
-
-static grpc_error *parse_frame_slice(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice, int is_last) {
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
- grpc_error *err = transport_parsing->parser(
- exec_ctx, transport_parsing->parser_data, transport_parsing,
- stream_parsing, slice, is_last);
+static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice,
+ int is_last) {
+ grpc_chttp2_stream *s = t->incoming_stream;
+ grpc_error *err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
if (err == GRPC_ERROR_NONE) {
- if (stream_parsing) {
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
- }
- return GRPC_ERROR_NONE;
+ return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
if (grpc_http_trace) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
grpc_error_free_string(msg);
}
- grpc_chttp2_parsing_become_skip_parser(exec_ctx, transport_parsing);
- if (stream_parsing) {
- stream_parsing->forced_close_error = err;
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ if (s) {
+ s->forced_close_error = err;
gpr_slice_buffer_add(
- &transport_parsing->qbuf,
- grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
- &stream_parsing->stats.outgoing));
+ &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
+ GRPC_CHTTP2_PROTOCOL_ERROR,
+ &s->stats.outgoing));
} else {
GRPC_ERROR_UNREF(err);
}
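Across the new init_data_frame_parser and parse_frame_slice, errors carrying GRPC_ERROR_INT_STREAM_ID are absorbed without dropping the connection: the transport switches to the skip parser for the rest of the frame, the affected stream is marked closed (or its forced_close_error recorded), and an RST_STREAM with GRPC_CHTTP2_PROTOCOL_ERROR is queued on t->qbuf; any other error is returned from grpc_chttp2_perform_read. A compilable sketch of that split, with every name a hypothetical stand-in rather than a gRPC API, is:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int code;           /* 0 == success */
  bool stream_scoped; /* analogue of carrying GRPC_ERROR_INT_STREAM_ID */
} parse_status;

typedef struct {
  bool skip_rest_of_frame;        /* analogue of becoming the skip parser */
  uint32_t pending_rst_stream_id; /* stream to RST with PROTOCOL_ERROR */
} conn_sketch;

/* Returns true when reading may continue (success, or a stream-scoped
   error that was handled locally); false when the error must propagate. */
static bool absorb_stream_errors(conn_sketch *c, uint32_t stream_id,
                                 parse_status st) {
  if (st.code == 0) return true;
  if (st.stream_scoped) {
    c->skip_rest_of_frame = true;
    c->pending_rst_stream_id = stream_id;
    return true;
  }
  return false;
}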
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 4dc4968248..6d25b3ae57 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
-#define TRANSPORT_FROM_GLOBAL(tg) \
- ((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
- global)))
-
-#define STREAM_FROM_GLOBAL(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
-
-#define TRANSPORT_FROM_WRITING(tw) \
- ((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
- writing)))
-
-#define STREAM_FROM_WRITING(sw) \
- ((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
-
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-#define STREAM_FROM_PARSING(sp) \
- ((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
-
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,57 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
-bool grpc_chttp2_list_add_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- GPR_ASSERT(stream_global->id != 0);
- return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE);
-}
-
-int grpc_chttp2_list_pop_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_WRITABLE);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-bool grpc_chttp2_list_remove_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE);
-}
-
-void grpc_chttp2_list_add_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITING));
-}
-
-int grpc_chttp2_list_have_writing_streams(
- grpc_chttp2_transport_writing *transport_writing) {
- return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
- GRPC_CHTTP2_LIST_WRITING);
-}
-
-int grpc_chttp2_list_pop_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
- GRPC_CHTTP2_LIST_WRITING);
- if (r != 0) {
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_written_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITTEN);
-}
-
-int grpc_chttp2_list_pop_written_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
- GRPC_CHTTP2_LIST_WRITTEN);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- GPR_ASSERT(stream_global->id != 0);
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
-}
-
-void grpc_chttp2_list_remove_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(
- TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
-}
-
-int grpc_chttp2_list_pop_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
- grpc_chttp2_stream *stream;
- int r =
- stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_parsing_seen_stream(
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing) {
- stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
- STREAM_FROM_PARSING(stream_parsing),
- GRPC_CHTTP2_LIST_PARSING_SEEN);
-}
-
-int grpc_chttp2_list_pop_parsing_seen_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
- GRPC_CHTTP2_LIST_PARSING_SEEN);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
-}
-
-int grpc_chttp2_list_pop_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_check_read_ops(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- if (!t->executor.check_read_ops_scheduled) {
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_read_flush_locked,
- GRPC_ERROR_NONE, false);
- t->executor.check_read_ops_scheduled = true;
- }
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
-}
-
-bool grpc_chttp2_list_remove_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ GPR_ASSERT(s->id != 0);
+ return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-int grpc_chttp2_list_pop_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_writing_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
- gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
- if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
- GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
- }
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
+int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-bool grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream *stream;
- bool out = false;
- grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
- while (stream_list_pop(transport, &stream,
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
- gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
- stream->global.id);
- grpc_chttp2_list_add_stalled_by_transport(transport_writing,
- &stream->writing);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
- "chttp2_writing_stalled");
- out = true;
- }
- return out;
+bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-void grpc_chttp2_list_add_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-int grpc_chttp2_list_pop_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
+ return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
-void grpc_chttp2_list_remove_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-void grpc_chttp2_list_add_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-int grpc_chttp2_list_pop_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
- return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
- return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-void grpc_chttp2_for_all_streams(
- grpc_chttp2_transport_global *transport_global, void *user_data,
- void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
- grpc_chttp2_stream_global *stream_global)) {
- grpc_chttp2_stream *s;
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
- s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
- cb(transport_global, user_data, &s->global);
- }
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
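With the separate global/writing/parsing views gone, stream_lists.c keeps only helpers that take the real grpc_chttp2_transport and grpc_chttp2_stream, so the offsetof-based FROM_* macros above are no longer needed. The self-contained sketch below (made-up names, a single list, LIFO order instead of the tail-append used by the real code) shows the intrusive-list shape these helpers rely on: each stream embeds its own per-list link and membership flag, so add and pop never allocate.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { LIST_WRITABLE = 0, NUM_LISTS = 1 };

typedef struct stream_sketch {
  int id;
  struct stream_sketch *next[NUM_LISTS];
  bool included[NUM_LISTS];
} stream_sketch;

typedef struct {
  stream_sketch *head[NUM_LISTS];
} xport_lists_sketch;

static bool list_add(xport_lists_sketch *t, stream_sketch *s, int list) {
  if (s->included[list]) return false; /* already queued on this list */
  s->included[list] = true;
  s->next[list] = t->head[list];
  t->head[list] = s;
  return true;
}

static bool list_pop(xport_lists_sketch *t, stream_sketch **s, int list) {
  if (t->head[list] == NULL) return false;
  *s = t->head[list];
  t->head[list] = (*s)->next[list];
  (*s)->included[list] = false;
  return true;
}

int main(void) {
  xport_lists_sketch t = {{NULL}};
  stream_sketch a = {1, {NULL}, {false}}, b = {3, {NULL}, {false}};
  list_add(&t, &a, LIST_WRITABLE);
  list_add(&t, &b, LIST_WRITABLE);
  stream_sketch *s;
  while (list_pop(&t, &s, LIST_WRITABLE)) printf("pop stream %d\n", s->id);
  return 0;
}

The drain loop at the end has the same shape grpc_chttp2_begin_write uses below with grpc_chttp2_list_pop_writable_stream.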
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.c
index f70791c422..5f5a28446d 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.c
@@ -77,6 +77,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
+ GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
if (count == capacity) {
if (map->free > capacity / 4) {
@@ -96,40 +97,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
-void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
- grpc_chttp2_stream_map *dst) {
- /* if src is empty we dont need to do anything */
- if (src->count == src->free) {
- return;
- }
- /* if dst is empty we simply need to swap */
- if (dst->count == dst->free) {
- GPR_SWAP(grpc_chttp2_stream_map, *src, *dst);
- return;
- }
- /* the first element of src must be greater than the last of dst...
- * however the maps may need compacting for this property to hold */
- if (src->keys[0] <= dst->keys[dst->count - 1]) {
- src->count = compact(src->keys, src->values, src->count);
- src->free = 0;
- dst->count = compact(dst->keys, dst->values, dst->count);
- dst->free = 0;
- }
- GPR_ASSERT(src->keys[0] > dst->keys[dst->count - 1]);
- /* if dst doesn't have capacity, resize */
- if (dst->count + src->count > dst->capacity) {
- dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
- dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
- dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
- }
- memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
- memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
- dst->count += src->count;
- dst->free += src->free;
- src->count = 0;
- src->free = 0;
-}
-
static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
@@ -170,6 +137,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
if (map->free == map->count) {
map->free = map->count = 0;
}
+ GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
}
return out;
}
@@ -183,6 +151,17 @@ size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
return map->count - map->free;
}
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
+ if (map->count == map->free) {
+ return NULL;
+ }
+ if (map->free != 0) {
+ map->count = compact(map->keys, map->values, map->count);
+ map->free = 0;
+ }
+ return map->values[((size_t)rand()) % map->count];
+}
+
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
void (*f)(void *user_data, uint32_t key,
void *value),
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index b1d59ca6a3..203f640680 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -65,13 +65,12 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
or NULL otherwise */
void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
-/* Move all elements of src into dst */
-void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
- grpc_chttp2_stream_map *dst);
-
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
+/* Return a random entry */
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map);
+
/* How many (populated) entries are in the stream map? */
size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);
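stream_map.h drops grpc_chttp2_stream_map_move_into, presumably unnecessary once the separate parsing map is folded into the transport, and gains grpc_chttp2_stream_map_rand, which compacts out previously deleted slots (the 'free' entries) and then indexes a random live value, so a single modulo pick is uniform over the remaining streams. A self-contained sketch of that compact-then-pick idea, using stand-in types rather than the gRPC map, is:

#include <stddef.h>
#include <stdlib.h>

/* Sketch only: deleted entries are assumed to be tombstoned as NULL
   values, which is what makes compaction-before-pick necessary. */
typedef struct {
  unsigned *keys;
  void **values;
  size_t count; /* slots in use, including tombstones */
  size_t free;  /* number of tombstoned slots */
} map_sketch;

static size_t compact_live(unsigned *keys, void **values, size_t count) {
  size_t out = 0;
  for (size_t i = 0; i < count; i++) {
    if (values[i] != NULL) {
      keys[out] = keys[i];
      values[out] = values[i];
      out++;
    }
  }
  return out;
}

static void *map_rand(map_sketch *m) {
  if (m->count == m->free) return NULL; /* nothing live */
  if (m->free != 0) {                   /* drop tombstones first */
    m->count = compact_live(m->keys, m->values, m->count);
    m->free = 0;
  }
  return m->values[(size_t)rand() % m->count];
}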
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 979515bd54..b39695a1a5 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -40,349 +40,221 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
-static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_writing *transport_writing);
+static void add_to_write_list(grpc_chttp2_write_cb **list,
+ grpc_chttp2_write_cb *cb) {
+ cb->next = *list;
+ *list = cb;
+}
+
+static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
+ grpc_error *error) {
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
+ "finish_write_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+}
-int grpc_chttp2_unlocking_check_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_writing *stream_writing;
+static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int64_t send_bytes,
+ grpc_chttp2_write_cb **list, grpc_error *error) {
+ grpc_chttp2_write_cb *cb = *list;
+ *list = NULL;
+ s->flow_controlled_bytes_written += send_bytes;
+ while (cb) {
+ grpc_chttp2_write_cb *next = cb->next;
+ if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
+ finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
+ } else {
+ add_to_write_list(list, cb);
+ }
+ cb = next;
+ }
+ GRPC_ERROR_UNREF(error);
+}
- GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
+bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s;
- transport_writing->max_frame_size =
- transport_global->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
+ GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
- if (transport_global->dirtied_local_settings &&
- !transport_global->sent_local_settings) {
+ if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
- &transport_writing->outbuf,
+ &t->outbuf,
grpc_chttp2_settings_create(
- transport_global->settings[GRPC_SENT_SETTINGS],
- transport_global->settings[GRPC_LOCAL_SETTINGS],
- transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
- transport_global->force_send_settings = 0;
- transport_global->dirtied_local_settings = 0;
- transport_global->sent_local_settings = 1;
+ t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
+ t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
+ t->force_send_settings = 0;
+ t->dirtied_local_settings = 0;
+ t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
- gpr_slice_buffer_move_into(&transport_global->qbuf,
- &transport_writing->outbuf);
- GPR_ASSERT(transport_global->qbuf.count == 0);
+ gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
+ GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
- &transport_writing->hpack_compressor,
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+ &t->hpack_compressor,
+ t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
- GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
- transport_global, outgoing_window);
- if (transport_writing->outgoing_window > 0) {
- while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
- &stream_global)) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "transport.read_flow_control");
+ if (t->outgoing_window > 0) {
+ while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, false,
+ "transport.read_flow_control");
}
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
- while (grpc_chttp2_list_pop_writable_stream(
- transport_global, transport_writing, &stream_global, &stream_writing)) {
- bool sent_initial_metadata = stream_writing->sent_initial_metadata;
- bool become_writable = false;
+ while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
+ bool sent_initial_metadata = s->sent_initial_metadata;
+ bool now_writing = false;
- stream_writing->id = stream_global->id;
- stream_writing->read_closed = stream_global->read_closed;
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
+ t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
+ s->send_initial_metadata != NULL, s->announce_window));
- GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
- outgoing_window, stream_global,
- outgoing_window);
-
- if (!sent_initial_metadata && stream_global->send_initial_metadata) {
- stream_writing->send_initial_metadata =
- stream_global->send_initial_metadata;
- stream_global->send_initial_metadata = NULL;
- become_writable = true;
+ /* send initial metadata if it's available */
+ if (!sent_initial_metadata && s->send_initial_metadata) {
+ grpc_chttp2_encode_header(
+ &t->hpack_compressor, s->id, s->send_initial_metadata, 0,
+ t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ &s->stats.outgoing, &t->outbuf);
+ s->send_initial_metadata = NULL;
+ s->sent_initial_metadata = true;
sent_initial_metadata = true;
+ now_writing = true;
+ }
+ /* send any window updates */
+ if (s->announce_window > 0) {
+ uint32_t announce = s->announce_window;
+ gpr_slice_buffer_add(&t->outbuf,
+ grpc_chttp2_window_update_create(
+ s->id, s->announce_window, &s->stats.outgoing));
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
- if (stream_global->send_message != NULL) {
- gpr_slice hdr = gpr_slice_malloc(5);
- uint8_t *p = GPR_SLICE_START_PTR(hdr);
- uint32_t len = stream_global->send_message->length;
- GPR_ASSERT(stream_writing->send_message == NULL);
- p[0] = (stream_global->send_message->flags &
- GRPC_WRITE_INTERNAL_COMPRESS) != 0;
- p[1] = (uint8_t)(len >> 24);
- p[2] = (uint8_t)(len >> 16);
- p[3] = (uint8_t)(len >> 8);
- p[4] = (uint8_t)(len);
- gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
- if (stream_global->send_message->length > 0) {
- stream_writing->send_message = stream_global->send_message;
- } else {
- stream_writing->send_message = NULL;
+ /* send any body bytes, if allowed by flow control */
+ if (s->flow_controlled_buffer.length > 0) {
+ uint32_t max_outgoing =
+ (uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ GPR_MIN(s->outgoing_window, t->outgoing_window));
+ if (max_outgoing > 0) {
+ uint32_t send_bytes =
+ (uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
+ bool is_last_data_frame =
+ s->fetching_send_message == NULL &&
+ send_bytes == s->flow_controlled_buffer.length;
+ bool is_last_frame =
+ is_last_data_frame && s->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(s->send_trailing_metadata);
+ grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
+ is_last_frame, &s->stats.outgoing,
+ &t->outbuf);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
+ send_bytes);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
+ send_bytes);
+ if (is_last_frame) {
+ s->send_trailing_metadata = NULL;
+ s->sent_trailing_metadata = true;
+ if (!t->is_client && !s->read_closed) {
+ gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
+ s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
+ }
+ }
+ s->sending_bytes += send_bytes;
+ now_writing = true;
+ if (s->flow_controlled_buffer.length > 0) {
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
+ grpc_chttp2_list_add_writable_stream(t, s);
+ }
+ } else if (t->outgoing_window == 0) {
+ grpc_chttp2_list_add_stalled_by_transport(t, s);
+ now_writing = true;
}
- stream_writing->stream_fetched = 0;
- stream_global->send_message = NULL;
}
- if ((stream_writing->send_message != NULL ||
- stream_writing->flow_controlled_buffer.length > 0) &&
- stream_writing->outgoing_window > 0) {
- if (transport_writing->outgoing_window > 0) {
- become_writable = true;
+ if (s->send_trailing_metadata != NULL &&
+ s->fetching_send_message == NULL &&
+ s->flow_controlled_buffer.length == 0) {
+ if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
+ grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
+ &s->stats.outgoing, &t->outbuf);
} else {
- grpc_chttp2_list_add_stalled_by_transport(transport_writing,
- stream_writing);
+ grpc_chttp2_encode_header(
+ &t->hpack_compressor, s->id, s->send_trailing_metadata, true,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ &s->stats.outgoing, &t->outbuf);
}
+ s->send_trailing_metadata = NULL;
+ s->sent_trailing_metadata = true;
+ if (!t->is_client && !s->read_closed) {
+ gpr_slice_buffer_add(
+ &t->outbuf, grpc_chttp2_rst_stream_create(
+ s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
+ }
+ now_writing = true;
}
- if (stream_global->send_trailing_metadata) {
- stream_writing->send_trailing_metadata =
- stream_global->send_trailing_metadata;
- stream_global->send_trailing_metadata = NULL;
- become_writable = true;
- }
- }
-
- if (!stream_global->read_closed &&
- stream_global->unannounced_incoming_window_for_writing > 1024) {
- GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
- announce_window, stream_global,
- unannounced_incoming_window_for_writing);
- become_writable = true;
}
- if (become_writable) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ if (now_writing) {
+ if (!grpc_chttp2_list_add_writing_stream(t, s)) {
+ /* already in writing list: drop ref */
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
+ }
} else {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
- if (transport_global->announce_incoming_window > 0) {
- uint32_t announced = (uint32_t)GPR_MIN(
- transport_global->announce_incoming_window, UINT32_MAX);
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
- announce_incoming_window, announced);
+ if (t->announce_incoming_window > 0) {
+ uint32_t announced =
+ (uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
+ announced);
grpc_transport_one_way_stats throwaway_stats;
- gpr_slice_buffer_add(
- &transport_writing->outbuf,
- grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
- }
-
- GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
-
- return transport_writing->outbuf.count > 0 ||
- grpc_chttp2_list_have_writing_streams(transport_writing);
-}
-
-void grpc_chttp2_perform_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- grpc_endpoint *endpoint) {
- GPR_ASSERT(transport_writing->outbuf.count > 0 ||
- grpc_chttp2_list_have_writing_streams(transport_writing));
-
- finalize_outbuf(exec_ctx, transport_writing);
-
- GPR_ASSERT(endpoint);
-
- if (transport_writing->outbuf.count > 0) {
- grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
- &transport_writing->done_cb);
- } else {
- grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
- NULL);
+ gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
+ 0, announced, &throwaway_stats));
}
-}
-
-static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream_writing *stream_writing;
- GPR_TIMER_BEGIN("finalize_outbuf", 0);
+ GPR_TIMER_END("grpc_chttp2_begin_write", 0);
- bool is_first_data_frame = true;
- while (
- grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
- uint32_t max_outgoing =
- (uint32_t)GPR_MIN(transport_writing->max_frame_size,
- GPR_MIN(stream_writing->outgoing_window,
- transport_writing->outgoing_window));
- /* send initial metadata if it's available */
- if (stream_writing->send_initial_metadata != NULL) {
- grpc_chttp2_encode_header(
- &transport_writing->hpack_compressor, stream_writing->id,
- stream_writing->send_initial_metadata, 0,
- transport_writing->max_frame_size, &stream_writing->stats,
- &transport_writing->outbuf);
- stream_writing->send_initial_metadata = NULL;
- stream_writing->sent_initial_metadata = 1;
- }
- /* send any window updates */
- if (stream_writing->announce_window > 0 &&
- stream_writing->send_initial_metadata == NULL) {
- uint32_t announce = stream_writing->announce_window;
- gpr_slice_buffer_add(
- &transport_writing->outbuf,
- grpc_chttp2_window_update_create(stream_writing->id,
- stream_writing->announce_window,
- &stream_writing->stats));
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
- announce_window, announce);
- stream_writing->announce_window = 0;
- }
- /* fetch any body bytes */
- while (!stream_writing->fetching && stream_writing->send_message &&
- stream_writing->flow_controlled_buffer.length < max_outgoing &&
- stream_writing->stream_fetched <
- stream_writing->send_message->length) {
- if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
- &stream_writing->fetching_slice, max_outgoing,
- &stream_writing->finished_fetch)) {
- stream_writing->stream_fetched +=
- GPR_SLICE_LENGTH(stream_writing->fetching_slice);
- if (stream_writing->stream_fetched ==
- stream_writing->send_message->length) {
- stream_writing->send_message = NULL;
- }
- gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
- stream_writing->fetching_slice);
- } else {
- stream_writing->fetching = 1;
- }
- }
- /* send any body bytes */
- if (stream_writing->flow_controlled_buffer.length > 0) {
- if (max_outgoing > 0) {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, stream_writing->flow_controlled_buffer.length);
- int is_last_data_frame =
- stream_writing->send_message == NULL &&
- send_bytes == stream_writing->flow_controlled_buffer.length;
- int is_last_frame = is_last_data_frame &&
- stream_writing->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(
- stream_writing->send_trailing_metadata);
- grpc_chttp2_encode_data(
- stream_writing->id, &stream_writing->flow_controlled_buffer,
- send_bytes, is_last_frame, &stream_writing->stats,
- &transport_writing->outbuf);
- if (is_first_data_frame) {
- /* TODO(dgq): this is a hack. It'll be fix in a future refactoring */
- stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
- is_first_data_frame = false;
- }
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
- stream_writing, outgoing_window,
- send_bytes);
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
- outgoing_window, send_bytes);
- if (is_last_frame) {
- stream_writing->send_trailing_metadata = NULL;
- stream_writing->sent_trailing_metadata = 1;
- }
- if (is_last_data_frame) {
- GPR_ASSERT(stream_writing->send_message == NULL);
- stream_writing->sent_message = 1;
- }
- } else if (transport_writing->outgoing_window == 0) {
- grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
- stream_writing);
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- }
- /* send trailing metadata if it's available and we're ready for it */
- if (stream_writing->send_message == NULL &&
- stream_writing->flow_controlled_buffer.length == 0 &&
- stream_writing->send_trailing_metadata != NULL) {
- if (grpc_metadata_batch_is_empty(
- stream_writing->send_trailing_metadata)) {
- grpc_chttp2_encode_data(
- stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
- &stream_writing->stats, &transport_writing->outbuf);
- } else {
- grpc_chttp2_encode_header(
- &transport_writing->hpack_compressor, stream_writing->id,
- stream_writing->send_trailing_metadata, 1,
- transport_writing->max_frame_size, &stream_writing->stats,
- &transport_writing->outbuf);
- }
- if (!transport_writing->is_client && !stream_writing->read_closed) {
- gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_rst_stream_create(
- stream_writing->id, GRPC_CHTTP2_NO_ERROR,
- &stream_writing->stats));
- }
- stream_writing->send_trailing_metadata = NULL;
- stream_writing->sent_trailing_metadata = 1;
- }
- /* if there's more to write, then loop, otherwise prepare to finish the
- * write */
- if ((stream_writing->flow_controlled_buffer.length > 0 ||
- (stream_writing->send_message && !stream_writing->fetching)) &&
- stream_writing->outgoing_window > 0) {
- if (transport_writing->outgoing_window > 0) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
- } else {
- grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
- stream_writing);
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- } else {
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- }
-
- GPR_TIMER_END("finalize_outbuf", 0);
+ return t->outbuf.count > 0;
}
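
The data-framing branch above caps each DATA frame at the smallest of the acked SETTINGS_MAX_FRAME_SIZE, the stream's outgoing window, and the transport's outgoing window, and re-queues the stream when buffered bytes remain. A minimal standalone sketch of that arithmetic, with made-up window values and a local min helper standing in for GPR_MIN:

#include <stdint.h>
#include <stdio.h>

/* illustrative values only; GPR_MIN is replaced by a local helper */
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void) {
  uint32_t max_frame_size = 16384;   /* acked SETTINGS_MAX_FRAME_SIZE */
  uint32_t stream_window = 5000;     /* s->outgoing_window */
  uint32_t transport_window = 70000; /* t->outgoing_window */
  uint32_t buffered = 9000;          /* s->flow_controlled_buffer.length */

  uint32_t max_outgoing =
      min_u32(max_frame_size, min_u32(stream_window, transport_window));
  uint32_t send_bytes = min_u32(max_outgoing, buffered);

  /* max_outgoing = 5000, send_bytes = 5000; the 4000 buffered bytes that
     remain keep the stream on the writable list for a later write */
  printf("max_outgoing=%u send_bytes=%u\n", max_outgoing, send_bytes);
  return 0;
}
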
-void grpc_chttp2_cleanup_writing(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
- GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
- grpc_chttp2_stream_writing *stream_writing;
- grpc_chttp2_stream_global *stream_global;
-
- if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
- transport_writing)) {
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "resume_stalled_stream");
- }
+void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
+ grpc_chttp2_stream *s;
- while (grpc_chttp2_list_pop_written_stream(
- transport_global, transport_writing, &stream_global, &stream_writing)) {
- if (stream_writing->sent_initial_metadata) {
+ while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
+ if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
+ exec_ctx, t, s, &s->send_initial_metadata_finished,
+ GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
- grpc_transport_move_one_way_stats(&stream_writing->stats,
- &stream_global->stats.outgoing);
- if (stream_writing->sent_message) {
- GPR_ASSERT(stream_writing->send_message == NULL);
- grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished, GRPC_ERROR_NONE);
- stream_writing->sent_message = 0;
+ if (s->sending_bytes != 0) {
+ update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
+ &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
+ s->sending_bytes = 0;
}
- if (stream_writing->sent_trailing_metadata) {
+ if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
- }
- if (stream_writing->sent_trailing_metadata) {
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- !transport_global->is_client, 1,
- GRPC_ERROR_NONE);
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
+ GRPC_ERROR_REF(error));
}
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
- gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
- GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
+ gpr_slice_buffer_reset_and_unref(&t->outbuf);
+ GRPC_ERROR_UNREF(error);
+ GPR_TIMER_END("grpc_chttp2_end_write", 0);
}
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 770b4e0a79..401a2ad4fe 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -66,22 +66,59 @@ static grpc_arg copy_arg(const grpc_arg *src) {
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
const grpc_arg *to_add,
size_t num_to_add) {
+ return grpc_channel_args_copy_and_add_and_remove(src, NULL, 0, to_add,
+ num_to_add);
+}
+
+grpc_channel_args *grpc_channel_args_copy_and_remove(
+ const grpc_channel_args *src, const char **to_remove,
+ size_t num_to_remove) {
+ return grpc_channel_args_copy_and_add_and_remove(src, to_remove,
+ num_to_remove, NULL, 0);
+}
+
+static bool should_remove_arg(const grpc_arg *arg, const char **to_remove,
+ size_t num_to_remove) {
+ for (size_t i = 0; i < num_to_remove; ++i) {
+ if (strcmp(arg->key, to_remove[i]) == 0) return true;
+ }
+ return false;
+}
+
+grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
+ const grpc_arg *to_add, size_t num_to_add) {
+ // Figure out how many args we'll be copying.
+ size_t num_args_to_copy = 0;
+ if (src != NULL) {
+ for (size_t i = 0; i < src->num_args; ++i) {
+ if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) {
+ ++num_args_to_copy;
+ }
+ }
+ }
+ // Create result.
grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
- size_t i;
- size_t src_num_args = (src == NULL) ? 0 : src->num_args;
- if (!src && !to_add) {
- dst->num_args = 0;
+ dst->num_args = num_args_to_copy + num_to_add;
+ if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->num_args = src_num_args + num_to_add;
dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
- for (i = 0; i < src_num_args; i++) {
- dst->args[i] = copy_arg(&src->args[i]);
+ // Copy args from src that are not being removed.
+ size_t dst_idx = 0;
+ if (src != NULL) {
+ for (size_t i = 0; i < src->num_args; ++i) {
+ if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) {
+ dst->args[dst_idx++] = copy_arg(&src->args[i]);
+ }
+ }
}
- for (i = 0; i < num_to_add; i++) {
- dst->args[i + src_num_args] = copy_arg(&to_add[i]);
+ // Add args from to_add.
+ for (size_t i = 0; i < num_to_add; ++i) {
+ dst->args[dst_idx++] = copy_arg(&to_add[i]);
}
+ GPR_ASSERT(dst_idx == dst->num_args);
return dst;
}
@@ -278,6 +315,18 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
return 0;
}
+const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
+ const char *name) {
+ if (args != NULL) {
+ for (size_t i = 0; i < args->num_args; ++i) {
+ if (strcmp(args->args[i].key, name) == 0) {
+ return &args->args[i];
+ }
+ }
+ }
+ return NULL;
+}
+
int grpc_channel_arg_get_integer(grpc_arg *arg, grpc_integer_options options) {
if (arg->type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key);
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 17b22ce669..88fc0e37a3 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -52,6 +52,17 @@ grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
const grpc_arg *to_add,
size_t num_to_add);
+/** Copies the arguments in \a src except for those whose keys are in
+ \a to_remove. */
+grpc_channel_args *grpc_channel_args_copy_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove);
+
+/** Copies the arguments from \a src except for those whose keys are in
+ \a to_remove and appends the arguments in \a to_add. */
+grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
+ const grpc_arg *to_add, size_t num_to_add);
+
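
A minimal sketch of how the combined add-and-remove helper declared above can be used; the key name "grpc.example_arg" is hypothetical, chosen only to illustrate replacing an existing integer argument in one pass:

#include "src/core/lib/channel/channel_args.h"

/* Hypothetical key, for illustration only: drop any existing
   "grpc.example_arg" and append a fresh integer value in one call. */
static grpc_channel_args *replace_example_arg(const grpc_channel_args *src,
                                              int new_value) {
  static const char *to_remove[] = {"grpc.example_arg"};
  grpc_arg to_add;
  to_add.type = GRPC_ARG_INTEGER;
  to_add.key = "grpc.example_arg";
  to_add.value.integer = new_value;
  return grpc_channel_args_copy_and_add_and_remove(src, to_remove, 1, &to_add,
                                                   1);
}
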
/** Concatenate args from \a a and \a b into a new instance */
grpc_channel_args *grpc_channel_args_merge(const grpc_channel_args *a,
const grpc_channel_args *b);
@@ -97,6 +108,10 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
grpc_channel_args *grpc_channel_args_set_socket_mutator(
grpc_channel_args *a, grpc_socket_mutator *mutator);
+/** Returns the value of argument \a name from \a args, or NULL if not found. */
+const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
+ const char *name);
+
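
A hedged sketch of the lookup-then-read pattern grpc_channel_args_find enables, mirroring the GRPC_ARG_SERVICE_CONFIG lookup added to message_size_filter.c below; the key name is hypothetical and the grpc_integer_options field order (default, min, max) is assumed from the declaration that follows:

#include <limits.h>

#include "src/core/lib/channel/channel_args.h"

/* Hypothetical key; the cast is needed because grpc_channel_args_find
   returns a const pointer while grpc_channel_arg_get_integer takes a
   non-const one. A negative result here means "not configured". */
static int read_example_limit(const grpc_channel_args *args) {
  const grpc_arg *arg = grpc_channel_args_find(args, "grpc.example_limit");
  if (arg == NULL) return -1;
  grpc_integer_options options = {-1, 0, INT_MAX}; /* default, min, max */
  return grpc_channel_arg_get_integer((grpc_arg *)arg, options);
}
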
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
int min_value;
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 57d34d9e9a..2c5367901d 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -162,7 +162,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- gpr_timespec deadline, grpc_call_stack *call_stack) {
+ grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@@ -179,10 +179,12 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
+ args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
+ args.path = path;
args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 1cfe2885d8..27f3be7b29 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -74,6 +74,8 @@ typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
+ grpc_mdstr *path;
+ gpr_timespec start_time;
gpr_timespec deadline;
} grpc_call_element_args;
@@ -225,7 +227,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- gpr_timespec deadline, grpc_call_stack *call_stack);
+ grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 079b98a2f8..d2ea5250f6 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -64,30 +64,49 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
-static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- gpr_timespec deadline) {
+static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
+ // Note: We do not start the timer if there is already a timer
+ // pending. This should be okay, because this is only called from two
+ // functions exported by this module: grpc_deadline_state_start(), which
+ // starts the initial timer, and grpc_deadline_state_reset(), which
+ // cancels any pre-existing timer before starting a new one. In
+ // particular, we want to ensure that if grpc_deadline_state_start()
+ // winds up trying to start the timer after grpc_deadline_state_reset()
+ // has already done so, we ignore the value from the former.
+ if (!deadline_state->timer_pending &&
+ gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = true;
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
elem, gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_mu_unlock(&deadline_state->timer_mu);
}
}
+static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ gpr_timespec deadline) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ gpr_mu_lock(&deadline_state->timer_mu);
+ start_timer_if_needed_locked(exec_ctx, elem, deadline);
+ gpr_mu_unlock(&deadline_state->timer_mu);
+}
// Cancels the deadline timer.
-static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
- grpc_deadline_state* deadline_state) {
- gpr_mu_lock(&deadline_state->timer_mu);
+static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+ grpc_deadline_state* deadline_state) {
if (deadline_state->timer_pending) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
+}
+static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
+ grpc_deadline_state* deadline_state) {
+ gpr_mu_lock(&deadline_state->timer_mu);
+ cancel_timer_if_needed_locked(exec_ctx, deadline_state);
gpr_mu_unlock(&deadline_state->timer_mu);
}
@@ -108,6 +127,21 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
op->on_complete = &deadline_state->on_complete;
}
+void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_call_stack* call_stack) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ memset(deadline_state, 0, sizeof(*deadline_state));
+ deadline_state->call_stack = call_stack;
+ gpr_mu_init(&deadline_state->timer_mu);
+}
+
+void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ cancel_timer_if_needed(exec_ctx, deadline_state);
+ gpr_mu_destroy(&deadline_state->timer_mu);
+}
+
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
@@ -122,16 +156,11 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_free(state);
}
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_call_element_args* args) {
- grpc_deadline_state* deadline_state = elem->call_data;
- memset(deadline_state, 0, sizeof(*deadline_state));
- deadline_state->call_stack = args->call_stack;
- gpr_mu_init(&deadline_state->timer_mu);
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec deadline) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
- const gpr_timespec deadline =
- gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
@@ -148,11 +177,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
}
}
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem) {
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
- cancel_timer_if_needed(exec_ctx, deadline_state);
- gpr_mu_destroy(&deadline_state->timer_mu);
+ gpr_mu_lock(&deadline_state->timer_mu);
+ cancel_timer_if_needed_locked(exec_ctx, deadline_state);
+ start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
+ gpr_mu_unlock(&deadline_state->timer_mu);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -209,7 +240,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
- grpc_deadline_state_init(exec_ctx, elem, args);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+ grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index 685df87761..716a852565 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -54,18 +54,37 @@ typedef struct grpc_deadline_state {
grpc_closure* next_on_complete;
} grpc_deadline_state;
-// To be used in a filter's init_call_elem(), destroy_call_elem(), and
-// start_transport_stream_op() methods to enforce call deadlines.
//
-// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
+// NOTE: All of these functions require that the first field in
+// elem->call_data is a grpc_deadline_state.
//
-// For grpc_deadline_state_client_start_transport_stream_op(), it is the
-// caller's responsibility to chain to the next filter if necessary
-// after the function returns.
+
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_call_element_args* args);
+ grpc_call_stack* call_stack);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
+
+// Starts the timer with the specified deadline.
+// Should be called from the filter's init_call_elem() method.
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec deadline);
+
+// Cancels the existing timer and starts a new one with new_deadline.
+//
+// Note: It is generally safe to call this with an earlier deadline
+// value than the current one, but not the reverse. No checks are done
+// to ensure that the timer callback is not invoked while it is in the
+// process of being reset, which means that attempting to increase the
+// deadline may result in the timer being called twice.
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec new_deadline);
+
+// To be called from the client-side filter's start_transport_stream_op()
+// method. Ensures that the deadline timer is cancelled when the call
+// is completed.
+//
+// Note: It is the caller's responsibility to chain to the next filter if
+// necessary after this function returns.
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op);
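
A minimal sketch, under the NOTE above, of how a filter could wire these helpers together; the example_call_data layout and helper names are hypothetical:

#include "src/core/lib/channel/deadline_filter.h"

/* Hypothetical filter state: grpc_deadline_state must be the first field
   of call_data, per the NOTE above. */
typedef struct example_call_data {
  grpc_deadline_state deadline_state;
  /* ... filter-specific fields would follow ... */
} example_call_data;

static grpc_error *example_init_call_elem(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_call_element_args *args) {
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}

/* Later, a deadline change would go through
   grpc_deadline_state_reset(exec_ctx, elem, new_deadline), and the filter's
   cleanup path would call grpc_deadline_state_destroy(exec_ctx, elem). */
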
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 8f9fb17a31..0d759887bc 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -183,7 +183,7 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_handshaker_done_cb cb, void* user_data) {
grpc_channel_args* args_copy = grpc_channel_args_copy(args);
- gpr_slice_buffer* read_buffer = malloc(sizeof(*read_buffer));
+ gpr_slice_buffer* read_buffer = gpr_malloc(sizeof(*read_buffer));
gpr_slice_buffer_init(read_buffer);
if (mgr->count == 0) {
// No handshakers registered, so we just immediately call the done
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index 0f2bf97824..f2221fb0fb 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -42,6 +42,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
+extern int grpc_http_trace;
+
typedef struct call_data {
uint8_t seen_path;
uint8_t seen_method;
@@ -209,6 +211,11 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
err, GRPC_ERROR_CREATE("Missing te: trailers header"));
}
/* Error this call out */
+ if (grpc_http_trace) {
+ const char *error_str = grpc_error_string(err);
+ gpr_log(GPR_ERROR, "Invalid http2 headers: %s", error_str);
+ grpc_error_free_string(error_str);
+ }
grpc_call_element_send_cancel(exec_ctx, elem);
}
} else {
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index f067a3a51c..7dc5ae0df1 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -39,12 +39,53 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/transport/method_config.h"
#define DEFAULT_MAX_SEND_MESSAGE_LENGTH -1 // Unlimited.
// The protobuf library will (by default) start warning at 100 megs.
#define DEFAULT_MAX_RECV_MESSAGE_LENGTH (4 * 1024 * 1024)
+typedef struct message_size_limits {
+ int max_send_size;
+ int max_recv_size;
+} message_size_limits;
+
+static void* message_size_limits_copy(void* value) {
+ void* new_value = gpr_malloc(sizeof(message_size_limits));
+ memcpy(new_value, value, sizeof(message_size_limits));
+ return new_value;
+}
+
+static int message_size_limits_cmp(void* value1, void* value2) {
+ const message_size_limits* v1 = value1;
+ const message_size_limits* v2 = value2;
+ if (v1->max_send_size > v2->max_send_size) return 1;
+ if (v1->max_send_size < v2->max_send_size) return -1;
+ if (v1->max_recv_size > v2->max_recv_size) return 1;
+ if (v1->max_recv_size < v2->max_recv_size) return -1;
+ return 0;
+}
+
+static const grpc_mdstr_hash_table_vtable message_size_limits_vtable = {
+ gpr_free, message_size_limits_copy, message_size_limits_cmp};
+
+static void* method_config_convert_value(
+ const grpc_method_config* method_config) {
+ message_size_limits* value = gpr_malloc(sizeof(message_size_limits));
+ const int32_t* max_request_message_bytes =
+ grpc_method_config_get_max_request_message_bytes(method_config);
+ value->max_send_size =
+ max_request_message_bytes != NULL ? *max_request_message_bytes : -1;
+ const int32_t* max_response_message_bytes =
+ grpc_method_config_get_max_response_message_bytes(method_config);
+ value->max_recv_size =
+ max_response_message_bytes != NULL ? *max_response_message_bytes : -1;
+ return value;
+}
+
typedef struct call_data {
+ int max_send_size;
+ int max_recv_size;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
@@ -58,6 +99,8 @@ typedef struct call_data {
typedef struct channel_data {
int max_send_size;
int max_recv_size;
+ // Maps path names to message_size_limits structs.
+ grpc_mdstr_hash_table* method_limit_table;
} channel_data;
// Callback invoked when we receive a message. Here we check the max
@@ -66,13 +109,12 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
- if (*calld->recv_message != NULL && chand->max_recv_size >= 0 &&
- (*calld->recv_message)->length > (size_t)chand->max_recv_size) {
+ if (*calld->recv_message != NULL && calld->max_recv_size >= 0 &&
+ (*calld->recv_message)->length > (size_t)calld->max_recv_size) {
char* message_string;
gpr_asprintf(&message_string,
"Received message larger than max (%u vs. %d)",
- (*calld->recv_message)->length, chand->max_recv_size);
+ (*calld->recv_message)->length, calld->max_recv_size);
grpc_error* new_error = grpc_error_set_int(
GRPC_ERROR_CREATE(message_string), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_INVALID_ARGUMENT);
@@ -93,13 +135,12 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
// Check max send message size.
- if (op->send_message != NULL && chand->max_send_size >= 0 &&
- op->send_message->length > (size_t)chand->max_send_size) {
+ if (op->send_message != NULL && calld->max_send_size >= 0 &&
+ op->send_message->length > (size_t)calld->max_send_size) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
- op->send_message->length, chand->max_send_size);
+ op->send_message->length, calld->max_send_size);
gpr_slice message = gpr_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(
@@ -119,9 +160,32 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
+ channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
+ // Get max sizes from channel data, then merge in per-method config values.
+ // Note: Per-method config is only available on the client, so we
+ // apply the max request size to the send limit and the max response
+ // size to the receive limit.
+ calld->max_send_size = chand->max_send_size;
+ calld->max_recv_size = chand->max_recv_size;
+ if (chand->method_limit_table != NULL) {
+ message_size_limits* limits =
+ grpc_method_config_table_get(chand->method_limit_table, args->path);
+ if (limits != NULL) {
+ if (limits->max_send_size >= 0 &&
+ (limits->max_send_size < calld->max_send_size ||
+ calld->max_send_size < 0)) {
+ calld->max_send_size = limits->max_send_size;
+ }
+ if (limits->max_recv_size >= 0 &&
+ (limits->max_recv_size < calld->max_recv_size ||
+ calld->max_recv_size < 0)) {
+ calld->max_recv_size = limits->max_recv_size;
+ }
+ }
+ }
return GRPC_ERROR_NONE;
}
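
The per-method merge above treats a negative limit as "unlimited" and lets a method-level limit only tighten, never loosen, the channel-level one. A standalone restatement of that rule with illustrative numbers:

#include <stdio.h>

/* Merge rule used above: a negative limit means "unlimited"; a per-method
   limit only lowers the effective limit, never raises it. */
static int merge_limit(int channel_limit, int method_limit) {
  if (method_limit >= 0 &&
      (method_limit < channel_limit || channel_limit < 0)) {
    return method_limit;
  }
  return channel_limit;
}

int main(void) {
  printf("%d\n", merge_limit(4 * 1024 * 1024, 1024)); /* 1024 */
  printf("%d\n", merge_limit(-1, 2048));              /* 2048 */
  printf("%d\n", merge_limit(4096, -1));              /* 4096 (method unset) */
  return 0;
}
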
@@ -155,11 +219,23 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
}
+ // Get method config table from channel args.
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ chand->method_limit_table = grpc_method_config_table_convert(
+ (grpc_method_config_table*)channel_arg->value.pointer.p,
+ method_config_convert_value, &message_size_limits_vtable);
+ }
}
// Destructor for channel_data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem) {}
+ grpc_channel_element* elem) {
+ channel_data* chand = elem->channel_data;
+ grpc_mdstr_hash_table_unref(chand->method_limit_table);
+}
const grpc_channel_filter grpc_message_size_filter = {
start_transport_stream_op,
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index c56fae6d0c..411e669b53 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -32,7 +32,6 @@
*/
#include "src/core/lib/http/httpcli.h"
-#include "src/core/lib/iomgr/sockaddr.h"
#include <string.h>
@@ -71,6 +70,7 @@ typedef struct {
grpc_closure done_write;
grpc_closure connected;
grpc_error *overall_error;
+ grpc_resource_quota *resource_quota;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
@@ -118,6 +118,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
gpr_slice_buffer_destroy(&req->incoming);
gpr_slice_buffer_destroy(&req->outgoing);
GRPC_ERROR_UNREF(req->overall_error);
+ grpc_resource_quota_internal_unref(exec_ctx, req->resource_quota);
gpr_free(req);
}
@@ -126,7 +127,7 @@ static void append_error(internal_request *req, grpc_error *error) {
req->overall_error = GRPC_ERROR_CREATE("Failed HTTP/1 client request");
}
grpc_resolved_address *addr = &req->addresses->addrs[req->next_address - 1];
- char *addr_text = grpc_sockaddr_to_uri((struct sockaddr *)addr->addr);
+ char *addr_text = grpc_sockaddr_to_uri(addr);
req->overall_error = grpc_error_add_child(
req->overall_error,
grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, addr_text));
@@ -213,7 +214,6 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
grpc_error *error) {
grpc_resolved_address *addr;
- grpc_tcp_client_connect_args tcp_client_connect_args;
if (error != GRPC_ERROR_NONE) {
append_error(req, error);
}
@@ -225,13 +225,15 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
addr = &req->addresses->addrs[req->next_address++];
grpc_closure_init(&req->connected, on_connected, req);
- tcp_client_connect_args.interested_parties = req->context->pollset_set;
- tcp_client_connect_args.addr = (struct sockaddr *)&addr->addr;
- tcp_client_connect_args.addr_len = addr->len;
- tcp_client_connect_args.deadline = req->deadline;
- tcp_client_connect_args.channel_args = NULL;
+ grpc_arg arg;
+ arg.key = GRPC_ARG_RESOURCE_QUOTA;
+ arg.type = GRPC_ARG_POINTER;
+ arg.value.pointer.p = req->resource_quota;
+ arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
+ grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
- &tcp_client_connect_args);
+ req->context->pollset_set, &args, addr,
+ req->deadline);
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -247,6 +249,7 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
@@ -262,6 +265,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->context = context;
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
+ req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&req->on_read, on_read, req);
grpc_closure_init(&req->done_write, done_write, req);
gpr_slice_buffer_init(&req->incoming);
@@ -279,6 +283,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
@@ -288,14 +293,15 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
return;
}
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
- internal_request_begin(exec_ctx, context, pollent, request, deadline, on_done,
- response, name,
+ internal_request_begin(exec_ctx, context, pollent, resource_quota, request,
+ deadline, on_done, response, name,
grpc_httpcli_format_get_request(request));
gpr_free(name);
}
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_done,
@@ -308,7 +314,8 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
}
gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
internal_request_begin(
- exec_ctx, context, pollent, request, deadline, on_done, response, name,
+ exec_ctx, context, pollent, resource_quota, request, deadline, on_done,
+ response, name,
grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
}
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 320c0f86c6..11e03b44df 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -96,6 +96,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'on_response' is a callback to report results to */
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
@@ -116,6 +117,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_complete,
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 1ba0a5c141..c6ddc76732 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -35,6 +35,8 @@
#include <grpc/support/alloc.h>
+#include "src/core/lib/profiling/timers.h"
+
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@@ -51,7 +53,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
- closure->error = error;
+ closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@@ -64,8 +66,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
- if (c->error == GRPC_ERROR_NONE) {
- c->error = GRPC_ERROR_REF(forced_failure);
+ if (c->error_data.error == GRPC_ERROR_NONE) {
+ c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
@@ -110,3 +112,13 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
+
+void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_closure_run", 0);
+ if (c != NULL) {
+ c->cb(exec_ctx, c->cb_arg, error);
+ }
+ GRPC_ERROR_UNREF(error);
+ GPR_TIMER_END("grpc_closure_run", 0);
+}
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index c1a22b6021..2b4b271eaa 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
- grpc_error *error;
+ union {
+ grpc_error *error;
+ uintptr_t scratch;
+ } error_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@@ -106,4 +109,10 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
+/** Run a closure directly. Caller ensures that no locks are being held above.
+ * Note that calling this at the end of a closure callback function itself is
+ * by definition safe. */
+void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
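
A hedged sketch of driving a closure through the new grpc_closure_run entry point; GRPC_EXEC_CTX_INIT and grpc_exec_ctx_finish are assumed to be the usual way to create and drain an exec_ctx in this tree:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void example_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* grpc_closure_run keeps ownership of `error` and unrefs it after this
     callback returns, so the callback must not unref it itself */
  (void)exec_ctx;
  (void)arg;
  (void)error;
}

static void run_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; /* assumed init macro */
  grpc_closure c;
  grpc_closure_init(&c, example_cb, NULL);
  /* runs example_cb immediately on this thread, with no locks held */
  grpc_closure_run(&exec_ctx, &c, GRPC_ERROR_NONE);
  grpc_exec_ctx_finish(&exec_ctx);
}
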
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 831bdb4aff..60ee14eb23 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -50,25 +50,57 @@ int grpc_combiner_trace = 0;
} \
} while (0)
+#define STATE_UNORPHANED 1
+#define STATE_ELEM_COUNT_LOW_BIT 2
+
struct grpc_combiner {
+ grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
- // lower bit - zero if orphaned
- // other bits - number of items queued on the lock
+ // lower bit - zero if orphaned (STATE_UNORPHANED)
+ // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
- bool take_async_break_before_final_list;
+ // number of elements in the list that are covered by a poller: if >0, we can
+ // offload safely
+ gpr_atm elements_covered_by_poller;
+ bool time_to_execute_final_list;
+ bool final_list_covered_by_poller;
grpc_closure_list final_list;
- grpc_closure continue_finishing;
+ grpc_closure offload;
};
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
+
+typedef struct {
+ grpc_error *error;
+ bool covered_by_poller;
+} error_data;
+
+static uintptr_t pack_error_data(error_data d) {
+ return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
+}
+
+static error_data unpack_error_data(uintptr_t p) {
+ return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
+}
+
+static bool is_covered_by_poller(grpc_combiner *lock) {
+ return lock->final_list_covered_by_poller ||
+ gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
+}
+
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
+ lock->next_combiner_on_this_exec_ctx = NULL;
+ lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
- gpr_atm_no_barrier_store(&lock->state, 1);
+ lock->final_list_covered_by_poller = false;
+ gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
+ gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
- lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
+ grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -82,7 +114,7 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -1);
+ gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
@@ -90,170 +122,186 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
-static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
-static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
+static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
+ grpc_combiner *lock) {
+ lock->next_combiner_on_this_exec_ctx = NULL;
+ if (exec_ctx->active_combiner == NULL) {
+ exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
+ } else {
+ exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
+ exec_ctx->last_combiner = lock;
+ }
+}
-static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
- grpc_combiner *lock = arg;
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
- GPR_ASSERT(exec_ctx->active_combiner == NULL);
+static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
+ grpc_combiner *lock) {
+ lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
- if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- GPR_TIMER_END("combiner.continue_executing_mainline", 0);
+ if (lock->next_combiner_on_this_exec_ctx == NULL) {
+ exec_ctx->last_combiner = lock;
+ }
}
-static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.execute_final", 0);
- grpc_closure *c = lock->final_list.head;
- GPR_ASSERT(c != NULL);
- grpc_closure_list_init(&lock->final_list);
- lock->take_async_break_before_final_list = false;
- int loops = 0;
- while (c != NULL) {
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error;
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- loops++;
+void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
+ grpc_closure *cl, grpc_error *error,
+ bool covered_by_poller) {
+ GPR_TIMER_BEGIN("combiner.execute", 0);
+ gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
+ GRPC_COMBINER_TRACE(gpr_log(
+ GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
+ cl, covered_by_poller, last));
+ GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
+ cl->error_data.scratch =
+ pack_error_data((error_data){error, covered_by_poller});
+ if (covered_by_poller) {
+ gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
+ }
+ gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+ if (last == 1) {
+ // first element on this list: add it to the list of combiner locks
+ // executing within this exec_ctx
+ push_last_on_exec_ctx(exec_ctx, lock);
}
- GPR_TIMER_END("combiner.execute_final", 0);
+ GPR_TIMER_END("combiner.execute", 0);
}
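
In the state word used above, the low bit is STATE_UNORPHANED and the higher bits count queued closures in units of STATE_ELEM_COUNT_LOW_BIT, so the pre-add value last == 1 means "alive, queue previously empty" and is what triggers adding the combiner to the exec_ctx's list. A standalone sketch of that encoding:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

int main(void) {
  /* a live combiner starts with state == STATE_UNORPHANED */
  intptr_t state = STATE_UNORPHANED;

  /* grpc_combiner_execute fetch-adds STATE_ELEM_COUNT_LOW_BIT; a pre-add
     value of 1 identifies the first queued element */
  intptr_t last = state;
  state += STATE_ELEM_COUNT_LOW_BIT;
  printf("first enqueue: last=%" PRIdPTR " queued=%" PRIdPTR "\n", last,
         state >> 1);

  /* a second enqueue sees last == 3, so it does not re-add the combiner */
  last = state;
  state += STATE_ELEM_COUNT_LOW_BIT;
  printf("second enqueue: last=%" PRIdPTR " queued=%" PRIdPTR "\n", last,
         state >> 1);
  return 0;
}
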
-static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
- grpc_combiner *lock = arg;
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
- GPR_ASSERT(exec_ctx->active_combiner == NULL);
- exec_ctx->active_combiner = lock;
- // quick peek to see if new things have turned up on the queue: if so, go back
- // to executing them before the final list
- if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
- if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
- } else {
- execute_final(exec_ctx, lock);
- finish(exec_ctx, lock);
+static void move_next(grpc_exec_ctx *exec_ctx) {
+ exec_ctx->active_combiner =
+ exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
+ if (exec_ctx->active_combiner == NULL) {
+ exec_ctx->last_combiner = NULL;
}
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- GPR_TIMER_END("combiner.continue_executing_final", 0);
}
-static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG,
- "C:%p start_execute_final take_async_break_before_final_list=%d",
- lock, lock->take_async_break_before_final_list));
- if (lock->take_async_break_before_final_list) {
- grpc_closure_init(&lock->continue_finishing, continue_executing_final,
- lock);
- grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
- GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
- GPR_TIMER_END("combiner.start_execute_final", 0);
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_combiner *lock = arg;
+ push_last_on_exec_ctx(exec_ctx, lock);
+}
+
+static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+ move_next(exec_ctx);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
+ lock->optional_workqueue));
+ grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
+ GRPC_ERROR_NONE);
+}
+
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
+ grpc_combiner *lock = exec_ctx->active_combiner;
+ if (lock == NULL) {
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
- } else {
- execute_final(exec_ctx, lock);
- GPR_TIMER_END("combiner.start_execute_final", 0);
- return true;
}
-}
-static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
- gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- if (n == NULL) {
- // Queue is in an transiently inconsistent state: a new item is being queued
- // but is not visible to this thread yet.
- // Use this as a cue that we should go off and do something else for a while
- // (and come back later)
- grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
- lock);
- grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
- GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
- GPR_TIMER_END("combiner.maybe_finish_one", 0);
- return false;
+ gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
+ "is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
+ "time_to_execute_final_list=%d",
+ lock, lock->optional_workqueue, is_covered_by_poller(lock),
+ grpc_exec_ctx_ready_to_finish(exec_ctx),
+ lock->time_to_execute_final_list));
+
+ if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
+ grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
+ // this execution context wants to move on, and we have a workqueue (and
+ // so can help the execution context out): schedule remaining work to be
+ // picked up on the workqueue
+ queue_offload(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
- grpc_closure *cl = (grpc_closure *)n;
- grpc_error *error = cl->error;
- cl->cb(exec_ctx, cl->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("combiner.maybe_finish_one", 0);
- return true;
-}
-static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
- GPR_TIMER_BEGIN("combiner.finish", 0);
- int loops = 0;
- do {
- executor = maybe_finish_one;
- gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
- "C:%p finish[%d] old_state=%" PRIdPTR, lock,
- loops, old_state));
- switch (old_state) {
- default:
- // we have multiple queued work items: just continue executing them
- break;
- case 5: // we're down to one queued item: if it's the final list we
- case 4: // should do that
- if (!grpc_closure_list_empty(lock->final_list)) {
- executor = start_execute_final;
- }
- break;
- case 3: // had one count, one unorphaned --> unlocked unorphaned
- GPR_TIMER_END("combiner.finish", 0);
- return;
- case 2: // and one count, one orphaned --> unlocked and orphaned
- really_destroy(exec_ctx, lock);
- GPR_TIMER_END("combiner.finish", 0);
- return;
- case 1:
- case 0:
- // these values are illegal - representing an already unlocked or
- // deleted lock
- GPR_UNREACHABLE_CODE(return );
+ if (!lock->time_to_execute_final_list ||
+ // peek to see if something new has shown up, and execute that with
+ // priority
+ (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
+ GRPC_COMBINER_TRACE(
+ gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
+ if (n == NULL) {
+ // queue is in an inconsistent state: use this as a cue that we should
+ // go off and do something else for a while (and come back later)
+ GPR_TIMER_MARK("delay_busy", 0);
+ if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
+ queue_offload(exec_ctx, lock);
+ }
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
- loops++;
- } while (executor(exec_ctx, lock));
- GPR_TIMER_END("combiner.finish", 0);
-}
+ GPR_TIMER_BEGIN("combiner.exec1", 0);
+ grpc_closure *cl = (grpc_closure *)n;
+ error_data err = unpack_error_data(cl->error_data.scratch);
+ cl->cb(exec_ctx, cl->cb_arg, err.error);
+ if (err.covered_by_poller) {
+ gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
+ }
+ GRPC_ERROR_UNREF(err.error);
+ GPR_TIMER_END("combiner.exec1", 0);
+ } else {
+ grpc_closure *c = lock->final_list.head;
+ GPR_ASSERT(c != NULL);
+ grpc_closure_list_init(&lock->final_list);
+ lock->final_list_covered_by_poller = false;
+ int loops = 0;
+ while (c != NULL) {
+ GPR_TIMER_BEGIN("combiner.exec_1final", 0);
+ GRPC_COMBINER_TRACE(
+ gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ GPR_TIMER_END("combiner.exec_1final", 0);
+ }
+ }
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *cl, grpc_error *error) {
+ GPR_TIMER_MARK("unref", 0);
+ move_next(exec_ctx);
+ lock->time_to_execute_final_list = false;
+ gpr_atm old_state =
+ gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
- GPR_TIMER_BEGIN("combiner.execute", 0);
- gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
- GPR_ASSERT(last & 1); // ensure lock has not been destroyed
- if (last == 1) {
- exec_ctx->active_combiner = lock;
- GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
- cl->cb(exec_ctx, cl->cb_arg, error);
- GPR_TIMER_END("combiner.execute_first_cb", 0);
- GRPC_ERROR_UNREF(error);
- finish(exec_ctx, lock);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- } else {
- cl->error = error;
- gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+ gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
+// Define a macro to ease readability of the following switch statement.
+#define OLD_STATE_WAS(orphaned, elem_count) \
+ (((orphaned) ? 0 : STATE_UNORPHANED) | \
+ ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
+ // Depending on what the previous state was, we need to perform different
+ // actions.
+ switch (old_state) {
+ default:
+ // we have multiple queued work items: just continue executing them
+ break;
+ case OLD_STATE_WAS(false, 2):
+ case OLD_STATE_WAS(true, 2):
+ // we're down to one queued item: if it's the final list we should do that
+ if (!grpc_closure_list_empty(lock->final_list)) {
+ lock->time_to_execute_final_list = true;
+ }
+ break;
+ case OLD_STATE_WAS(false, 1):
+ // had one count, one unorphaned --> unlocked unorphaned
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
+ case OLD_STATE_WAS(true, 1):
+ // and one count, one orphaned --> unlocked and orphaned
+ really_destroy(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
+ case OLD_STATE_WAS(false, 0):
+ case OLD_STATE_WAS(true, 0):
+ // these values are illegal - representing an already unlocked or
+ // deleted lock
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ GPR_UNREACHABLE_CODE(return true);
}
- GPR_TIMER_END("combiner.execute", 0);
+ push_first_on_exec_ctx(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
@@ -264,30 +312,26 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
- bool force_async_break) {
+ bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG,
- "C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
- lock, closure, force_async_break, exec_ctx->active_combiner));
+ GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
+ closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
- grpc_closure_create(enqueue_finally, closure), error);
+ grpc_closure_create(enqueue_finally, closure), error,
+ false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
- if (force_async_break) {
- lock->take_async_break_before_final_list = true;
- }
if (grpc_closure_list_empty(lock->final_list)) {
- gpr_atm_full_fetch_add(&lock->state, 2);
+ gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
+ }
+ if (covered_by_poller) {
+ lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
-
-void grpc_combiner_force_async_finally(grpc_combiner *lock) {
- lock->take_async_break_before_final_list = true;
-}
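
For reference, a minimal sketch of the state word the rewritten combiner manipulates. The constants are defined earlier in combiner.c (outside this hunk); the values below are inferred from the one-for-one replacement of the old literals (case 3 becoming OLD_STATE_WAS(false, 1), case 2 becoming OLD_STATE_WAS(true, 1), and the old fetch-add of 2 becoming STATE_ELEM_COUNT_LOW_BIT), so treat them as an assumption rather than the committed definitions.

/* Assumed encoding of grpc_combiner.state (a gpr_atm):
     bit 0       STATE_UNORPHANED - set until grpc_combiner_destroy() runs
     bits 1..N   count of queued items (closures plus the final_list slot) */
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2
/* An idle, live combiner therefore holds state == 1.  grpc_combiner_execute()
   adds STATE_ELEM_COUNT_LOW_BIT, so seeing last == 1 means "first item queued"
   and the lock is pushed onto this exec_ctx's list of active combiners. */
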
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 1409db24b9..d04eeed83a 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -52,19 +52,14 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *closure, grpc_error *error);
+ grpc_closure *closure, grpc_error *error,
+ bool covered_by_poller);
// Execute \a action within the lock just prior to unlocking.
-// if \a hint_async_break is true, the combiner tries to hand execution to
-// another thread before finishing the primary queue of combined closures and
-// executing the finally list.
-// Deprecation warning: \a hint_async_break will be removed in a future version
-// Takes a very slow and round-about path if not called from a
-// grpc_combiner_execute closure.
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
- bool hint_async_break);
-// Deprecated: force the finally list execution onto another thread
-void grpc_combiner_force_async_finally(grpc_combiner *lock);
+ bool covered_by_poller);
+
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;
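
A minimal caller-side sketch of the API declared above. The names do_something_locked, schedule_example and call_state are hypothetical; the grpc_* signatures are the ones in this header, and the closure actually runs when grpc_exec_ctx_flush() drives grpc_combiner_continue_exec_ctx() (wired up in exec_ctx.c below).

static void do_something_locked(grpc_exec_ctx *exec_ctx, void *call_state,
                                grpc_error *error) {
  /* runs serialized with every other closure scheduled on the same combiner */
}

static void schedule_example(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                             void *call_state) {
  /* covered_by_poller = false: no poller is guaranteed to pick this up, so
     the combiner must not assume it can park the work on its workqueue */
  grpc_combiner_execute(exec_ctx, lock,
                        grpc_closure_create(do_something_locked, call_state),
                        GRPC_ERROR_NONE, false);
}
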
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index f901fcf962..74fa9c45df 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -69,3 +69,7 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
return ep->vtable->get_workqueue(ep);
}
+
+grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
+ return ep->vtable->get_resource_user(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 910a6f6532..0ac5486ff5 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -39,6 +39,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/iomgr/resource_quota.h"
/* An endpoint caps a streaming channel between two communicating processes.
Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
@@ -58,6 +59,7 @@ struct grpc_endpoint_vtable {
grpc_pollset_set *pollset);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
char *(*get_peer)(grpc_endpoint *ep);
};
@@ -100,6 +102,8 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
+grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
+
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
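
Sketch of what an endpoint implementation now provides for the new accessor; my_endpoint, my_get_resource_user and the resource_user field are hypothetical names. Per the vtable layout above, the function is installed as the get_resource_user entry, between destroy and get_peer.

typedef struct {
  grpc_endpoint base;                /* must be first so the cast below works */
  grpc_resource_user *resource_user; /* hypothetical field owned by the impl */
} my_endpoint;

static grpc_resource_user *my_get_resource_user(grpc_endpoint *ep) {
  return ((my_endpoint *)ep)->resource_user;
}
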
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index 5cd78051bd..f9de0c715e 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -41,7 +41,8 @@ typedef struct {
grpc_endpoint *server;
} grpc_endpoint_pair;
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size);
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.c b/src/core/lib/iomgr/endpoint_pair_posix.c
index e295fb4867..b9ff969e81 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.c
+++ b/src/core/lib/iomgr/endpoint_pair_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/endpoint_pair.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
@@ -62,20 +62,21 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
int sv[2];
grpc_endpoint_pair p;
char *final_name;
create_sockets(sv);
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
- "socketpair-server");
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), resource_quota,
+ read_slice_size, "socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
- "socketpair-client");
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), resource_quota,
+ read_slice_size, "socketpair-client");
gpr_free(final_name);
return p;
}
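
Call sites of grpc_iomgr_create_endpoint_pair (largely test fixtures) now have to thread a quota through. A sketch using the public resource-quota API; grpc_resource_quota_create/unref are assumed to be available at this point in the patch series, and the name and slice size below are example values.

grpc_resource_quota *resource_quota =
    grpc_resource_quota_create("endpoint_pair_fixture");
grpc_endpoint_pair p =
    grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 65536);
grpc_resource_quota_unref(resource_quota); /* the endpoints hold what they need */
/* ... use p.client / p.server ... */
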
diff --git a/src/core/lib/security/credentials/google_default/credentials_windows.c b/src/core/lib/iomgr/endpoint_pair_uv.c

index 208b8fd9ad..ff24894c6d 100644
--- a/src/core/lib/security/credentials/google_default/credentials_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_uv.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,31 +31,24 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINDOWS
+#ifdef GRPC_UV
-#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
+#include <stdlib.h>
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include "src/core/lib/support/env.h"
-#include "src/core/lib/support/string.h"
+#include "src/core/lib/iomgr/endpoint_pair.h"
-char *grpc_get_well_known_google_credentials_file_path_impl(void) {
- char *result = NULL;
- char *appdata_path = gpr_getenv("APPDATA");
- if (appdata_path == NULL) {
- gpr_log(GPR_ERROR, "Could not get APPDATA environment variable.");
- return NULL;
- }
- gpr_asprintf(&result, "%s/%s/%s", appdata_path,
- GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY,
- GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE);
- gpr_free(appdata_path);
- return result;
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
+ grpc_endpoint_pair endpoint_pair;
+ // TODO(mlumish): implement this properly under libuv
+ GPR_ASSERT(false &&
+             "grpc_iomgr_create_endpoint_pair is not supported with libuv");
+ return endpoint_pair;
}
-#endif /* GPR_WINDOWS */
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.c b/src/core/lib/iomgr/endpoint_pair_windows.c
index 582704e267..93f71b745c 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/endpoint_pair.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -82,15 +82,16 @@ static void create_sockets(SOCKET sv[2]) {
sv[0] = svr_sock;
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
- "endpoint:server");
+ resource_quota, "endpoint:server");
p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
- "endpoint:client");
+ resource_quota, "endpoint:client");
return p;
}
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 31c80260f8..f6bb3a0477 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -120,6 +120,8 @@ static const char *error_int_name(grpc_error_ints key) {
return "http_status";
case GRPC_ERROR_INT_LIMIT:
return "limit";
+ case GRPC_ERROR_INT_OCCURRED_DURING_WRITE:
+ return "occurred_during_write";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -144,6 +146,8 @@ static const char *error_str_name(grpc_error_strs key) {
return "tsi_error";
case GRPC_ERROR_STR_FILENAME:
return "filename";
+ case GRPC_ERROR_STR_QUEUED_BUFFERS:
+ return "queued_buffers";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -265,7 +269,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create copying", out);
+ gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);
@@ -523,21 +527,25 @@ static char *fmt_time(void *p) {
return out;
}
-static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap) {
+static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
+ bool *first) {
if (n == NULL) return;
- add_errs(n->left, s, sz, cap);
+ add_errs(n->left, s, sz, cap, first);
+ if (!*first) append_chr(',', s, sz, cap);
+ *first = false;
const char *e = grpc_error_string(n->value);
append_str(e, s, sz, cap);
grpc_error_free_string(e);
- add_errs(n->right, s, sz, cap);
+ add_errs(n->right, s, sz, cap, first);
}
static char *errs_string(grpc_error *err) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
+ bool first = true;
append_chr('[', &s, &sz, &cap);
- add_errs(err->errs.root, &s, &sz, &cap);
+ add_errs(err->errs.root, &s, &sz, &cap, &first);
append_chr(']', &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index 00ace8a7a9..f3f3b80a09 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -100,6 +100,8 @@ typedef enum {
GRPC_ERROR_INT_HTTP_STATUS,
/// context sensitive limit associated with the error
GRPC_ERROR_INT_LIMIT,
+ /// chttp2: did the error occur while a write was in progress
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
} grpc_error_ints;
typedef enum {
@@ -121,6 +123,8 @@ typedef enum {
GRPC_ERROR_STR_TSI_ERROR,
/// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
+ /// which data was queued for writing when the error occurred
+ GRPC_ERROR_STR_QUEUED_BUFFERS
} grpc_error_strs;
typedef enum {
@@ -128,9 +132,13 @@ typedef enum {
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
+/// The following "special" errors can be propagated without allocating memory.
+/// They are always even so that other code (particularly combiner locks) can
+/// safely use the lower bit for themselves.
+
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
-#define GRPC_ERROR_OOM ((grpc_error *)1)
-#define GRPC_ERROR_CANCELLED ((grpc_error *)2)
+#define GRPC_ERROR_OOM ((grpc_error *)2)
+#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);
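
The shift to even special values matters because the combiner (combiner.c above) packs a boolean into bit 0 of the error word it stashes in closure->error_data.scratch. A sketch of that packing; the struct layout and function bodies are assumed from how pack_error_data/unpack_error_data are used, not copied from the source.

typedef struct {
  grpc_error *error;
  bool covered_by_poller;
} error_data;

static uintptr_t pack_error_data(error_data d) {
  return ((uintptr_t)d.error) | (d.covered_by_poller ? (uintptr_t)1 : 0);
}

static error_data unpack_error_data(uintptr_t p) {
  return (error_data){(grpc_error *)(p & ~(uintptr_t)1), (p & 1) != 0};
}
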
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index ab77ebc78b..db51ec4939 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -32,10 +32,10 @@
*/
#include <grpc/grpc_posix.h>
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GPR_LINUX_EPOLL
+#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll_linux.h"
@@ -152,20 +152,20 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-//#define GRPC_PI_REF_COUNT_DEBUG
-#ifdef GRPC_PI_REF_COUNT_DEBUG
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
+#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */
+/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
@@ -185,8 +185,17 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
- /* The workqueue associated with this polling island */
- grpc_workqueue *workqueue;
+ /* Number of threads currently polling on this island */
+ gpr_atm poller_count;
+ /* Mutex guarding the read end of the workqueue (must be held to pop from
+ * workqueue_items) */
+ gpr_mu workqueue_read_mu;
+ /* Queue of closures to be executed */
+ gpr_mpscq workqueue_items;
+ /* Count of items in workqueue_items */
+ gpr_atm workqueue_item_count;
+ /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
+ grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@@ -275,6 +284,10 @@ static bool append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
+/* The polling island being polled right now.
+ See comments in workqueue_maybe_wakeup for why this is tracked. */
+static __thread polling_island *g_current_thread_polling_island;
+
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -289,12 +302,12 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
- int line) {
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static void pi_add_ref_dbg(polling_island *pi, const char *reason,
+ const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
@@ -302,12 +315,42 @@ static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- char *reason, char *file, int line) {
+ const char *reason, const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
+
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ if (workqueue != NULL) {
+ pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
+ }
+ return workqueue;
+}
+
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {
+ if (workqueue != NULL) {
+ pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
+ }
+}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ if (workqueue != NULL) {
+ pi_add_ref((polling_island *)workqueue);
+ }
+ return workqueue;
+}
+
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {
+ if (workqueue != NULL) {
+ pi_unref(exec_ctx, (polling_island *)workqueue);
+ }
+}
#endif
static void pi_add_ref(polling_island *pi) {
@@ -315,10 +358,7 @@ static void pi_add_ref(polling_island *pi) {
}
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- /* If ref count went to one, we're back to just the workqueue owning a ref.
- Unref the workqueue to break the loop.
-
- If ref count went to zero, delete the polling island.
+ /* If ref count went to zero, delete the polling island.
     Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@@ -326,20 +366,12 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
- switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- case 2: /* last external ref: the only one now owned is by the workqueue */
- GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
- break;
- case 1: {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(exec_ctx, pi);
- if (next != NULL) {
- PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
- }
- break;
+ if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
+ polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ polling_island_delete(exec_ctx, pi);
+ if (next != NULL) {
+ PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
- case 0:
- GPR_UNREACHABLE_CODE(return );
}
}
@@ -488,11 +520,20 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
- pi->workqueue = NULL;
+
+ gpr_mu_init(&pi->workqueue_read_mu);
+ gpr_mpscq_init(&pi->workqueue_items);
+ gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
+ gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
+ if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
+ err_desc)) {
+ goto done;
+ }
+
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -501,26 +542,14 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
}
polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
+ polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
- if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
- err_desc) &&
- *error == GRPC_ERROR_NONE) {
- polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
- error);
- GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
- pi->workqueue->wakeup_read_fd->polling_island = pi;
- PI_ADD_REF(pi, "fd");
- }
-
done:
if (*error != GRPC_ERROR_NONE) {
- if (pi->workqueue != NULL) {
- GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
- }
polling_island_delete(exec_ctx, pi);
pi = NULL;
}
@@ -533,7 +562,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
+ GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
+ gpr_mu_destroy(&pi->workqueue_read_mu);
+ gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
+ grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@@ -678,6 +711,45 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
+static void workqueue_maybe_wakeup(polling_island *pi) {
+ /* If this thread is the current poller, then it may be that it's about to
+ decrement the current poller count, so we need to look past this thread */
+ bool is_current_poller = (g_current_thread_polling_island == pi);
+ gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
+ gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
+ /* Only issue a wakeup if it's likely that some poller could come in and take
+ it right now. Note that since we do an anticipatory mpscq_pop every poll
+ loop, it's ok if we miss the wakeup here, as we'll get the work item when
+ the next poller enters anyway. */
+ if (current_pollers > min_current_pollers_for_wakeup) {
+ GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
+ grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
+ }
+}
+
+static void workqueue_move_items_to_parent(polling_island *q) {
+ polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
+ if (p == NULL) {
+ return;
+ }
+ gpr_mu_lock(&q->workqueue_read_mu);
+ int num_added = 0;
+ while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
+ if (n != NULL) {
+ gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
+ gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
+ gpr_mpscq_push(&p->workqueue_items, n);
+ num_added++;
+ }
+ }
+ gpr_mu_unlock(&q->workqueue_read_mu);
+ if (num_added > 0) {
+ workqueue_maybe_wakeup(p);
+ }
+ workqueue_move_items_to_parent(p);
+}
+
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@@ -702,6 +774,8 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
+
+ workqueue_move_items_to_parent(q);
}
/* else if p == q, nothing needs to be done */
@@ -712,6 +786,26 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("workqueue.enqueue", 0);
+ /* take a ref to the workqueue: otherwise it can happen that whatever events
+ * this kicks off ends up destroying the workqueue before this function
+ * completes */
+ GRPC_WORKQUEUE_REF(workqueue, "enqueue");
+ polling_island *pi = (polling_island *)workqueue;
+ gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
+ closure->error_data.error = error;
+ gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
+ if (last == 0) {
+ workqueue_maybe_wakeup(pi);
+ }
+ workqueue_move_items_to_parent(pi);
+ GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
+ GPR_TIMER_END("workqueue.enqueue", 0);
+}
+
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -1042,11 +1136,8 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
- grpc_workqueue *workqueue = NULL;
- if (fd->polling_island != NULL) {
- workqueue =
- GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
- }
+ grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
+ (grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
gpr_mu_unlock(&fd->mu);
return workqueue;
}
@@ -1299,7 +1390,29 @@ static void pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->polling_island == NULL);
}
-#define GRPC_EPOLL_MAX_EVENTS 1000
+static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
+ polling_island *pi) {
+ if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
+ gpr_mu_unlock(&pi->workqueue_read_mu);
+ if (n != NULL) {
+ if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
+ workqueue_maybe_wakeup(pi);
+ }
+ grpc_closure *c = (grpc_closure *)n;
+ grpc_closure_run(exec_ctx, c, c->error_data.error);
+ return true;
+ } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
+ /* n == NULL might mean there's work but it's not available to be popped
+ * yet - try to ensure another workqueue wakes up to check shortly if so
+ */
+ workqueue_maybe_wakeup(pi);
+ }
+ }
+ return false;
+}
+
+#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
@@ -1354,7 +1467,13 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->mu);
- do {
+ /* If we get some workqueue work to do, it might end up completing an item on
+ the completion queue, so there's no need to poll... so we skip that and
+ redo the complete loop to verify */
+ if (!maybe_do_workqueue_work(exec_ctx, pi)) {
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
+ g_current_thread_polling_island = pi;
+
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
@@ -1386,6 +1505,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
append_error(error,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
err_desc);
+ } else if (data_ptr == &pi->workqueue_wakeup_fd) {
+ append_error(error,
+                     grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
+ err_desc);
+ maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
@@ -1408,7 +1532,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
}
}
- } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
+
+ g_current_thread_polling_island = NULL;
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
+ }
GPR_ASSERT(pi != NULL);
@@ -1584,6 +1711,12 @@ retry:
"pollset_add_fd: Raced creating new polling island. pi_new: %p "
"(fd: %d, pollset: %p)",
(void *)pi_new, fd->fd, (void *)pollset);
+
+ /* No need to lock 'pi_new' here since this is a new polling island and
+ * no one has a reference to it yet */
+ polling_island_remove_all_fds_locked(pi_new, true, &error);
+
+ /* Ref and unref so that the polling island gets deleted during unref */
PI_ADD_REF(pi_new, "dance_of_destruction");
PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
goto retry;
@@ -1868,6 +2001,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
@@ -1892,6 +2029,10 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
return NULL;
}
+ if (!grpc_has_wakeup_fd()) {
+ return NULL;
+ }
+
if (!is_epoll_available()) {
return NULL;
}
@@ -1914,13 +2055,13 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
return &vtable;
}
-#else /* defined(GPR_LINUX_EPOLL) */
-#if defined(GPR_POSIX_SOCKET)
+#else /* defined(GRPC_LINUX_EPOLL) */
+#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
-/* If GPR_LINUX_EPOLL is not defined, it means epoll is not available. Return
+/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
-#endif /* defined(GPR_POSIX_SOCKET) */
+#endif /* defined(GRPC_POSIX_SOCKET) */
void grpc_use_signal(int signum) {}
-#endif /* !defined(GPR_LINUX_EPOLL) */
+#endif /* !defined(GRPC_LINUX_EPOLL) */
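
With the polling island itself acting as the grpc_workqueue for every fd it owns, offloading work to whichever thread is polling an endpoint becomes a short sequence like the following sketch (ep, closure and exec_ctx are assumed to exist; under this engine grpc_endpoint_get_workqueue() hands back a referenced polling island).

grpc_workqueue *wq = grpc_endpoint_get_workqueue(ep);
grpc_workqueue_enqueue(exec_ctx, wq, closure, GRPC_ERROR_NONE);
GRPC_WORKQUEUE_UNREF(exec_ctx, wq, "example");
/* workqueue_enqueue() pushes onto workqueue_items and, if some other poller is
   active, kicks workqueue_wakeup_fd so maybe_do_workqueue_work() drains the
   item before the next epoll_pwait(). */
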
diff --git a/src/core/lib/iomgr/ev_epoll_linux.h b/src/core/lib/iomgr/ev_epoll_linux.h
index 7a494aba19..8fc3ff59a3 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.h
+++ b/src/core/lib/iomgr/ev_epoll_linux.h
@@ -35,13 +35,14 @@
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epoll_linux(void);
-#ifdef GPR_LINUX_EPOLL
+#ifdef GRPC_LINUX_EPOLL
void *grpc_fd_get_polling_island(grpc_fd *fd);
void *grpc_pollset_get_polling_island(grpc_pollset *ps);
bool grpc_are_polling_islands_equal(void *p, void *q);
-#endif /* defined(GPR_LINUX_EPOLL) */
+#endif /* defined(GRPC_LINUX_EPOLL) */
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
index c2107e5e39..bf51404203 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -42,9 +42,9 @@
* - ev_epoll_posix.{h,c}
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
@@ -1338,7 +1338,7 @@ static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
* pollset_multipoller_with_poll_posix.c
*/
-#ifndef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifndef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
typedef struct {
/* all polled fds */
@@ -1520,13 +1520,13 @@ static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
}
}
-#endif /* !GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#endif /* !GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
/*******************************************************************************
* pollset_multipoller_with_epoll_posix.c
*/
-#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
#include <errno.h>
#include <poll.h>
@@ -1839,11 +1839,11 @@ static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
}
}
-#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#else /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
static void remove_fd_from_all_epoll_sets(int fd) {}
-#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#endif /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
/*******************************************************************************
* pollset_set_posix.c
@@ -1989,6 +1989,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
+ * workqueue stubs
+ */
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {}
+#endif
+
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+/*******************************************************************************
* event engine binding
*/
@@ -2029,11 +2055,15 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void) {
-#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
platform_become_multipoller = epoll_become_multipoller;
#else
platform_become_multipoller = poll_become_multipoller;
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 16a5e3083e..e1d620cfff 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_poll_posix.h"
@@ -47,10 +47,12 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -246,6 +248,28 @@ struct grpc_pollset_set {
};
/*******************************************************************************
+ * condition variable polling definitions
+ */
+
+#define CV_POLL_PERIOD_MS 1000
+#define CV_DEFAULT_TABLE_SIZE 16
+
+typedef enum poll_status_t { INPROGRESS, COMPLETED, CANCELLED } poll_status_t;
+
+typedef struct poll_args {
+ gpr_refcount refcount;
+ gpr_cv *cv;
+ struct pollfd *fds;
+ nfds_t nfds;
+ int timeout;
+ int retval;
+ int err;
+ gpr_atm status;
+} poll_args;
+
+cv_fd_table g_cvfds;
+
+/*******************************************************************************
* fd_posix.c
*/
@@ -961,8 +985,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
+
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ if (watchers[i].fd == NULL) {
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ } else {
+          // Wake up all the file descriptors, so that if we have an invalid
+          // one we can identify it on the next pollset_work()
+ fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
+ }
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
@@ -1236,10 +1267,237 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
+ * workqueue stubs
+ */
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {}
+#endif
+
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+/*******************************************************************************
+ * Condition Variable polling extensions
+ */
+
+static void decref_poll_args(poll_args *args) {
+ if (gpr_unref(&args->refcount)) {
+ gpr_free(args->fds);
+ gpr_cv_destroy(args->cv);
+ gpr_free(args->cv);
+ gpr_free(args);
+ }
+}
+
+// Poll in a background thread
+static void run_poll(void *arg) {
+ int timeout, retval;
+ poll_args *pargs = (poll_args *)arg;
+ while (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
+ if (pargs->timeout < 0) {
+ timeout = CV_POLL_PERIOD_MS;
+ } else {
+ timeout = GPR_MIN(CV_POLL_PERIOD_MS, pargs->timeout);
+ pargs->timeout -= timeout;
+ }
+ retval = g_cvfds.poll(pargs->fds, pargs->nfds, timeout);
+ if (retval != 0 || pargs->timeout == 0) {
+ pargs->retval = retval;
+ pargs->err = errno;
+ break;
+ }
+ }
+ gpr_mu_lock(&g_cvfds.mu);
+ if (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
+ // Signal main thread that the poll completed
+ gpr_atm_no_barrier_store(&pargs->status, COMPLETED);
+ gpr_cv_signal(pargs->cv);
+ }
+ decref_poll_args(pargs);
+ g_cvfds.pollcount--;
+ if (g_cvfds.shutdown && g_cvfds.pollcount == 0) {
+ gpr_cv_signal(&g_cvfds.shutdown_complete);
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+// This function overrides poll() to handle condition variable wakeup fds
+static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
+ unsigned int i;
+ int res, idx;
+ gpr_cv *pollcv;
+ cv_node *cvn, *prev;
+ nfds_t nsockfds = 0;
+ gpr_thd_id t_id;
+ gpr_thd_options opt;
+ poll_args *pargs = NULL;
+ gpr_mu_lock(&g_cvfds.mu);
+ pollcv = gpr_malloc(sizeof(gpr_cv));
+ gpr_cv_init(pollcv);
+ for (i = 0; i < nfds; i++) {
+ fds[i].revents = 0;
+ if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
+ idx = FD_TO_IDX(fds[i].fd);
+ cvn = gpr_malloc(sizeof(cv_node));
+ cvn->cv = pollcv;
+ cvn->next = g_cvfds.cvfds[idx].cvs;
+ g_cvfds.cvfds[idx].cvs = cvn;
+ // We should return immediately if there are pending events,
+ // but we still need to call poll() to check for socket events
+ if (g_cvfds.cvfds[idx].is_set) {
+ timeout = 0;
+ }
+ } else if (fds[i].fd >= 0) {
+ nsockfds++;
+ }
+ }
+
+ if (nsockfds > 0) {
+ pargs = gpr_malloc(sizeof(struct poll_args));
+ // Both the main thread and calling thread get a reference
+ gpr_ref_init(&pargs->refcount, 2);
+ pargs->cv = pollcv;
+ pargs->fds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ pargs->nfds = nsockfds;
+ pargs->timeout = timeout;
+ pargs->retval = 0;
+ pargs->err = 0;
+ gpr_atm_no_barrier_store(&pargs->status, INPROGRESS);
+ idx = 0;
+ for (i = 0; i < nfds; i++) {
+ if (fds[i].fd >= 0) {
+ pargs->fds[idx].fd = fds[i].fd;
+ pargs->fds[idx].events = fds[i].events;
+ pargs->fds[idx].revents = 0;
+ idx++;
+ }
+ }
+ g_cvfds.pollcount++;
+ opt = gpr_thd_options_default();
+ gpr_thd_options_set_detached(&opt);
+ gpr_thd_new(&t_id, &run_poll, pargs, &opt);
+ // We want the poll() thread to trigger the deadline, so wait forever here
+ gpr_cv_wait(pollcv, &g_cvfds.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ if (gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
+ res = pargs->retval;
+ errno = pargs->err;
+ } else {
+ res = 0;
+ errno = 0;
+ gpr_atm_no_barrier_store(&pargs->status, CANCELLED);
+ }
+ } else {
+ gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+ deadline =
+ gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
+ gpr_cv_wait(pollcv, &g_cvfds.mu, deadline);
+ res = 0;
+ }
+
+ idx = 0;
+ for (i = 0; i < nfds; i++) {
+ if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
+ cvn = g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs;
+ prev = NULL;
+ while (cvn->cv != pollcv) {
+ prev = cvn;
+ cvn = cvn->next;
+ GPR_ASSERT(cvn);
+ }
+ if (!prev) {
+ g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs = cvn->next;
+ } else {
+ prev->next = cvn->next;
+ }
+ gpr_free(cvn);
+
+ if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
+ fds[i].revents = POLLIN;
+ if (res >= 0) res++;
+ }
+ } else if (fds[i].fd >= 0 &&
+ gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
+ fds[i].revents = pargs->fds[idx].revents;
+ idx++;
+ }
+ }
+
+ if (pargs) {
+ decref_poll_args(pargs);
+ } else {
+ gpr_cv_destroy(pollcv);
+ gpr_free(pollcv);
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+
+ return res;
+}
+
+static void global_cv_fd_table_init() {
+ gpr_mu_init(&g_cvfds.mu);
+ gpr_mu_lock(&g_cvfds.mu);
+ gpr_cv_init(&g_cvfds.shutdown_complete);
+ g_cvfds.shutdown = 0;
+ g_cvfds.pollcount = 0;
+ g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
+ g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.free_fds = NULL;
+ for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
+ g_cvfds.cvfds[i].is_set = 0;
+ g_cvfds.cvfds[i].cvs = NULL;
+ g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[i];
+ }
+ // Override the poll function with one that supports cvfds
+ g_cvfds.poll = grpc_poll_function;
+ grpc_poll_function = &cvfd_poll;
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+static void global_cv_fd_table_shutdown() {
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.shutdown = 1;
+ // Attempt to wait for all abandoned poll() threads to terminate
+ // Not doing so will result in reported memory leaks
+ if (g_cvfds.pollcount > 0) {
+ int res = gpr_cv_wait(&g_cvfds.shutdown_complete, &g_cvfds.mu,
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(3, GPR_TIMESPAN)));
+ GPR_ASSERT(res == 0);
+ }
+ gpr_cv_destroy(&g_cvfds.shutdown_complete);
+ grpc_poll_function = g_cvfds.poll;
+ gpr_free(g_cvfds.cvfds);
+ gpr_mu_unlock(&g_cvfds.mu);
+ gpr_mu_destroy(&g_cvfds.mu);
+}
+
+/*******************************************************************************
* event engine binding
*/
-static void shutdown_engine(void) { pollset_global_shutdown(); }
+static void shutdown_engine(void) {
+ pollset_global_shutdown();
+ if (grpc_cv_wakeup_fds_enabled()) {
+ global_cv_fd_table_shutdown();
+ }
+}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
@@ -1273,11 +1531,29 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
+ if (!grpc_has_wakeup_fd()) {
+ return NULL;
+ }
+ if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ return NULL;
+ }
+ return &vtable;
+}
+
+const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void) {
+ global_cv_fd_table_init();
+ grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ global_cv_fd_table_shutdown();
+ grpc_enable_cv_wakeup_fds(0);
return NULL;
}
return &vtable;
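
A sketch consolidating the interposition that grpc_init_poll_cv_posix() sets up; FD_TO_IDX is assumed to be the usual negative-descriptor mapping from wakeup_fd_cv.h, everything else is taken from the hunks above.

/* global_cv_fd_table_init() swaps the poll() hook: */
g_cvfds.poll = grpc_poll_function; /* keep the real poll() */
grpc_poll_function = &cvfd_poll;   /* pollset_work() now enters cvfd_poll() */
/* cvfd_poll() then splits the descriptor set:
     fd >= 0  real sockets, handed to a detached run_poll() thread that calls
              the saved g_cvfds.poll in CV_POLL_PERIOD_MS slices;
     fd <  0  condition-variable wakeup fds, looked up via FD_TO_IDX(fd)
              (assumed -(fd)-1) and signalled with gpr_cv_signal(). */
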
diff --git a/src/core/lib/iomgr/ev_poll_posix.h b/src/core/lib/iomgr/ev_poll_posix.h
index 291736a2db..202ffca14c 100644
--- a/src/core/lib/iomgr/ev_poll_posix.h
+++ b/src/core/lib/iomgr/ev_poll_posix.h
@@ -37,5 +37,6 @@
#include "src/core/lib/iomgr/ev_posix.h"
const grpc_event_engine_vtable *grpc_init_poll_posix(void);
+const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void);
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 6536672685..ef36ba89b2 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_posix.h"
@@ -66,6 +66,7 @@ typedef struct {
static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"poll", grpc_init_poll_posix},
+ {"poll-cv", grpc_init_poll_cv_posix},
{"legacy", grpc_init_poll_and_epoll_posix},
};
@@ -258,4 +259,27 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
-#endif // GPR_POSIX_SOCKET
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return g_event_engine->workqueue_ref(workqueue, file, line, reason);
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {
+ g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
+}
+#else
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return g_event_engine->workqueue_ref(workqueue);
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+ g_event_engine->workqueue_unref(exec_ctx, workqueue);
+}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
+ g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
+}
+
+#endif // GRPC_POSIX_SOCKET
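
Callers go through the GRPC_WORKQUEUE_REF/UNREF macros rather than these vtable hooks directly, so the (file, line, reason) arguments only exist under GRPC_WORKQUEUE_REFCOUNT_DEBUG. The macro shape below is an assumption mirroring the PI_ADD_REF/PI_UNREF pattern in ev_epoll_linux.c, not a copy of workqueue.h.

#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
  grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
  grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
#else
#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref(p)
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) grpc_workqueue_unref((exec_ctx), (p))
#endif
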
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index c2aa1756ea..2fdef06838 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -40,6 +40,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/iomgr/workqueue.h"
typedef struct grpc_fd grpc_fd;
@@ -95,6 +96,18 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+ grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason);
+ void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason);
+#else
+ grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
+ void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+#endif
+ void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index ac7785ec13..604713e578 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@@ -60,18 +61,43 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
- while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
- grpc_closure *c = exec_ctx->closure_list.head;
- exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error;
- did_something = true;
- GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
+ for (;;) {
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_closure *c = exec_ctx->closure_list.head;
+ exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ did_something = true;
+ grpc_closure_run(exec_ctx, c, c->error_data.error);
+ c = next;
+ }
+ } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
+ break;
+ }
+ }
+ GPR_ASSERT(exec_ctx->active_combiner == NULL);
+ if (exec_ctx->stealing_from_workqueue != NULL) {
+ if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
+ exec_ctx->stolen_closure,
+ exec_ctx->stolen_closure->error_data.error);
+ GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
+ "exec_ctx_sched");
+ exec_ctx->stealing_from_workqueue = NULL;
+ exec_ctx->stolen_closure = NULL;
+ } else {
+ grpc_closure *c = exec_ctx->stolen_closure;
+ GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
+ "exec_ctx_sched");
+ exec_ctx->stealing_from_workqueue = NULL;
+ exec_ctx->stolen_closure = NULL;
+ grpc_error *error = c->error_data.error;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
- c = next;
+ GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
+ grpc_exec_ctx_flush(exec_ctx);
+ did_something = true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@@ -86,12 +112,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
+ GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- } else {
+ } else if (exec_ctx->stealing_from_workqueue == NULL) {
+ exec_ctx->stealing_from_workqueue = offload_target_or_null;
+ closure->error_data.error = error;
+ exec_ctx->stolen_closure = closure;
+ } else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
+ } else { /* stealing_from_workqueue == offload_target_or_null */
+ grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
+ exec_ctx->stolen_closure,
+ exec_ctx->stolen_closure->error_data.error);
+ closure->error_data.error = error;
+ exec_ctx->stolen_closure = closure;
+ GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
+ GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
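
The scheduling change above keeps at most one offloaded closure per exec_ctx: the first closure destined for a workqueue is "stolen" and held for inline execution at flush time, and a later closure aimed at the same workqueue displaces it (the previously stolen closure is pushed to the queue) so that closures from one thread still start in order. A minimal sketch of that decision logic follows; the _sketch names are hypothetical and the actual enqueue/ref bookkeeping is elided.

/* Sketch only: simplified model of the 1-deep steal in grpc_exec_ctx_sched. */
typedef struct ctx_sketch {
  void *stealing_from_workqueue; /* at most one workqueue we steal from */
  void *stolen_closure;          /* the single held-back closure */
} ctx_sketch;

static void sched_sketch(ctx_sketch *ctx, void *wq, void *closure) {
  if (wq == NULL) {
    /* no offload target: run on the exec_ctx's own closure list */
  } else if (ctx->stealing_from_workqueue == NULL) {
    /* first offload: hold the closure back to run inline at flush time */
    ctx->stealing_from_workqueue = wq;
    ctx->stolen_closure = closure;
  } else if (ctx->stealing_from_workqueue != wq) {
    /* different workqueue: enqueue there directly */
  } else {
    /* same workqueue: the previously stolen closure goes to the queue first,
       then the new one takes its slot, preserving per-thread ordering */
    ctx->stolen_closure = closure;
  }
}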
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 4d20ecf922..7e50cb9825 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -66,15 +66,33 @@ typedef struct grpc_combiner grpc_combiner;
#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
+ /** The workqueue we're stealing work from.
+ As items are queued to the execution context, we try to steal one
+ workqueue item and execute it inline (assuming the exec_ctx is not
+ finished) - doing so does not invalidate the workqueue's contract, and
+ provides a small latency win in cases where we get a hit */
+ grpc_workqueue *stealing_from_workqueue;
+ /** The workqueue item that was stolen from the workqueue above. When new
+ items are scheduled to be offloaded to that workqueue, we need to update
+ this like a 1-deep fifo to maintain the invariant that workqueue items
+ queued by one thread are started in order */
+ grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
+ /** last active combiner in the active combiner list */
+ grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
+/* initializer for grpc_exec_ctx:
+ prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
- { GRPC_CLOSURE_LIST_INIT, NULL, false, finish_check_arg, finish_check }
+ { \
+ GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
+ finish_check \
+ }
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@@ -85,8 +103,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
+/* initialize an execution context at the top level of an API call into grpc
+ (this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
- GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
+ GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
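
GRPC_EXEC_CTX_INIT now defaults to grpc_always_ready_to_finish, so a flush can hand a stolen workqueue item back to its queue as soon as the top-level call is winding down. The macro is still used in the usual top-of-API-call pattern; a hedged sketch (the function name is illustrative):

#include "src/core/lib/iomgr/exec_ctx.h"

void some_api_entry_point_sketch(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  /* ... create closures and schedule them onto &exec_ctx ... */
  grpc_exec_ctx_finish(&exec_ctx); /* flushes any pending closures */
}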
diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c
index 2532e52e48..60ebe43676 100644
--- a/src/core/lib/iomgr/iocp_windows.c
+++ b/src/core/lib/iomgr/iocp_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <winsock2.h>
@@ -166,4 +166,4 @@ void grpc_iocp_add_socket(grpc_winsocket *socket) {
GPR_ASSERT(ret == g_iocp);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index d67d388b8c..4fd83e0b22 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -112,6 +112,14 @@ void grpc_iomgr_shutdown(void) {
continue;
}
if (g_root_object.next != &g_root_object) {
+ if (grpc_iomgr_abort_on_leaks()) {
+ gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
+ abort();
+ }
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
@@ -122,9 +130,6 @@ void grpc_iomgr_shutdown(void) {
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
- if (grpc_iomgr_abort_on_leaks()) {
- abort();
- }
}
break;
}
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 6c82de78ac..c1cfaf302e 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -34,6 +34,8 @@
#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_H
#define GRPC_CORE_LIB_IOMGR_IOMGR_H
+#include "src/core/lib/iomgr/port.h"
+
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
diff --git a/src/core/lib/iomgr/iomgr_posix.c b/src/core/lib/iomgr/iomgr_posix.c
index cede97f4c6..f5ee0c9ee4 100644
--- a/src/core/lib/iomgr/iomgr_posix.c
+++ b/src/core/lib/iomgr/iomgr_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.c
new file mode 100644
index 0000000000..96516ff167
--- /dev/null
+++ b/src/core/lib/iomgr/iomgr_uv.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/pollset_uv.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+
+void grpc_iomgr_platform_init(void) {
+ grpc_pollset_global_init();
+ grpc_register_tracer("tcp", &grpc_tcp_trace);
+}
+void grpc_iomgr_platform_flush(void) {}
+void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/iomgr_windows.c b/src/core/lib/iomgr/iomgr_windows.c
index 7653f6e635..b659264ede 100644
--- a/src/core/lib/iomgr/iomgr_windows.c
+++ b/src/core/lib/iomgr/iomgr_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/sockaddr_windows.h"
diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.c
new file mode 100644
index 0000000000..e5ef8b29e0
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_set_uv.c
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/iomgr/pollset_set.h"
+
+grpc_pollset_set* grpc_pollset_set_create(void) {
+ return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+}
+
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c
index a35a9766fc..645650db9b 100644
--- a/src/core/lib/iomgr/pollset_set_windows.c
+++ b/src/core/lib/iomgr/pollset_set_windows.c
@@ -31,10 +31,10 @@
*
*/
-#include <grpc/support/port_platform.h>
#include <stdint.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/pollset_set_windows.h"
@@ -60,4 +60,4 @@ void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
new file mode 100644
index 0000000000..3a74b842b6
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include <string.h>
+
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_uv.h"
+
+struct grpc_pollset {
+ uv_timer_t timer;
+ int shutting_down;
+};
+
+/* Indicates that grpc_pollset_work should run an iteration of the UV loop
+ before running callbacks. This defaults to 1, and should be disabled if
+ grpc_pollset_work will be called within the callstack of uv_run */
+int grpc_pollset_work_run_loop;
+
+gpr_mu grpc_polling_mu;
+
+size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
+
+void grpc_pollset_global_init(void) {
+ gpr_mu_init(&grpc_polling_mu);
+ grpc_pollset_work_run_loop = 1;
+}
+
+void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
+
+void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+ *mu = &grpc_polling_mu;
+ memset(pollset, 0, sizeof(grpc_pollset));
+ uv_timer_init(uv_default_loop(), &pollset->timer);
+ pollset->shutting_down = 0;
+}
+
+static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
+
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
+ GPR_ASSERT(!pollset->shutting_down);
+ pollset->shutting_down = 1;
+ if (grpc_pollset_work_run_loop) {
+ // Drain any pending UV callbacks without blocking
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+}
+
+void grpc_pollset_destroy(grpc_pollset *pollset) {
+ uv_close((uv_handle_t *)&pollset->timer, timer_close_cb);
+ // timer.data is a boolean indicating that the timer has finished closing
+ pollset->timer.data = (void *)0;
+ if (grpc_pollset_work_run_loop) {
+ while (!pollset->timer.data) {
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ }
+}
+
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ pollset->shutting_down = 0;
+}
+
+static void timer_run_cb(uv_timer_t *timer) {}
+
+grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl,
+ gpr_timespec now, gpr_timespec deadline) {
+ uint64_t timeout;
+ gpr_mu_unlock(&grpc_polling_mu);
+ if (grpc_pollset_work_run_loop) {
+ if (gpr_time_cmp(deadline, now) >= 0) {
+ timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ } else {
+ timeout = 0;
+ }
+ /* We special-case timeout=0 so that we don't bother with the timer when
+ the loop won't block anyway */
+ if (timeout > 0) {
+ uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0);
+ /* Run until there is some I/O activity or the timer triggers. It doesn't
+ matter which happens */
+ uv_run(uv_default_loop(), UV_RUN_ONCE);
+ uv_timer_stop(&pollset->timer);
+ } else {
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ }
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_exec_ctx_flush(exec_ctx);
+ }
+ gpr_mu_lock(&grpc_polling_mu);
+ return GRPC_ERROR_NONE;
+}
+
+grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
+ return GRPC_ERROR_NONE;
+}
+
+#endif /* GRPC_UV */
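
grpc_pollset_work_run_loop defaults to 1 so that grpc_pollset_work drives uv_run itself; an embedder that already owns the libuv loop (for example a Node.js binding) is expected to clear it so the pollset never re-enters uv_run. A hedged sketch of that toggle (the wrapper function is illustrative, not part of this change):

#include "src/core/lib/iomgr/pollset_uv.h"

/* Sketch only: called once after gRPC initialization by an embedder whose
   own code is already running inside uv_run. */
static void disable_internal_uv_run_sketch(void) {
  grpc_pollset_work_run_loop = 0;
}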
diff --git a/src/core/lib/iomgr/pollset_uv.h b/src/core/lib/iomgr/pollset_uv.h
new file mode 100644
index 0000000000..0715eb4295
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_uv.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
+#define GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
+
+extern int grpc_pollset_work_run_loop;
+
+void grpc_pollset_global_init(void);
+void grpc_pollset_global_shutdown(void);
+
+#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 626dd784b3..5540303e49 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
@@ -241,4 +241,4 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
void grpc_kick_poller(void) { grpc_iocp_kick(); }
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
new file mode 100644
index 0000000000..f1897bb91f
--- /dev/null
+++ b/src/core/lib/iomgr/port.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifndef GRPC_CORE_LIB_IOMGR_PORT_H
+#define GRPC_CORE_LIB_IOMGR_PORT_H
+
+#if defined(GRPC_UV)
+// Do nothing
+#elif defined(GPR_MANYLINUX1)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_WINDOWS)
+#define GRPC_TIMER_USE_GENERIC 1
+#define GRPC_WINSOCK_SOCKET 1
+#define GRPC_WINDOWS_SOCKETUTILS 1
+#elif defined(GPR_ANDROID)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_LINUX_EVENTFD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_LINUX)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_LINUX_MULTIPOLL_WITH_EPOLL 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#ifdef __GLIBC_PREREQ
+#if __GLIBC_PREREQ(2, 9)
+#define GRPC_LINUX_EPOLL 1
+#define GRPC_LINUX_EVENTFD 1
+#endif
+#if __GLIBC_PREREQ(2, 10)
+#define GRPC_LINUX_SOCKETUTILS 1
+#endif
+#endif
+#ifndef GRPC_LINUX_EVENTFD
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#endif
+#ifndef GRPC_LINUX_SOCKETUTILS
+#define GRPC_POSIX_SOCKETUTILS
+#endif
+#elif defined(GPR_APPLE)
+#define GRPC_HAVE_SO_NOSIGPIPE 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_MSG_IOVLEN_TYPE int
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_FREEBSD)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_SO_NOSIGPIPE 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_NACL)
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif !defined(GPR_NO_AUTODETECT_PLATFORM)
+#error "Platform not recognized"
+#endif
+
+#if defined(GRPC_POSIX_SOCKET) + defined(GRPC_WINSOCK_SOCKET) + \
+ defined(GRPC_CUSTOM_SOCKET) + defined(GRPC_UV) != \
+ 1
+#error Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GRPC_CUSTOM_SOCKET, GRPC_UV
+#endif
+
+#endif /* GRPC_CORE_LIB_IOMGR_PORT_H */
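
port.h replaces the GPR_* socket guards from port_platform.h with iomgr-specific GRPC_* feature defines, so every platform-specific iomgr source now follows the same include-then-guard pattern, as the edits to iomgr_posix.c and the Windows files above show. A minimal sketch of that pattern (the guarded body is a placeholder):

#include "src/core/lib/iomgr/port.h"

#ifdef GRPC_POSIX_SOCKET

/* POSIX-socket-only implementation goes here */

#endif /* GRPC_POSIX_SOCKET */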
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index ddbe375755..275924448a 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -36,7 +36,6 @@
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr.h"
#define GRPC_MAX_SOCKADDR_SIZE 128
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 4e9f978584..de791b2b67 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -31,12 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
-#ifdef GPR_POSIX_SOCKET
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <string.h>
#include <sys/types.h>
@@ -49,7 +50,6 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
new file mode 100644
index 0000000000..b8295acfa1
--- /dev/null
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -0,0 +1,231 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+
+#include <string.h>
+
+typedef struct request {
+ grpc_closure *on_done;
+ grpc_resolved_addresses **addresses;
+ struct addrinfo *hints;
+} request;
+
+static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
+ grpc_resolved_addresses **addresses) {
+ struct addrinfo *resp;
+ size_t i;
+ if (status != 0) {
+ grpc_error *error;
+ *addresses = NULL;
+ error = GRPC_ERROR_CREATE("getaddrinfo failed");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+ (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses)->naddrs = 0;
+ for (resp = result; resp != NULL; resp = resp->ai_next) {
+ (*addresses)->naddrs++;
+ }
+ (*addresses)->addrs =
+ gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ i = 0;
+ for (resp = result; resp != NULL; resp = resp->ai_next) {
+ memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
+ (*addresses)->addrs[i].len = resp->ai_addrlen;
+ i++;
+ }
+
+ {
+ for (i = 0; i < (*addresses)->naddrs; i++) {
+ char *buf;
+ grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
+ gpr_free(buf);
+ }
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
+ struct addrinfo *res) {
+ request *r = (request *)req->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_error *error;
+ error = handle_addrinfo_result(status, res, r->addresses);
+ grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ gpr_free(r->hints);
+ gpr_free(r);
+ gpr_free(req);
+ uv_freeaddrinfo(res);
+}
+
+static grpc_error *try_split_host_port(const char *name,
+ const char *default_port, char **host,
+ char **port) {
+ /* parse name, splitting it into host and port parts */
+ grpc_error *error;
+ gpr_split_host_port(name, host, port);
+  if (*host == NULL) {
+ char *msg;
+ gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return error;
+ }
+  if (*port == NULL) {
+ if (default_port == NULL) {
+ char *msg;
+ gpr_asprintf(&msg, "no port in name '%s'", name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return error;
+ }
+ *port = gpr_strdup(default_port);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error *blocking_resolve_address_impl(
+ const char *name, const char *default_port,
+ grpc_resolved_addresses **addresses) {
+ char *host;
+ char *port;
+ struct addrinfo hints;
+ uv_getaddrinfo_t req;
+ int s;
+ grpc_error *err;
+
+ req.addrinfo = NULL;
+
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ goto done;
+ }
+
+ /* Call getaddrinfo */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
+ hints.ai_socktype = SOCK_STREAM; /* stream socket */
+ hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
+
+ s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
+ err = handle_addrinfo_result(s, req.addrinfo, addresses);
+
+done:
+ gpr_free(host);
+ gpr_free(port);
+ if (req.addrinfo) {
+ uv_freeaddrinfo(req.addrinfo);
+ }
+ return err;
+}
+
+grpc_error *(*grpc_blocking_resolve_address)(
+ const char *name, const char *default_port,
+ grpc_resolved_addresses **addresses) = blocking_resolve_address_impl;
+
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
+ if (addrs != NULL) {
+ gpr_free(addrs->addrs);
+ }
+ gpr_free(addrs);
+}
+
+static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ uv_getaddrinfo_t *req;
+ request *r;
+ struct addrinfo *hints;
+ char *host;
+ char *port;
+ grpc_error *err;
+ int s;
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ return;
+ }
+ r = gpr_malloc(sizeof(request));
+ r->on_done = on_done;
+ r->addresses = addrs;
+ req = gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req->data = r;
+
+ /* Call getaddrinfo */
+ hints = gpr_malloc(sizeof(struct addrinfo));
+ memset(hints, 0, sizeof(struct addrinfo));
+ hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
+ hints->ai_socktype = SOCK_STREAM; /* stream socket */
+ hints->ai_flags = AI_PASSIVE; /* for wildcard IP address */
+ r->hints = hints;
+
+ s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port,
+ hints);
+
+ if (s != 0) {
+ *addrs = NULL;
+ err = GRPC_ERROR_CREATE("getaddrinfo failed");
+ err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
+ grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ gpr_free(r);
+ gpr_free(req);
+ gpr_free(hints);
+ }
+}
+
+void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) =
+ resolve_address_impl;
+
+#endif /* GRPC_UV */
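
grpc_resolve_address is exposed as a function pointer so tests can stub it; callers hand it a closure that fires once the libuv getaddrinfo request completes. A hedged sketch of driving it (the callback and its bookkeeping are illustrative only):

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/resolve_address.h"

static grpc_resolved_addresses *g_addrs_sketch;

/* Sketch only: runs on the exec_ctx once resolution finishes. */
static void on_resolved_sketch(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  if (error == GRPC_ERROR_NONE && g_addrs_sketch != NULL) {
    /* use g_addrs_sketch->addrs[0 .. naddrs), then release the list */
    grpc_resolved_addresses_destroy(g_addrs_sketch);
  }
}

static void resolve_sketch(grpc_exec_ctx *exec_ctx) {
  static grpc_closure done;
  grpc_closure_init(&done, on_resolved_sketch, NULL);
  grpc_resolve_address(exec_ctx, "localhost", "443", &done, &g_addrs_sketch);
}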
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 2af8af82dc..e139293c03 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -31,12 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
-#ifdef GPR_WINSOCK_SOCKET
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_WINSOCK_SOCKET
-#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <string.h>
#include <sys/types.h>
@@ -124,8 +125,7 @@ static grpc_error *blocking_resolve_address_impl(
{
for (i = 0; i < (*addresses)->naddrs; i++) {
char *buf;
- grpc_sockaddr_to_string(
- &buf, (struct sockaddr *)&(*addresses)->addrs[i].addr, 0);
+ grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
gpr_free(buf);
}
}
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
new file mode 100644
index 0000000000..8a06443d58
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -0,0 +1,724 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/resource_quota.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/combiner.h"
+
+int grpc_resource_quota_trace = 0;
+
+struct grpc_resource_quota {
+ /* refcount */
+ gpr_refcount refs;
+
+ /* Master combiner lock: all activity on a quota executes under this combiner
+ * (so no mutex is needed for this data structure)
+ */
+ grpc_combiner *combiner;
+ /* Size of the resource quota */
+ int64_t size;
+ /* Amount of free memory in the resource quota */
+ int64_t free_pool;
+
+ /* Has rq_step been scheduled to occur? */
+ bool step_scheduled;
+ /* Are we currently reclaiming memory */
+ bool reclaiming;
+ /* Closure around rq_step */
+ grpc_closure rq_step_closure;
+ /* Closure around rq_reclamation_done */
+ grpc_closure rq_reclamation_done_closure;
+
+ /* Roots of all resource user lists */
+ grpc_resource_user *roots[GRPC_RULIST_COUNT];
+
+ char *name;
+};
+
+/*******************************************************************************
+ * list management
+ */
+
+static void rulist_add_head(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = *root;
+ resource_user->links[list].prev = (*root)->links[list].prev;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ *root = resource_user;
+ }
+}
+
+static void rulist_add_tail(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = (*root)->links[list].next;
+ resource_user->links[list].prev = *root;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ }
+}
+
+static bool rulist_empty(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ return resource_quota->roots[list] == NULL;
+}
+
+static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ grpc_resource_user **root = &resource_quota->roots[list];
+ grpc_resource_user *resource_user = *root;
+ if (resource_user == NULL) {
+ return NULL;
+ }
+ if (resource_user->links[list].next == resource_user) {
+ *root = NULL;
+ } else {
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+ *root = resource_user->links[list].next;
+ }
+ resource_user->links[list].next = resource_user->links[list].prev = NULL;
+ return resource_user;
+}
+
+static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) {
+ if (resource_user->links[list].next == NULL) return;
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = resource_user->links[list].next;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = NULL;
+ }
+ }
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+}
+
+/*******************************************************************************
+ * resource quota state machine
+ */
+
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive);
+
+static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->step_scheduled = false;
+ do {
+ if (rq_alloc(exec_ctx, resource_quota)) goto done;
+ } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota));
+
+ if (!rq_reclaim(exec_ctx, resource_quota, false)) {
+ rq_reclaim(exec_ctx, resource_quota, true);
+ }
+
+done:
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+static void rq_step_sched(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (resource_quota->step_scheduled) return;
+ resource_quota->step_scheduled = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
+ &resource_quota->rq_step_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+/* returns true if all allocations are completed */
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool < 0 &&
+ -resource_user->free_pool <= resource_quota->free_pool) {
+ int64_t amt = -resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool -= amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ } else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
+ resource_quota->name, resource_user->name);
+ }
+ if (resource_user->free_pool >= 0) {
+ resource_user->allocating = false;
+ grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+ gpr_mu_unlock(&resource_user->mu);
+ } else {
+ rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+ gpr_mu_unlock(&resource_user->mu);
+ return false;
+ }
+ }
+ return true;
+}
+
+/* returns true if any memory could be reclaimed from buffers */
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool > 0) {
+ int64_t amt = resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool += amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+ return true;
+ } else {
+ gpr_mu_unlock(&resource_user->mu);
+ }
+ }
+ return false;
+}
+
+/* returns true if reclamation is proceeding */
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive) {
+ if (resource_quota->reclaiming) return true;
+ grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
+ : GRPC_RULIST_RECLAIMER_BENIGN;
+ grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
+ if (resource_user == NULL) return false;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
+ resource_quota->name, resource_user->name,
+ destructive ? "destructive" : "benign");
+ }
+ resource_quota->reclaiming = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure *c = resource_user->reclaimers[destructive];
+ resource_user->reclaimers[destructive] = NULL;
+ grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
+ return true;
+}
+
+/*******************************************************************************
+ * ru_slice: a slice implementation that is backed by a grpc_resource_user
+ */
+
+typedef struct {
+ gpr_slice_refcount base;
+ gpr_refcount refs;
+ grpc_resource_user *resource_user;
+ size_t size;
+} ru_slice_refcount;
+
+static void ru_slice_ref(void *p) {
+ ru_slice_refcount *rc = p;
+ gpr_ref(&rc->refs);
+}
+
+static void ru_slice_unref(void *p) {
+ ru_slice_refcount *rc = p;
+ if (gpr_unref(&rc->refs)) {
+ /* TODO(ctiller): this is dangerous, but I think safe for now:
+ we have no guarantee here that we're at a safe point for creating an
+ execution context, but we have no way of writing this code otherwise.
+ In the future: consider lifting gpr_slice to grpc, and offering an
+ internal_{ref,unref} pair that is execution context aware.
+       Alternatively, make exec_ctx be thread local and 'do the right thing'
+       (whatever that is) if NULL */
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_user_free(&exec_ctx, rc->resource_user, rc->size);
+ grpc_exec_ctx_finish(&exec_ctx);
+ gpr_free(rc);
+ }
+}
+
+static gpr_slice ru_slice_create(grpc_resource_user *resource_user,
+ size_t size) {
+ ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ rc->base.ref = ru_slice_ref;
+ rc->base.unref = ru_slice_unref;
+ gpr_ref_init(&rc->refs, 1);
+ rc->resource_user = resource_user;
+ rc->size = size;
+ gpr_slice slice;
+ slice.refcount = &rc->base;
+ slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
+ slice.data.refcounted.length = size;
+ return slice;
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: resource user manipulation under
+ * the combiner
+ */
+
+static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+}
+
+static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
+}
+
+static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
+}
+
+static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+}
+
+static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ GPR_ASSERT(resource_user->allocated == 0);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ rulist_remove(resource_user, (grpc_rulist)i);
+ }
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure),
+ GRPC_ERROR_NONE, NULL);
+ if (resource_user->free_pool != 0) {
+ resource_user->resource_quota->free_pool += resource_user->free_pool;
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+}
+
+static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resource_user_slice_allocator *slice_allocator = arg;
+ if (error == GRPC_ERROR_NONE) {
+ for (size_t i = 0; i < slice_allocator->count; i++) {
+ gpr_slice_buffer_add_indexed(
+ slice_allocator->dest, ru_slice_create(slice_allocator->resource_user,
+ slice_allocator->length));
+ }
+ }
+ grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: quota manipulation under the
+ * combiner
+ */
+
+typedef struct {
+ int64_t size;
+ grpc_resource_quota *resource_quota;
+ grpc_closure closure;
+} rq_resize_args;
+
+static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
+ rq_resize_args *a = args;
+ int64_t delta = a->size - a->resource_quota->size;
+ a->resource_quota->size += delta;
+ a->resource_quota->free_pool += delta;
+ rq_step_sched(exec_ctx, a->resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, a->resource_quota);
+ gpr_free(a);
+}
+
+static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
+ grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->reclaiming = false;
+ rq_step_sched(exec_ctx, resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+/*******************************************************************************
+ * grpc_resource_quota api
+ */
+
+/* Public API */
+grpc_resource_quota *grpc_resource_quota_create(const char *name) {
+ grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ gpr_ref_init(&resource_quota->refs, 1);
+ resource_quota->combiner = grpc_combiner_create(NULL);
+ resource_quota->free_pool = INT64_MAX;
+ resource_quota->size = INT64_MAX;
+ resource_quota->step_scheduled = false;
+ resource_quota->reclaiming = false;
+ if (name != NULL) {
+ resource_quota->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
+ (intptr_t)resource_quota);
+ }
+ grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+ grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
+ rq_reclamation_done, resource_quota);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_quota->roots[i] = NULL;
+ }
+ return resource_quota;
+}
+
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (gpr_unref(&resource_quota->refs)) {
+ grpc_combiner_destroy(exec_ctx, resource_quota->combiner);
+ gpr_free(resource_quota->name);
+ gpr_free(resource_quota);
+ }
+}
+
+/* Public API */
+void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota) {
+ gpr_ref(&resource_quota->refs);
+ return resource_quota;
+}
+
+/* Public API */
+void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
+ grpc_resource_quota_internal_ref(resource_quota);
+}
+
+/* Public API */
+void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
+ size_t size) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ rq_resize_args *a = gpr_malloc(sizeof(*a));
+ a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
+ a->size = (int64_t)size;
+ grpc_closure_init(&a->closure, rq_resize, a);
+ grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
+ GRPC_ERROR_NONE, false);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+/*******************************************************************************
+ * grpc_resource_user channel args api
+ */
+
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ if (channel_args->args[i].type == GRPC_ARG_POINTER) {
+ return grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ } else {
+ gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
+ }
+ }
+ }
+ return grpc_resource_quota_create(NULL);
+}
+
+static void *rq_copy(void *rq) {
+ grpc_resource_quota_ref(rq);
+ return rq;
+}
+
+static void rq_destroy(void *rq) { grpc_resource_quota_unref(rq); }
+
+static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
+ static const grpc_arg_pointer_vtable vtable = {rq_copy, rq_destroy, rq_cmp};
+ return &vtable;
+}
+
+/*******************************************************************************
+ * grpc_resource_user api
+ */
+
+void grpc_resource_user_init(grpc_resource_user *resource_user,
+ grpc_resource_quota *resource_quota,
+ const char *name) {
+ resource_user->resource_quota =
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
+ resource_user);
+ grpc_closure_init(&resource_user->add_to_free_pool_closure,
+ &ru_add_to_free_pool, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[0],
+ &ru_post_benign_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[1],
+ &ru_post_destructive_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
+ resource_user);
+ gpr_mu_init(&resource_user->mu);
+ resource_user->allocated = 0;
+ resource_user->free_pool = 0;
+ grpc_closure_list_init(&resource_user->on_allocated);
+ resource_user->allocating = false;
+ resource_user->added_to_free_pool = false;
+ gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure, 0);
+ resource_user->reclaimers[0] = NULL;
+ resource_user->reclaimers[1] = NULL;
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_user->links[i].next = resource_user->links[i].prev = NULL;
+ }
+ if (name != NULL) {
+ resource_user->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR,
+ (intptr_t)resource_user);
+ }
+}
+
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ grpc_closure *on_done) {
+ gpr_mu_lock(&resource_user->mu);
+ GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) ==
+ 0);
+ gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure,
+ (gpr_atm)on_done);
+ if (resource_user->allocated == 0) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->destroy_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
+ gpr_mu_destroy(&resource_user->mu);
+ gpr_free(resource_user->name);
+}
+
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done) {
+ gpr_mu_lock(&resource_user->mu);
+ grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure);
+ if (on_done_destroy != NULL) {
+ /* already shutdown */
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR " after shutdown",
+ resource_user->resource_quota->name, resource_user->name, size);
+ }
+ grpc_exec_ctx_sched(
+ exec_ctx, optional_on_done,
+ GRPC_ERROR_CREATE("Buffer pool user is already shutdown"), NULL);
+ gpr_mu_unlock(&resource_user->mu);
+ return;
+ }
+ resource_user->allocated += (int64_t)size;
+ resource_user->free_pool -= (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; allocated -> %" PRId64
+ ", free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->allocated, resource_user->free_pool);
+ }
+ if (resource_user->free_pool < 0) {
+ grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
+ GRPC_ERROR_NONE);
+ if (!resource_user->allocating) {
+ resource_user->allocating = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->allocate_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ } else {
+ grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size) {
+ gpr_mu_lock(&resource_user->mu);
+ GPR_ASSERT(resource_user->allocated >= (int64_t)size);
+ bool was_zero_or_negative = resource_user->free_pool <= 0;
+ resource_user->free_pool += (int64_t)size;
+ resource_user->allocated -= (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; allocated -> %" PRId64
+ ", free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->allocated, resource_user->free_pool);
+ }
+ bool is_bigger_than_zero = resource_user->free_pool > 0;
+ if (is_bigger_than_zero && was_zero_or_negative &&
+ !resource_user->added_to_free_pool) {
+ resource_user->added_to_free_pool = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->add_to_free_pool_closure,
+ GRPC_ERROR_NONE, false);
+ }
+ grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure);
+ if (on_done_destroy != NULL && resource_user->allocated == 0) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->destroy_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive,
+ grpc_closure *closure) {
+ if (gpr_atm_acq_load(&resource_user->on_done_destroy_closure) == 0) {
+ GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
+ resource_user->reclaimers[destructive] = closure;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->post_reclaimer_closure[destructive],
+ GRPC_ERROR_NONE, false);
+ } else {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+ }
+}
+
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
+ resource_user->resource_quota->name, resource_user->name);
+ }
+ grpc_combiner_execute(
+ exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->resource_quota->rq_reclamation_done_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
+ grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
+ slice_allocator);
+ grpc_closure_init(&slice_allocator->on_done, cb, p);
+ slice_allocator->resource_user = resource_user;
+}
+
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, gpr_slice_buffer *dest) {
+ slice_allocator->length = length;
+ slice_allocator->count = count;
+ slice_allocator->dest = dest;
+ grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
+ count * length, &slice_allocator->on_allocated);
+}
+
+gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size) {
+ grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL);
+ return ru_slice_create(resource_user, size);
+}
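
The alloc/free pair above is the heart of the accounting: grpc_resource_user_alloc debits the user's free_pool (and, when the pool goes negative, asks the quota for more under the combiner), while grpc_resource_user_free credits it back and re-advertises the user as a source of spare memory once the pool turns positive. A minimal caller-side sketch, assuming an already-initialized resource user and exec_ctx (the helper name is illustrative, not part of this change):

/* Illustrative only: passing NULL as optional_on_done allocates immediately,
   possibly pushing the quota over-limit and triggering reclamation, exactly
   as grpc_resource_user_slice_malloc above does. */
static void account_for_scratch_buffer(grpc_exec_ctx *exec_ctx,
                                       grpc_resource_user *resource_user,
                                       size_t buffer_size) {
  grpc_resource_user_alloc(exec_ctx, resource_user, buffer_size, NULL);
  /* ... the buffer backed by this accounting is in use for a while ... */
  grpc_resource_user_free(exec_ctx, resource_user, buffer_size);
}
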
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
new file mode 100644
index 0000000000..da68f21a2c
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -0,0 +1,229 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+#define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+
+#include <grpc/grpc.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+/** \file Tracks resource usage against a pool.
+
+ The current implementation tracks only memory usage, but in the future
+ this may be extended to (for example) threads and file descriptors.
+
+ A grpc_resource_quota represents the pooled resources, and
+ grpc_resource_user instances attach to the quota and consume those
+ resources. They also offer a vector for reclamation: if we become
+ resource constrained, grpc_resource_user instances are asked (in turn) to
+ free up whatever they can so that the system as a whole can make progress.
+
+ There are three kinds of reclamation that take place, in order of increasing
+ invasiveness:
+ - an internal reclamation, where resources cached at the resource user level
+ are returned to the quota
+ - a benign reclamation phase, whereby resources that are in use but are not
+ helping anything make progress are reclaimed
+ - a destructive reclamation, whereby resources that are helping something
+ make progress may be reclaimed so that at least one part of the system can
+ complete.
+
+ Only one reclamation will be outstanding for a given quota at a given time.
+ On each reclamation attempt, the kinds of reclamation are tried in order of
+ increasing invasiveness, stopping at the first one that succeeds. Thus, on a
+ given reclamation attempt, if internal and benign reclamation both fail, it
+ will wind up doing a destructive reclamation. However, the next reclamation
+ attempt may then be able to get what it needs via internal or benign
+ reclamation, due to resources that may have been freed up by the destructive
+ reclamation in the previous attempt.
+
+ Future work will be to expose the current resource pressure so that back
+ pressure can be applied to avoid triggering reclamation phases.
+
+ Resource users own references to resource quotas, and resource quotas
+ maintain lists of users (which users arrange to leave before they are
+ destroyed) */
+
+extern int grpc_resource_quota_trace;
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota);
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args);
+
+/* Resource users are kept in (potentially) several intrusive linked lists
+ at once. These are the list names. */
+typedef enum {
+ /* Resource users that are waiting for an allocation */
+ GRPC_RULIST_AWAITING_ALLOCATION,
+ /* Resource users that have free memory available for internal reclamation */
+ GRPC_RULIST_NON_EMPTY_FREE_POOL,
+ /* Resource users that have published a benign reclaimer */
+ GRPC_RULIST_RECLAIMER_BENIGN,
+ /* Resource users that have published a destructive reclaimer */
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE,
+ /* Number of lists: must be last */
+ GRPC_RULIST_COUNT
+} grpc_rulist;
+
+typedef struct grpc_resource_user grpc_resource_user;
+
+/* Internal linked list pointers for a resource user */
+typedef struct {
+ grpc_resource_user *next;
+ grpc_resource_user *prev;
+} grpc_resource_user_link;
+
+struct grpc_resource_user {
+ /* The quota this resource user consumes from */
+ grpc_resource_quota *resource_quota;
+
+ /* Closure to schedule an allocation under the resource quota combiner lock */
+ grpc_closure allocate_closure;
+ /* Closure to publish a non-empty free pool under the resource quota combiner
+ lock */
+ grpc_closure add_to_free_pool_closure;
+
+ gpr_mu mu;
+ /* Total allocated memory outstanding by this resource user in bytes;
always non-negative */
+ int64_t allocated;
+ /* The amount of memory (in bytes) this user has cached for its own use: to
+ avoid quota contention, each resource user can keep some memory in
+ addition to what it is immediately using (e.g., for caching), and the quota
+ can pull it back under memory pressure.
+ This value can become negative if more memory has been requested than
+ existed in the free pool, at which point the quota is consulted to bring
+ this value non-negative (asynchronously). */
+ int64_t free_pool;
+ /* A list of closures to call once free_pool becomes non-negative - i.e., when
+ all outstanding allocations have been granted. */
+ grpc_closure_list on_allocated;
+ /* True if we are currently trying to allocate from the quota, false if not */
+ bool allocating;
+ /* True if we are currently trying to add ourselves to the quota's non-empty
+ free pool list, false otherwise */
+ bool added_to_free_pool;
+
+ /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
+ */
+ grpc_closure *reclaimers[2];
+ /* Trampoline closures to finish reclamation and re-enter the quota combiner
+ lock */
+ grpc_closure post_reclaimer_closure[2];
+
+ /* Closure to execute under the quota combiner to de-register and shut down
+ the resource user */
+ grpc_closure destroy_closure;
+ /* User-supplied closure to call once the user has finished shutting down AND
+ all outstanding allocations have been freed. Real type is grpc_closure*,
+ but it's stored as an atomic to avoid a mutex on some fast paths. */
+ gpr_atm on_done_destroy_closure;
+
+ /* Links in the various grpc_rulist lists */
+ grpc_resource_user_link links[GRPC_RULIST_COUNT];
+
+ /* The name of this resource user, for debugging/tracing */
+ char *name;
+};
+
+void grpc_resource_user_init(grpc_resource_user *resource_user,
+ grpc_resource_quota *resource_quota,
+ const char *name);
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ grpc_closure *on_done);
+void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+
+/* Allocate from the resource user (and its quota).
+ If optional_on_done is NULL, then allocate immediately. This may push the
+ quota over-limit, at which point reclamation will kick in.
+ If optional_on_done is non-NULL, it will be scheduled when the allocation has
+ been granted by the quota. */
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done);
+/* Release memory back to the quota */
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size);
+/* Post a memory reclaimer to the resource user. Only one benign and one
+ destructive reclaimer can be posted at once. When executed, the reclaimer
+ MUST call grpc_resource_user_finish_reclamation before it completes, to
+ return control to the resource quota. */
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive, grpc_closure *closure);
+/* Finish a reclamation step */
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+
+/* Helper to allocate slices from a resource user */
+typedef struct grpc_resource_user_slice_allocator {
+ /* Closure for when a resource user allocation completes */
+ grpc_closure on_allocated;
+ /* Closure to call when slices have been allocated */
+ grpc_closure on_done;
+ /* Length of slices to allocate on the current request */
+ size_t length;
+ /* Number of slices to allocate on the current request */
+ size_t count;
+ /* Destination for slices to allocate on the current request */
+ gpr_slice_buffer *dest;
+ /* Parent resource user */
+ grpc_resource_user *resource_user;
+} grpc_resource_user_slice_allocator;
+
+/* Initialize a slice allocator.
+ When an allocation is completed, calls \a cb with arg \a p. */
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);
+
+/* Allocate \a count slices of length \a length into \a dest. Only one request
+ can be outstanding at a time. */
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, gpr_slice_buffer *dest);
+
+/* Allocate one slice of length \a size synchronously. */
+gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size);
+
+#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
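
To make the reclamation contract concrete, the following sketch registers a benign reclaimer the way a caller might. The my_cache type and the idea of dropping idle buffers are assumptions for the example; the only real APIs used are the closure and resource-user functions declared in this header:

typedef struct {
  grpc_resource_user *resource_user;
  grpc_closure reclaimer_closure;
  /* hypothetical cached buffers would live here */
} my_cache;

static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  my_cache *cache = arg;
  if (error == GRPC_ERROR_NONE) {
    /* Hypothetical: drop idle cached buffers and return their bytes. */
    size_t dropped = 0; /* e.g. my_cache_drop_idle_buffers(cache) */
    if (dropped > 0) {
      grpc_resource_user_free(exec_ctx, cache->resource_user, dropped);
    }
    /* Required by the contract above: return control to the quota. */
    grpc_resource_user_finish_reclamation(exec_ctx, cache->resource_user);
  }
  /* On GRPC_ERROR_CANCELLED the quota never started this reclamation (the
     user was shut down first), so there is nothing to finish. */
}

static void register_benign_reclaimer(grpc_exec_ctx *exec_ctx,
                                      my_cache *cache) {
  grpc_closure_init(&cache->reclaimer_closure, benign_reclaimer, cache);
  grpc_resource_user_post_reclaimer(exec_ctx, cache->resource_user,
                                    false /* destructive */,
                                    &cache->reclaimer_closure);
}
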
diff --git a/src/core/lib/iomgr/sockaddr.h b/src/core/lib/iomgr/sockaddr.h
index 5563d0b8a6..52b504390d 100644
--- a/src/core/lib/iomgr/sockaddr.h
+++ b/src/core/lib/iomgr/sockaddr.h
@@ -31,16 +31,24 @@
*
*/
+/* This header transitively includes other headers that care about include
+ * order, so it should be included first. As a consequence, it should not be
+ * included in any other header. */
+
#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_H
#define GRPC_CORE_LIB_IOMGR_SOCKADDR_H
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+#include <uv.h>
+#endif
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/sockaddr_windows.h"
#endif
-#ifdef GPR_POSIX_SOCKETADDR
+#ifdef GRPC_POSIX_SOCKETADDR
#include "src/core/lib/iomgr/sockaddr_posix.h"
#endif
diff --git a/src/core/lib/iomgr/sockaddr_utils.c b/src/core/lib/iomgr/sockaddr_utils.c
index 127d95c618..44bc2f968b 100644
--- a/src/core/lib/iomgr/sockaddr_utils.c
+++ b/src/core/lib/iomgr/sockaddr_utils.c
@@ -42,26 +42,32 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xff, 0xff};
-int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in *addr4_out) {
- GPR_ASSERT(addr != (struct sockaddr *)addr4_out);
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *resolved_addr,
+ grpc_resolved_address *resolved_addr4_out) {
+ GPR_ASSERT(resolved_addr != resolved_addr4_out);
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ struct sockaddr_in *addr4_out =
+ (struct sockaddr_in *)resolved_addr4_out->addr;
if (addr->sa_family == AF_INET6) {
const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr;
if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix,
sizeof(kV4MappedPrefix)) == 0) {
- if (addr4_out != NULL) {
+ if (resolved_addr4_out != NULL) {
/* Normalize ::ffff:0.0.0.0/96 to IPv4. */
- memset(addr4_out, 0, sizeof(*addr4_out));
+ memset(resolved_addr4_out, 0, sizeof(*resolved_addr4_out));
addr4_out->sin_family = AF_INET;
/* s6_addr32 would be nice, but it's non-standard. */
memcpy(&addr4_out->sin_addr, &addr6->sin6_addr.s6_addr[12], 4);
addr4_out->sin_port = addr6->sin6_port;
+ resolved_addr4_out->len = sizeof(struct sockaddr_in);
}
return 1;
}
@@ -69,26 +75,33 @@ int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
return 0;
}
-int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in6 *addr6_out) {
- GPR_ASSERT(addr != (struct sockaddr *)addr6_out);
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *resolved_addr,
+ grpc_resolved_address *resolved_addr6_out) {
+ GPR_ASSERT(resolved_addr != resolved_addr6_out);
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ struct sockaddr_in6 *addr6_out =
+ (struct sockaddr_in6 *)resolved_addr6_out->addr;
if (addr->sa_family == AF_INET) {
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
- memset(addr6_out, 0, sizeof(*addr6_out));
+ memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out));
addr6_out->sin6_family = AF_INET6;
memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12);
memcpy(&addr6_out->sin6_addr.s6_addr[12], &addr4->sin_addr, 4);
addr6_out->sin6_port = addr4->sin_port;
+ resolved_addr6_out->len = sizeof(struct sockaddr_in6);
return 1;
}
return 0;
}
-int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out) {
- struct sockaddr_in addr4_normalized;
- if (grpc_sockaddr_is_v4mapped(addr, &addr4_normalized)) {
- addr = (struct sockaddr *)&addr4_normalized;
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr,
+ int *port_out) {
+ const struct sockaddr *addr;
+ grpc_resolved_address addr4_normalized;
+ if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) {
+ resolved_addr = &addr4_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
/* Check for 0.0.0.0 */
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
@@ -113,39 +126,49 @@ int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out) {
}
}
-void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
- struct sockaddr_in6 *wild6_out) {
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
+ grpc_resolved_address *wild6_out) {
grpc_sockaddr_make_wildcard4(port, wild4_out);
grpc_sockaddr_make_wildcard6(port, wild6_out);
}
-void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out) {
+void grpc_sockaddr_make_wildcard4(int port,
+ grpc_resolved_address *resolved_wild_out) {
+ struct sockaddr_in *wild_out = (struct sockaddr_in *)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
- memset(wild_out, 0, sizeof(*wild_out));
+ memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin_family = AF_INET;
wild_out->sin_port = htons((uint16_t)port);
+ resolved_wild_out->len = sizeof(struct sockaddr_in);
}
-void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out) {
+void grpc_sockaddr_make_wildcard6(int port,
+ grpc_resolved_address *resolved_wild_out) {
+ struct sockaddr_in6 *wild_out =
+ (struct sockaddr_in6 *)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
- memset(wild_out, 0, sizeof(*wild_out));
+ memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;
wild_out->sin6_port = htons((uint16_t)port);
+ resolved_wild_out->len = sizeof(struct sockaddr_in6);
}
-int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
+int grpc_sockaddr_to_string(char **out,
+ const grpc_resolved_address *resolved_addr,
int normalize) {
+ const struct sockaddr *addr;
const int save_errno = errno;
- struct sockaddr_in addr_normalized;
+ grpc_resolved_address addr_normalized;
char ntop_buf[INET6_ADDRSTRLEN];
const void *ip = NULL;
int port;
int ret;
*out = NULL;
- if (normalize && grpc_sockaddr_is_v4mapped(addr, &addr_normalized)) {
- addr = (const struct sockaddr *)&addr_normalized;
+ if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
+ resolved_addr = &addr_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
ip = &addr4->sin_addr;
@@ -155,10 +178,8 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
}
- /* Windows inet_ntop wants a mutable ip pointer */
if (ip != NULL &&
- inet_ntop(addr->sa_family, (void *)ip, ntop_buf, sizeof(ntop_buf)) !=
- NULL) {
+ grpc_inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) {
ret = gpr_join_host_port(out, ntop_buf, port);
} else {
ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family);
@@ -168,39 +189,43 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
return ret;
}
-char *grpc_sockaddr_to_uri(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) {
char *temp;
char *result;
- struct sockaddr_in addr_normalized;
+ grpc_resolved_address addr_normalized;
+ const struct sockaddr *addr;
- if (grpc_sockaddr_is_v4mapped(addr, &addr_normalized)) {
- addr = (const struct sockaddr *)&addr_normalized;
+ if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
+ resolved_addr = &addr_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
+
switch (addr->sa_family) {
case AF_INET:
- grpc_sockaddr_to_string(&temp, addr, 0);
+ grpc_sockaddr_to_string(&temp, resolved_addr, 0);
gpr_asprintf(&result, "ipv4:%s", temp);
gpr_free(temp);
return result;
case AF_INET6:
- grpc_sockaddr_to_string(&temp, addr, 0);
+ grpc_sockaddr_to_string(&temp, resolved_addr, 0);
gpr_asprintf(&result, "ipv6:%s", temp);
gpr_free(temp);
return result;
default:
- return grpc_sockaddr_to_uri_unix_if_possible(addr);
+ return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr);
}
}
-int grpc_sockaddr_get_port(const struct sockaddr *addr) {
+int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
return ntohs(((struct sockaddr_in *)addr)->sin_port);
case AF_INET6:
return ntohs(((struct sockaddr_in6 *)addr)->sin6_port);
default:
- if (grpc_is_unix_socket(addr)) {
+ if (grpc_is_unix_socket(resolved_addr)) {
return 1;
}
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port",
@@ -209,7 +234,9 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
}
}
-int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
+int grpc_sockaddr_set_port(const grpc_resolved_address *resolved_addr,
+ int port) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index 9f81992e6b..5371e360c5 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -34,40 +34,40 @@
#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H
#define GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Returns true if addr is an IPv4-mapped IPv6 address within the
::ffff:0.0.0.0/96 range, or false otherwise.
If addr4_out is non-NULL, the inner IPv4 address will be copied here when
returning true. */
-int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in *addr4_out);
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *addr,
+ grpc_resolved_address *addr4_out);
/* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96
address to addr6_out and returns true. Otherwise returns false. */
-int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in6 *addr6_out);
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *addr,
+ grpc_resolved_address *addr6_out);
/* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to
*port_out (if not NULL) and returns true, otherwise returns false. */
-int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out);
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address *addr, int *port_out);
/* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */
-void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
- struct sockaddr_in6 *wild6_out);
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
+ grpc_resolved_address *wild6_out);
/* Writes 0.0.0.0:port. */
-void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out);
+void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address *wild_out);
/* Writes [::]:port. */
-void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out);
+void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address *wild_out);
/* Return the IP port number of a sockaddr */
-int grpc_sockaddr_get_port(const struct sockaddr *addr);
+int grpc_sockaddr_get_port(const grpc_resolved_address *addr);
/* Set IP port number of a sockaddr */
-int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
+int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port);
/* Converts a sockaddr into a newly-allocated human-readable string.
@@ -81,9 +81,9 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
In the unlikely event of an error, returns -1 and sets *out to NULL.
The existing value of errno is always preserved. */
-int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
+int grpc_sockaddr_to_string(char **out, const grpc_resolved_address *addr,
int normalize);
-char *grpc_sockaddr_to_uri(const struct sockaddr *addr);
+char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr);
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */
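
Since these helpers now take a grpc_resolved_address instead of a raw struct sockaddr, callers fill the resolved-address buffer and record its length alongside. A sketch of the new calling convention (the function name and port are illustrative; the enclosing .c file would include sockaddr.h first, per that header's comment, plus the usual gpr log/alloc headers):

static void log_loopback_address(void) {
  grpc_resolved_address resolved;
  memset(&resolved, 0, sizeof(resolved));
  struct sockaddr_in *sin = (struct sockaddr_in *)resolved.addr;
  sin->sin_family = AF_INET;
  sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  sin->sin_port = htons(50051);
  resolved.len = sizeof(struct sockaddr_in);

  char *text = NULL;
  grpc_sockaddr_to_string(&text, &resolved, 1 /* normalize */);
  gpr_log(GPR_INFO, "address: %s", text); /* prints 127.0.0.1:50051 */
  gpr_free(text);
}
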
diff --git a/src/core/lib/iomgr/workqueue_posix.h b/src/core/lib/iomgr/socket_utils.h
index 03ee21cef7..cc3ee2e30c 100644
--- a/src/core/lib/iomgr/workqueue_posix.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -31,31 +31,12 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H
+#define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/support/mpscq.h"
+#include <stddef.h>
-struct grpc_fd;
+/* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size);
-struct grpc_workqueue {
- gpr_refcount refs;
- gpr_mpscq queue;
- // state is:
- // lower bit - zero if orphaned
- // other bits - number of items enqueued
- gpr_atm state;
-
- grpc_wakeup_fd wakeup_fd;
- struct grpc_fd *wakeup_read_fd;
-
- grpc_closure read_closure;
-};
-
-/** Create a work queue. Returns an error if creation fails. If creation
- succeeds, sets *workqueue to point to it. */
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
- grpc_workqueue **workqueue);
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H */
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index 9c67ef8940..19e290b95a 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -31,10 +31,11 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
+#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include <arpa/inet.h>
@@ -78,7 +79,7 @@ grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) {
}
grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
-#ifdef GPR_HAVE_SO_NOSIGPIPE
+#ifdef GRPC_HAVE_SO_NOSIGPIPE
int val = 1;
int newval;
socklen_t intlen = sizeof(newval);
@@ -96,7 +97,7 @@ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
}
grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
-#ifdef GPR_HAVE_IP_PKTINFO
+#ifdef GRPC_HAVE_IP_PKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip,
sizeof(get_local_ip))) {
@@ -107,7 +108,7 @@ grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
}
grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
-#ifdef GPR_HAVE_IPV6_RECVPKTINFO
+#ifdef GRPC_HAVE_IPV6_RECVPKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip,
sizeof(get_local_ip))) {
@@ -262,7 +263,7 @@ static int set_socket_dualstack(int fd) {
}
}
-static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) {
+static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) {
if (fd >= 0) return GRPC_ERROR_NONE;
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
@@ -272,10 +273,10 @@ static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) {
return err;
}
-grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
- int protocol,
- grpc_dualstack_mode *dsmode,
- int *newfd) {
+grpc_error *grpc_create_dualstack_socket(
+ const grpc_resolved_address *resolved_addr, int type, int protocol,
+ grpc_dualstack_mode *dsmode, int *newfd) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
@@ -290,9 +291,9 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
return GRPC_ERROR_NONE;
}
/* If this isn't an IPv4 address, then return whatever we've got. */
- if (!grpc_sockaddr_is_v4mapped(addr, NULL)) {
+ if (!grpc_sockaddr_is_v4mapped(resolved_addr, NULL)) {
*dsmode = GRPC_DSMODE_IPV6;
- return error_for_fd(*newfd, addr);
+ return error_for_fd(*newfd, resolved_addr);
}
/* Fall back to AF_INET. */
if (*newfd >= 0) {
@@ -302,7 +303,12 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
}
*dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
*newfd = socket(family, type, protocol);
- return error_for_fd(*newfd, addr);
+ return error_for_fd(*newfd, resolved_addr);
+}
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ GPR_ASSERT(size <= (socklen_t)-1);
+ return inet_ntop(af, src, dst, (socklen_t)size);
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_linux.c b/src/core/lib/iomgr/socket_utils_linux.c
index 144e3110c8..bf6e9e4f55 100644
--- a/src/core/lib/iomgr/socket_utils_linux.c
+++ b/src/core/lib/iomgr/socket_utils_linux.c
@@ -31,21 +31,27 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_LINUX_SOCKETUTILS
+#ifdef GRPC_LINUX_SOCKETUTILS
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
+#include <grpc/support/log.h>
+
#include <sys/socket.h>
#include <sys/types.h>
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec) {
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec) {
int flags = 0;
+ GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
+ GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
flags |= nonblock ? SOCK_NONBLOCK : 0;
flags |= cloexec ? SOCK_CLOEXEC : 0;
- return accept4(sockfd, addr, addrlen, flags);
+ return accept4(sockfd, (struct sockaddr *)resolved_addr->addr,
+ (socklen_t *)&resolved_addr->len, flags);
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_posix.c b/src/core/lib/iomgr/socket_utils_posix.c
index 57ae64c103..9dea0c0cd8 100644
--- a/src/core/lib/iomgr/socket_utils_posix.c
+++ b/src/core/lib/iomgr/socket_utils_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKETUTILS
+#ifdef GRPC_POSIX_SOCKETUTILS
#include "src/core/lib/iomgr/socket_utils_posix.h"
@@ -42,12 +42,15 @@
#include <unistd.h>
#include <grpc/support/log.h>
+#include "src/core/lib/iomgr/sockaddr.h"
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec) {
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec) {
int fd, flags;
-
- fd = accept(sockfd, addr, addrlen);
+ GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
+ GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
+ fd = accept(sockfd, (struct sockaddr *)resolved_addr->addr,
+ (socklen_t *)&resolved_addr->len);
if (fd >= 0) {
if (nonblock) {
flags = fcntl(fd, F_GETFL, 0);
@@ -67,4 +70,4 @@ close_and_error:
return -1;
}
-#endif /* GPR_POSIX_SOCKETUTILS */
+#endif /* GRPC_POSIX_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 0ad2d39497..e84d3781a1 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -34,6 +34,8 @@
#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H
#define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <sys/socket.h>
#include <unistd.h>
@@ -42,8 +44,8 @@
#include "src/core/lib/iomgr/socket_mutator.h"
/* a wrapper for accept or accept4 */
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec);
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec);
/* set a socket to non blocking mode */
grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking);
@@ -130,8 +132,8 @@ extern int grpc_forbid_dualstack_sockets_for_testing;
IPv4, so that bind() or connect() see the correct family.
Also, it's important to distinguish between DUALSTACK and IPV6 when
listening on the [::] wildcard address. */
-grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
- int protocol,
+grpc_error *grpc_create_dualstack_socket(const grpc_resolved_address *addr,
+ int type, int protocol,
grpc_dualstack_mode *dsmode,
int *newfd);
diff --git a/src/core/lib/iomgr/socket_utils_uv.c b/src/core/lib/iomgr/socket_utils_uv.c
new file mode 100644
index 0000000000..741bf28969
--- /dev/null
+++ b/src/core/lib/iomgr/socket_utils_uv.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include "src/core/lib/iomgr/socket_utils.h"
+
+#include <grpc/support/log.h>
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ uv_inet_ntop(af, src, dst, size);
+ return dst;
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c
new file mode 100644
index 0000000000..628ad4a45b
--- /dev/null
+++ b/src/core/lib/iomgr/socket_utils_windows.c
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_WINDOWS_SOCKETUTILS
+
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
+
+#include <grpc/support/log.h>
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ /* Windows InetNtopA wants a mutable ip pointer */
+ return InetNtopA(af, (void *)src, dst, size);
+}
+
+#endif /* GRPC_WINDOWS_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 78ef46d042..35f23300dc 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <winsock2.h>
@@ -156,4 +156,4 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
if (should_destroy) destroy(socket);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index 215c2fbe5b..0485661316 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -38,20 +38,11 @@
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/pollset_set.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
-/** arguments for a tcp client connection */
-typedef struct {
- /** set of pollsets interested in this connection */
- grpc_pollset_set *interested_parties;
- /** address to connect to */
- const struct sockaddr *addr;
- size_t addr_len;
- /** deadline for connection */
- gpr_timespec deadline;
- /** channel arguments */
- const grpc_channel_args *channel_args;
-} grpc_tcp_client_connect_args;
+/* Channel arg (integer) setting how large a slice to try to read from the wire
+ each time recvmsg (or equivalent) is called */
+#define GRPC_ARG_TCP_READ_CHUNK_SIZE "grpc.experimental.tcp_read_chunk_size"
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
@@ -60,6 +51,9 @@ typedef struct {
in this connection being established (in order to continue their work) */
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
grpc_endpoint **endpoint,
- const grpc_tcp_client_connect_args *args);
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */
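
With the connect-args struct gone, callers now pass the pollset set, channel args, resolved address, and deadline directly, and the read-chunk size travels as an ordinary integer channel arg. A call-site sketch (exec_ctx, interested_parties, resolved_addr and the on_connected closure are assumed to exist in the caller):

grpc_arg chunk_arg;
chunk_arg.type = GRPC_ARG_INTEGER;
chunk_arg.key = GRPC_ARG_TCP_READ_CHUNK_SIZE;
chunk_arg.value.integer = 256 * 1024; /* ask for 256KiB read slices */
grpc_channel_args connect_args = {1, &chunk_arg};

grpc_endpoint *endpoint = NULL;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); /* no timeout,
                                                                for brevity */
grpc_tcp_client_connect(exec_ctx, &on_connected, &endpoint, interested_parties,
                        &connect_args, &resolved_addr, deadline);
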
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index 9089751d94..169ddcabed 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -31,11 +31,11 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include <errno.h>
#include <netinet/in.h>
@@ -47,6 +47,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -70,9 +71,10 @@ typedef struct {
char *addr_str;
grpc_endpoint **ep;
grpc_closure *closure;
+ grpc_channel_args *channel_args;
} async_connect;
-static grpc_error *prepare_socket(const struct sockaddr *addr, int fd,
+static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
const grpc_channel_args *channel_args) {
grpc_error *err = GRPC_ERROR_NONE;
@@ -126,10 +128,39 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
}
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str) {
+ size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ 8 * 1024 * 1024};
+ tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
+ &channel_args->args[i], options);
+ } else if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
+ grpc_endpoint *ep =
+ grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ return ep;
+}
+
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect *ac = acp;
int so_error = 0;
@@ -177,7 +208,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
switch (so_error) {
case 0:
grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
- *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+ *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
+ ac->addr_str);
fd = NULL;
break;
case ENOBUFS:
@@ -227,6 +259,7 @@ finish:
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
@@ -234,26 +267,26 @@ finish:
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
- const grpc_tcp_client_connect_args *args) {
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
async_connect *ac;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in addr4_copy;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address addr4_copy;
grpc_fd *fdobj;
char *name;
char *addr_str;
grpc_error *error;
- const struct sockaddr *addr = args->addr;
- size_t addr_len = args->addr_len;
*ep = NULL;
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
@@ -264,18 +297,18 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (dsmode == GRPC_DSMODE_IPV4) {
/* If we got an AF_INET socket, map the address back to IPv4. */
GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
- if ((error = prepare_socket(addr, fd, args->channel_args)) !=
+ if ((error = prepare_socket(addr, fd, channel_args)) !=
GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
return;
}
do {
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- err = connect(fd, addr, (socklen_t)addr_len);
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ err =
+ connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
} while (err < 0 && errno == EINTR);
addr_str = grpc_sockaddr_to_uri(addr);
@@ -284,7 +317,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+ *ep =
+ grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
goto done;
}
@@ -296,19 +330,20 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
goto done;
}
- grpc_pollset_set_add_fd(exec_ctx, args->interested_parties, fdobj);
+ grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
ac = gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
- ac->interested_parties = args->interested_parties;
+ ac->interested_parties = interested_parties;
ac->addr_str = addr_str;
addr_str = NULL;
gpr_mu_init(&ac->mu);
ac->refs = 2;
ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac;
+ ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
@@ -317,7 +352,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&ac->mu);
grpc_timer_init(exec_ctx, &ac->alarm,
- gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC),
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
@@ -329,13 +364,19 @@ done:
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
- grpc_exec_ctx *exec_ctx, grpc_closure *on_connect, grpc_endpoint **endpoint,
- const grpc_tcp_client_connect_args *args) = tcp_client_connect_impl;
+ grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
- const grpc_tcp_client_connect_args *args) {
- grpc_tcp_client_connect_impl(exec_ctx, closure, ep, args);
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
new file mode 100644
index 0000000000..efc5fcd5bb
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
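
grpc_tcp_client_create_from_fd lets code that obtained a connected fd by some other means wrap it in a TCP endpoint that still honors GRPC_ARG_TCP_READ_CHUNK_SIZE and the resource quota from the channel args. A sketch, assuming connected_fd and channel_args come from the caller and the peer string is illustrative:

grpc_fd *fdobj = grpc_fd_create(connected_fd, "externally_connected_socket");
grpc_endpoint *ep = grpc_tcp_client_create_from_fd(
    exec_ctx, fdobj, channel_args, "ipv4:127.0.0.1:50051");
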
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
new file mode 100644
index 0000000000..b07f9ceffa
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -0,0 +1,173 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/iomgr/timer.h"
+
+typedef struct grpc_uv_tcp_connect {
+ uv_connect_t connect_req;
+ grpc_timer alarm;
+ uv_tcp_t *tcp_handle;
+ grpc_closure *closure;
+ grpc_endpoint **endpoint;
+ int refs;
+ char *addr_name;
+ grpc_resource_quota *resource_quota;
+} grpc_uv_tcp_connect;
+
+static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
+ grpc_uv_tcp_connect *connect) {
+ grpc_resource_quota_internal_unref(exec_ctx, connect->resource_quota);
+ gpr_free(connect);
+}
+
+static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
+ grpc_error *error) {
+ int done;
+ grpc_uv_tcp_connect *connect = acp;
+ if (error == GRPC_ERROR_NONE) {
+ /* error == GRPC_ERROR_NONE implies that the timer ran out and was not
+ cancelled. If it was cancelled, the handler that cancelled it should also
+ close the handle, if applicable */
+ uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ uv_tcp_connect_cleanup(exec_ctx, connect);
+ }
+}
+
+static void uv_tc_on_connect(uv_connect_t *req, int status) {
+ grpc_uv_tcp_connect *connect = req->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_error *error = GRPC_ERROR_NONE;
+ int done;
+ grpc_closure *closure = connect->closure;
+ grpc_timer_cancel(&exec_ctx, &connect->alarm);
+ if (status == 0) {
+ *connect->endpoint = grpc_tcp_create(
+ connect->tcp_handle, connect->resource_quota, connect->addr_name);
+ } else {
+ error = GRPC_ERROR_CREATE("Failed to connect to remote host");
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ if (status == UV_ECANCELED) {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ "Timeout occurred");
+ // This should only happen if the handle is already closed
+ } else {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ uv_strerror(status));
+ uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ }
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ uv_tcp_connect_cleanup(&exec_ctx, connect);
+ }
+ grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *resolved_addr,
+ gpr_timespec deadline) {
+ grpc_uv_tcp_connect *connect;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ (void)channel_args;
+ (void)interested_parties;
+
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
+ connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
+ memset(connect, 0, sizeof(grpc_uv_tcp_connect));
+ connect->closure = closure;
+ connect->endpoint = ep;
+ connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
+ connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
+ connect->resource_quota = resource_quota;
+ uv_tcp_init(uv_default_loop(), connect->tcp_handle);
+ connect->connect_req.data = connect;
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
+ (const struct sockaddr *)resolved_addr->addr,
+ uv_tc_on_connect);
+ grpc_timer_init(exec_ctx, &connect->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ uv_tc_on_alarm, connect, gpr_now(GPR_CLOCK_MONOTONIC));
+}
+
+// overridden by api_fuzzer.c
+void (*grpc_tcp_client_connect_impl)(
+ grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) = tcp_client_connect_impl;
+
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index b4517b3bdb..30f7c66f15 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/sockaddr_windows.h"
@@ -43,6 +43,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -61,13 +62,16 @@ typedef struct {
int refs;
grpc_closure on_connect;
grpc_endpoint **endpoint;
+ grpc_resource_quota *resource_quota;
} async_connect;
-static void async_connect_unlock_and_cleanup(async_connect *ac,
+static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
+ async_connect *ac,
grpc_winsocket *socket) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
+ grpc_resource_quota_internal_unref(exec_ctx, ac->resource_quota);
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac);
@@ -83,7 +87,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (socket != NULL) {
grpc_winsocket_shutdown(socket);
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
}
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
@@ -113,12 +117,12 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (!wsa_success) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
} else {
- *ep = grpc_tcp_create(socket, ac->addr_name);
+ *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
socket = NULL;
}
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
@@ -128,12 +132,15 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
notification request for the connection, and one timeout alert. */
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_endpoint **endpoint,
- const grpc_tcp_client_connect_args *args) {
+ grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in6 local_address;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address local_address;
async_connect *ac;
grpc_winsocket *socket = NULL;
LPFN_CONNECTEX ConnectEx;
@@ -141,15 +148,23 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
DWORD ioctl_num_bytes;
grpc_winsocket_callback_info *info;
grpc_error *error = GRPC_ERROR_NONE;
- const struct sockaddr *addr = args->addr;
- size_t addr_len = args->addr_len;
+
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
*endpoint = NULL;
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
@@ -178,7 +193,8 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_sockaddr_make_wildcard6(0, &local_address);
- status = bind(sock, (struct sockaddr *)&local_address, sizeof(local_address));
+ status = bind(sock, (struct sockaddr *)&local_address.addr,
+ (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -186,8 +202,8 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- success =
- ConnectEx(sock, addr, (int)addr_len, NULL, 0, NULL, &info->overlapped);
+ success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len,
+ NULL, 0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */
@@ -206,9 +222,10 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
+ ac->resource_quota = resource_quota;
grpc_closure_init(&ac->on_connect, on_connect, ac);
- grpc_timer_init(exec_ctx, &ac->alarm, args->deadline, on_alarm, ac,
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
gpr_now(GPR_CLOCK_MONOTONIC));
grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
@@ -225,7 +242,8 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
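[editor's note] Throughout this change, (const struct sockaddr *addr, size_t addr_len) pairs are collapsed into a single grpc_resolved_address passed by pointer. The type itself lives in resolve_address.h rather than in these hunks; a sketch of the shape the call sites imply, plus the before/after of a typical bind call:

/* Sketch only -- the real definition is in resolve_address.h. The call sites
   above use a fixed-size byte buffer plus an explicit length, so addresses
   can be copied with a plain struct copy. */
typedef struct {
  char addr[128]; /* assumed >= sizeof(struct sockaddr_storage) */
  size_t len;
} resolved_address_sketch;

/* before: bind(sock, addr, (int)addr_len)
   after:  bind(sock, (const struct sockaddr *)addr->addr, (int)addr->len) */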
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 92767721d5..880af93ee1 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/tcp_posix.h"
@@ -58,14 +58,14 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
-#ifdef GPR_HAVE_MSG_NOSIGNAL
+#ifdef GRPC_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif
-#ifdef GPR_MSG_IOVLEN_TYPE
-typedef GPR_MSG_IOVLEN_TYPE msg_iovlen_type;
+#ifdef GRPC_MSG_IOVLEN_TYPE
+typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
#else
typedef size_t msg_iovlen_type;
#endif
@@ -80,6 +80,7 @@ typedef struct {
msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
size_t slice_size;
gpr_refcount refcount;
+ gpr_atm shutdown_count;
/* garbage after the last read */
gpr_slice_buffer last_read_buffer;
@@ -100,15 +101,29 @@ typedef struct {
grpc_closure write_closure;
char *peer_string;
+
+ grpc_resource_user resource_user;
+ grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void tcp_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (gpr_atm_full_fetch_add(&tcp->shutdown_count, 1) == 0) {
+ grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user,
+ grpc_closure_create(tcp_unref_closure, tcp));
+ }
+}
static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
grpc_fd_shutdown(exec_ctx, tcp->em_fd);
}
@@ -116,6 +131,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
"tcp_unref_orphan");
gpr_slice_buffer_destroy(&tcp->last_read_buffer);
+ grpc_resource_user_destroy(exec_ctx, &tcp->resource_user);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
@@ -152,9 +168,16 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(exec_ctx, arg, "resource_user");
+}
+
static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
@@ -177,11 +200,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_run(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
-static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -192,10 +215,6 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
- while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
- gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
- gpr_slice_malloc(tcp->slice_size));
- }
for (i = 0; i < tcp->incoming_buffer->count; i++) {
iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
@@ -209,11 +228,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GPR_TIMER_BEGIN("recvmsg", 1);
+ GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
- GPR_TIMER_END("recvmsg", 0);
+ GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@@ -232,7 +251,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("EOF"));
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("Socket closed"));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
@@ -252,6 +271,30 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_TIMER_END("tcp_continue_read", 0);
}
+static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
+ grpc_error *error) {
+ grpc_tcp *tcp = tcpp;
+ if (error != GRPC_ERROR_NONE) {
+ gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+ grpc_resource_user_alloc_slices(
+ exec_ctx, &tcp->slice_allocator, tcp->slice_size,
+ (size_t)tcp->iov_size - tcp->incoming_buffer->count,
+ tcp->incoming_buffer);
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
@@ -259,6 +302,7 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (error != GRPC_ERROR_NONE) {
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
@@ -392,11 +436,8 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error_free_string(str);
}
- GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
- cb->cb(exec_ctx, cb->cb_arg, error);
- GPR_TIMER_END("tcp_handle_write.cb", 0);
+ grpc_closure_run(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
- GRPC_ERROR_UNREF(error);
}
}
@@ -472,6 +513,11 @@ static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
return grpc_fd_get_workqueue(tcp->em_fd);
}
+static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_get_workqueue,
@@ -479,10 +525,12 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_add_to_pollset_set,
tcp_shutdown,
tcp_destroy,
+ tcp_get_resource_user,
tcp_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
- const char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
+ grpc_resource_quota *resource_quota,
+ size_t slice_size, const char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
@@ -495,14 +543,20 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->slice_size = slice_size;
tcp->iov_size = 1;
tcp->finished_edge = true;
- /* paired with unref in grpc_tcp_destroy */
- gpr_ref_init(&tcp->refcount, 1);
+ /* paired with unref in grpc_tcp_destroy, and with the shutdown for our
+ * resource_user */
+ gpr_ref_init(&tcp->refcount, 2);
+ gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
tcp->read_closure.cb = tcp_handle_read;
tcp->read_closure.cb_arg = tcp;
tcp->write_closure.cb = tcp_handle_write;
tcp->write_closure.cb_arg = tcp;
gpr_slice_buffer_init(&tcp->last_read_buffer);
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
+ grpc_resource_user_slice_allocator_init(&tcp->slice_allocator,
+ &tcp->resource_user,
+ tcp_read_allocation_done, tcp);
/* Tell network status tracker about new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
@@ -517,10 +571,13 @@ int grpc_tcp_fd(grpc_endpoint *ep) {
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done) {
+ grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
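[editor's note] On the POSIX endpoint the read path is now split in two: tcp_continue_read first asks the endpoint's slice allocator to top up incoming_buffer, and the recvmsg work in tcp_do_read runs only once tcp_read_allocation_done reports that the quota granted the slices. From the caller's side nothing changes; a hedged sketch of driving a read against the endpoint (buffer and callback names are illustrative):

static void on_read_done(grpc_exec_ctx *exec_ctx, void *arg,
                         grpc_error *error) {
  gpr_slice_buffer *incoming = arg;
  /* On GRPC_ERROR_NONE, incoming holds whatever slices the quota allowed the
     endpoint to allocate and fill; on error it has been reset. */
  (void)incoming;
  (void)error;
}

static void start_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                       gpr_slice_buffer *incoming) {
  grpc_endpoint_read(exec_ctx, ep, incoming,
                     grpc_closure_create(on_read_done, incoming));
}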
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 99125836d6..1c0d13f96e 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -53,8 +53,8 @@ extern int grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
- const char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, grpc_resource_quota *resource_quota,
+ size_t read_slice_size, const char *peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
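[editor's note] Every grpc_tcp_create caller now has to supply a resource quota in addition to the fd, slice size and peer string. A hedged sketch of an adapted call site (fd_obj and the peer string are placeholders; quota ownership is not spelled out in this header hunk, so the caller below simply keeps its reference for the lifetime of the endpoint):

grpc_resource_quota *rq = grpc_resource_quota_create(NULL);
grpc_endpoint *ep =
    grpc_tcp_create(fd_obj, rq, GRPC_TCP_DEFAULT_READ_SLICE_SIZE,
                    "ipv4:127.0.0.1:1234");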
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 5a25d39a0c..6eba8c4057 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -38,6 +38,7 @@
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
@@ -60,7 +61,8 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server);
@@ -78,8 +80,9 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *out_port);
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *out_port);
/* Number of fds at the given port_index, or 0 if port_index is out of
bounds. */
@@ -101,8 +104,8 @@ grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s);
void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
grpc_closure *shutdown_starting);
-/* If the refcount drops to zero, delete s, and call (exec_ctx==NULL) or enqueue
- a call (exec_ctx!=NULL) to shutdown_complete. */
+/* If the refcount drops to zero, enqueue calls on exec_ctx to
+ shutdown_listeners and delete s. */
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
/* Shutdown the fds of listeners. */
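[editor's note] grpc_tcp_server_add_port now takes a grpc_resolved_address instead of a raw (addr, addr_len) pair. A hedged sketch of binding an ephemeral wildcard port with the new signature, using only helpers named elsewhere in this change (error handling reduced to a log):

grpc_resolved_address addr;
int bound_port = 0;
grpc_sockaddr_make_wildcard6(0 /* let the OS pick a port */, &addr);
GRPC_LOG_IF_ERROR("add_port",
                  grpc_tcp_server_add_port(server, &addr, &bound_port));
/* bound_port now holds the port the OS actually chose */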
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 2d3f6cf9a7..b6fc1e4ca2 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -36,9 +36,9 @@
#define _GNU_SOURCE
#endif
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/tcp_server.h"
@@ -62,6 +62,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
@@ -79,11 +80,7 @@ struct grpc_tcp_listener {
int fd;
grpc_fd *emfd;
grpc_tcp_server *server;
- union {
- uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
- struct sockaddr sockaddr;
- } addr;
- size_t addr_len;
+ grpc_resolved_address addr;
int port;
unsigned port_index;
unsigned fd_index;
@@ -137,6 +134,8 @@ struct grpc_tcp_server {
/* next pollset to assign a channel to */
gpr_atm next_pollset_to_assign;
+
+ grpc_resource_quota *resource_quota;
};
static gpr_once check_init = GPR_ONCE_INIT;
@@ -153,23 +152,37 @@ static void init(void) {
#endif
}
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
+ s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_INTEGER) {
s->so_reuseport =
has_so_reuseport && (args->args[i].value.integer != 0);
} else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
" must be an integer");
}
+ } else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a resource quota");
+ }
}
}
gpr_ref_init(&s->refs, 1);
@@ -191,6 +204,9 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ gpr_mu_lock(&s->mu);
+ GPR_ASSERT(s->shutdown);
+ gpr_mu_unlock(&s->mu);
if (s->shutdown_complete != NULL) {
grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
}
@@ -203,6 +219,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+
gpr_free(s);
}
@@ -235,7 +253,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->head) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
- grpc_unlink_if_unix_domain_socket(&sp->addr.sockaddr);
+ grpc_unlink_if_unix_domain_socket(&sp->addr);
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
@@ -301,11 +319,9 @@ static int get_max_accept_queue_size(void) {
}
/* Prepare a recently-created socket for listening. */
-static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
- size_t addr_len, bool so_reuseport,
- int *port) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static grpc_error *prepare_socket(int fd, const grpc_resolved_address *addr,
+ bool so_reuseport, int *port) {
+ grpc_resolved_address sockname_temp;
grpc_error *err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -328,8 +344,8 @@ static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (err != GRPC_ERROR_NONE) goto error;
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- if (bind(fd, addr, (socklen_t)addr_len) < 0) {
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) {
err = GRPC_OS_ERROR(errno, "bind");
goto error;
}
@@ -339,13 +355,15 @@ static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+
+ if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len) < 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
goto error;
}
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ *port = grpc_sockaddr_get_port(&sockname_temp);
return GRPC_ERROR_NONE;
error:
@@ -379,13 +397,13 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
/* loop until accept4 returns EAGAIN, and then re-arm notification */
for (;;) {
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
+ grpc_resolved_address addr;
char *addr_str;
char *name;
+ addr.len = sizeof(struct sockaddr_storage);
/* Note: If we ever decide to return this address to the user, remember to
strip off the ::ffff:0.0.0.0/96 prefix first. */
- int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
+ int fd = grpc_accept4(sp->fd, &addr, 1, 1);
if (fd < 0) {
switch (errno) {
case EINTR:
@@ -401,7 +419,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_set_socket_no_sigpipe_if_possible(fd);
- addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
+ addr_str = grpc_sockaddr_to_uri(&addr);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace) {
@@ -419,7 +437,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
sp->server->on_accept_cb(
exec_ctx, sp->server->on_accept_cb_arg,
- grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+ grpc_tcp_create(fdobj, sp->server->resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
read_notifier_pollset, &acceptor);
gpr_free(name);
@@ -439,19 +458,18 @@ error:
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
- const struct sockaddr *addr,
- size_t addr_len, unsigned port_index,
- unsigned fd_index,
+ const grpc_resolved_address *addr,
+ unsigned port_index, unsigned fd_index,
grpc_tcp_listener **listener) {
grpc_tcp_listener *sp = NULL;
int port = -1;
char *addr_str;
char *name;
- grpc_error *err = prepare_socket(fd, addr, addr_len, s->so_reuseport, &port);
+ grpc_error *err = prepare_socket(fd, addr, s->so_reuseport, &port);
if (err == GRPC_ERROR_NONE) {
GPR_ASSERT(port > 0);
- grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
@@ -467,8 +485,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, addr, addr_len);
- sp->addr_len = addr_len;
+ memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
sp->port = port;
sp->port_index = port_index;
sp->fd_index = fd_index;
@@ -501,14 +518,13 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
int fd = -1;
int port = -1;
grpc_dualstack_mode dsmode;
- err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
- &dsmode, &fd);
+ err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode,
+ &fd);
if (err != GRPC_ERROR_NONE) return err;
- err = prepare_socket(fd, &listener->addr.sockaddr, listener->addr_len, true,
- &port);
+ err = prepare_socket(fd, &listener->addr, true, &port);
if (err != GRPC_ERROR_NONE) return err;
listener->server->nports++;
- grpc_sockaddr_to_string(&addr_str, &listener->addr.sockaddr, 1);
+ grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
@@ -521,8 +537,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
sp->server = listener->server;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, listener->addr.untyped, listener->addr_len);
- sp->addr_len = listener->addr_len;
+ memcpy(&sp->addr, &listener->addr, sizeof(grpc_resolved_address));
sp->port = port;
sp->port_index = listener->port_index;
sp->fd_index = listener->fd_index + count - i;
@@ -537,19 +552,19 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *out_port) {
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *out_port) {
grpc_tcp_listener *sp;
grpc_tcp_listener *sp2 = NULL;
int fd;
grpc_dualstack_mode dsmode;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in wild4;
- struct sockaddr_in6 wild6;
- struct sockaddr_in addr4_copy;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wild4;
+ grpc_resolved_address wild6;
+ grpc_resolved_address addr4_copy;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
int port;
unsigned port_index = 0;
unsigned fd_index = 0;
@@ -557,19 +572,19 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
if (s->tail != NULL) {
port_index = s->tail->port_index + 1;
}
- grpc_unlink_if_unix_domain_socket((struct sockaddr *)addr);
+ grpc_unlink_if_unix_domain_socket(addr);
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
- sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp,
- &sockname_len)) {
- port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len)) {
+ port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, addr->len);
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
break;
@@ -581,8 +596,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
sp = NULL;
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
@@ -590,12 +604,10 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
/* Try listening on IPv6 first. */
- addr = (struct sockaddr *)&wild6;
- addr_len = sizeof(wild6);
+ addr = &wild6;
errs[0] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (errs[0] == GRPC_ERROR_NONE) {
- errs[0] = add_socket_to_server(s, fd, addr, addr_len, port_index,
- fd_index, &sp);
+ errs[0] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
@@ -604,23 +616,20 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && sp != NULL) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
+ grpc_sockaddr_set_port(&wild4, sp->port);
}
}
- addr = (struct sockaddr *)&wild4;
- addr_len = sizeof(wild4);
+ addr = &wild4;
}
errs[1] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (errs[1] == GRPC_ERROR_NONE) {
if (dsmode == GRPC_DSMODE_IPV4 &&
grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
sp2 = sp;
- errs[1] =
- add_socket_to_server(s, fd, addr, addr_len, port_index, fd_index, &sp);
+ errs[1] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp);
if (sp2 != NULL && sp != NULL) {
sp2->sibling = sp;
sp->is_sibling = 1;
@@ -652,6 +661,7 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
grpc_tcp_listener *sp;
+ gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
@@ -659,12 +669,15 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
}
for (; sp; sp = sp->sibling, ++num_fds)
;
+ gpr_mu_unlock(&s->mu);
return num_fds;
}
int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
unsigned fd_index) {
grpc_tcp_listener *sp;
+ int fd;
+ gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
@@ -673,10 +686,12 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
;
if (sp) {
- return sp->fd;
+ fd = sp->fd;
} else {
- return -1;
+ fd = -1;
}
+ gpr_mu_unlock(&s->mu);
+ return fd;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
@@ -695,7 +710,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->pollset_count = pollset_count;
sp = s->head;
while (sp != NULL) {
- if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr.sockaddr) &&
+ if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr) &&
pollset_count > 1) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
@@ -722,7 +737,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
- gpr_ref(&s->refs);
+ gpr_ref_non_zero(&s->refs);
return s;
}
@@ -736,19 +751,11 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
- /* Complete shutdown_starting work before destroying. */
- grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
gpr_mu_unlock(&s->mu);
- if (exec_ctx == NULL) {
- grpc_exec_ctx_flush(&local_exec_ctx);
- tcp_server_destroy(&local_exec_ctx, s);
- grpc_exec_ctx_finish(&local_exec_ctx);
- } else {
- grpc_exec_ctx_finish(&local_exec_ctx);
- tcp_server_destroy(exec_ctx, s);
- }
+ tcp_server_destroy(exec_ctx, s);
}
}
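[editor's note] grpc_tcp_server_unref no longer accepts a NULL exec_ctx and no longer spins up a local one: the shutdown_starting closures and the destroy work are enqueued straight onto the caller's exec_ctx. A minimal sketch of the caller-side contract this implies:

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp_server_unref(&exec_ctx, server);
/* The shutdown_starting closures and the listener teardown queued by unref
   run as the exec_ctx drains. */
grpc_exec_ctx_finish(&exec_ctx);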
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
new file mode 100644
index 0000000000..b5b9b92a20
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -0,0 +1,383 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+
+/* one listening port */
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+struct grpc_tcp_listener {
+ uv_tcp_t *handle;
+ grpc_tcp_server *server;
+ unsigned port_index;
+ int port;
+ /* linked list */
+ struct grpc_tcp_listener *next;
+};
+
+struct grpc_tcp_server {
+ gpr_refcount refs;
+
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void *on_accept_cb_arg;
+
+ int open_ports;
+
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ grpc_tcp_listener *tail;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
+
+ /* shutdown callback */
+ grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
+};
+
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
+ const grpc_channel_args *args,
+ grpc_tcp_server **server) {
+ grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a resource quota");
+ }
+ }
+ }
+ gpr_ref_init(&s->refs, 1);
+ s->on_accept_cb = NULL;
+ s->on_accept_cb_arg = NULL;
+ s->open_ports = 0;
+ s->head = NULL;
+ s->tail = NULL;
+ s->shutdown_starting.head = NULL;
+ s->shutdown_starting.tail = NULL;
+ s->shutdown_complete = shutdown_complete;
+ *server = s;
+ return GRPC_ERROR_NONE;
+}
+
+grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+ gpr_ref(&s->refs);
+ return s;
+}
+
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
+ grpc_closure *shutdown_starting) {
+ grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
+ GRPC_ERROR_NONE);
+}
+
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ }
+
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ sp->next = NULL;
+ gpr_free(sp->handle);
+ gpr_free(sp);
+ }
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+}
+
+static void handle_close_callback(uv_handle_t *handle) {
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ sp->server->open_ports--;
+ if (sp->server->open_ports == 0) {
+ finish_shutdown(&exec_ctx, sp->server);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ int immediately_done = 0;
+ grpc_tcp_listener *sp;
+
+ if (s->open_ports == 0) {
+ immediately_done = 1;
+ }
+ for (sp = s->head; sp; sp = sp->next) {
+ uv_close((uv_handle_t *)sp->handle, handle_close_callback);
+ }
+
+ if (immediately_done) {
+ finish_shutdown(exec_ctx, s);
+ }
+}
+
+void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (gpr_unref(&s->refs)) {
+ /* Complete shutdown_starting work before destroying. */
+ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ if (exec_ctx == NULL) {
+ grpc_exec_ctx_flush(&local_exec_ctx);
+ tcp_server_destroy(&local_exec_ctx, s);
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ } else {
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ tcp_server_destroy(exec_ctx, s);
+ }
+ }
+}
+
+static void accepted_connection_close_cb(uv_handle_t *handle) {
+ gpr_free(handle);
+}
+
+static void on_connect(uv_stream_t *server, int status) {
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
+ grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
+ uv_tcp_t *client;
+ grpc_endpoint *ep = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resolved_address peer_name;
+ char *peer_name_string;
+ int err;
+
+ if (status < 0) {
+ gpr_log(GPR_INFO, "Skipping on_accept due to error: %s",
+ uv_strerror(status));
+ return;
+ }
+ client = gpr_malloc(sizeof(uv_tcp_t));
+ uv_tcp_init(uv_default_loop(), client);
+ // UV documentation says this is guaranteed to succeed
+ uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
+ // If the server has not been started, we discard incoming connections
+ if (sp->server->on_accept_cb == NULL) {
+ uv_close((uv_handle_t *)client, accepted_connection_close_cb);
+ } else {
+ peer_name_string = NULL;
+ memset(&peer_name, 0, sizeof(grpc_resolved_address));
+ peer_name.len = sizeof(struct sockaddr_storage);
+ err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
+ (int *)&peer_name.len);
+ if (err == 0) {
+ peer_name_string = grpc_sockaddr_to_uri(&peer_name);
+ } else {
+ gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err));
+ }
+ ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
+ sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
+ &acceptor);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+}
+
+static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
+ const grpc_resolved_address *addr,
+ unsigned port_index,
+ grpc_tcp_listener **listener) {
+ grpc_tcp_listener *sp = NULL;
+ int port = -1;
+ int status;
+ grpc_error *error;
+ grpc_resolved_address sockname_temp;
+
+ // The last argument to uv_tcp_bind is flags
+ status = uv_tcp_bind(handle, (struct sockaddr *)addr->addr, 0);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("Failed to bind to port");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ status = uv_listen((uv_stream_t *)handle, SOMAXCONN, on_connect);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("Failed to listen to port");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ sockname_temp.len = (int)sizeof(struct sockaddr_storage);
+ status = uv_tcp_getsockname(handle, (struct sockaddr *)&sockname_temp.addr,
+ (int *)&sockname_temp.len);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("getsockname failed");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ port = grpc_sockaddr_get_port(&sockname_temp);
+
+ GPR_ASSERT(port >= 0);
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
+ }
+ s->tail = sp;
+ sp->server = s;
+ sp->handle = handle;
+ sp->port = port;
+ sp->port_index = port_index;
+ handle->data = sp;
+ s->open_ports++;
+ GPR_ASSERT(sp->handle);
+ *listener = sp;
+
+ return GRPC_ERROR_NONE;
+}
+
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *port) {
+ // This function is mostly copied from tcp_server_windows.c
+ grpc_tcp_listener *sp = NULL;
+ uv_tcp_t *handle;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wildcard;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
+ unsigned port_index = 0;
+ int status;
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ if (s->tail != NULL) {
+ port_index = s->tail->port_index + 1;
+ }
+
+ /* Check if this is a wildcard port, and if so, try to keep the port the same
+ as some previously created listener. */
+ if (grpc_sockaddr_get_port(addr) == 0) {
+ for (sp = s->head; sp; sp = sp->next) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == uv_tcp_getsockname(sp->handle,
+ (struct sockaddr *)&sockname_temp.addr,
+ (int *)&sockname_temp.len)) {
+ *port = grpc_sockaddr_get_port(&sockname_temp);
+ if (*port > 0) {
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
+ grpc_sockaddr_set_port(allocated_addr, *port);
+ addr = allocated_addr;
+ break;
+ }
+ }
+ }
+ }
+
+ if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
+ addr = &addr6_v4mapped;
+ }
+
+ /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
+ if (grpc_sockaddr_is_wildcard(addr, port)) {
+ grpc_sockaddr_make_wildcard6(*port, &wildcard);
+
+ addr = &wildcard;
+ }
+
+ handle = gpr_malloc(sizeof(uv_tcp_t));
+ status = uv_tcp_init(uv_default_loop(), handle);
+ if (status == 0) {
+ error = add_socket_to_server(s, handle, addr, port_index, &sp);
+ } else {
+ error = GRPC_ERROR_CREATE("Failed to initialize UV tcp handle");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ }
+
+ gpr_free(allocated_addr);
+
+ if (error != GRPC_ERROR_NONE) {
+ grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING(
+ "Failed to add port to server", &error, 1);
+ GRPC_ERROR_UNREF(error);
+ error = error_out;
+ *port = -1;
+ } else {
+ GPR_ASSERT(sp != NULL);
+ *port = sp->port;
+ }
+ return error;
+}
+
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void *cb_arg) {
+ grpc_tcp_listener *sp;
+ (void)pollsets;
+ (void)pollset_count;
+ GPR_ASSERT(on_accept_cb);
+ GPR_ASSERT(!server->on_accept_cb);
+ server->on_accept_cb = on_accept_cb;
+ server->on_accept_cb_arg = cb_arg;
+ for (sp = server->head; sp; sp = sp->next) {
+ GPR_ASSERT(uv_listen((uv_stream_t *)sp->handle, SOMAXCONN, on_connect) ==
+ 0);
+ }
+}
+
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_server *s) {}
+
+#endif /* GRPC_UV */
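[editor's note] When a wildcard port (port 0) is bound, add_socket_to_server recovers the port the OS actually chose via uv_tcp_getsockname and grpc_sockaddr_get_port. The same step can be demonstrated with plain libuv outside gRPC; a sketch assuming POSIX socket headers:

#include <uv.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

/* Returns the locally bound port of a libuv TCP handle, or -1 on failure. */
static int bound_port(uv_tcp_t *handle) {
  struct sockaddr_storage ss;
  int len = (int)sizeof(ss);
  memset(&ss, 0, sizeof(ss));
  if (uv_tcp_getsockname(handle, (struct sockaddr *)&ss, &len) != 0) return -1;
  if (ss.ss_family == AF_INET) {
    return (int)ntohs(((struct sockaddr_in *)&ss)->sin_port);
  } else if (ss.ss_family == AF_INET6) {
    return (int)ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
  }
  return -1;
}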
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 1b125e7005..ae54c70d2d 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -31,13 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
-#include <io.h>
+#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include <io.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -48,6 +48,8 @@
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/pollset_windows.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_windows.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/iomgr/tcp_windows.h"
@@ -98,14 +100,32 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a resource quota");
+ }
+ }
+ }
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
s->active_ports = 0;
@@ -135,11 +155,12 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
grpc_winsocket_destroy(sp->socket);
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
- gpr_ref(&s->refs);
+ gpr_ref_non_zero(&s->refs);
return s;
}
@@ -174,27 +195,19 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
- /* Complete shutdown_starting work before destroying. */
- grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
gpr_mu_unlock(&s->mu);
- if (exec_ctx == NULL) {
- grpc_exec_ctx_flush(&local_exec_ctx);
- tcp_server_destroy(&local_exec_ctx, s);
- grpc_exec_ctx_finish(&local_exec_ctx);
- } else {
- grpc_exec_ctx_finish(&local_exec_ctx);
- tcp_server_destroy(exec_ctx, s);
- }
+ tcp_server_destroy(exec_ctx, s);
}
}
/* Prepare (bind) a recently-created socket for listening. */
-static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
- size_t addr_len, int *port) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static grpc_error *prepare_socket(SOCKET sock,
+ const grpc_resolved_address *addr,
+ int *port) {
+ grpc_resolved_address sockname_temp;
grpc_error *error = GRPC_ERROR_NONE;
error = grpc_tcp_prepare_socket(sock);
@@ -202,7 +215,8 @@ static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
goto failure;
}
- if (bind(sock, addr, (int)addr_len) == SOCKET_ERROR) {
+ if (bind(sock, (const struct sockaddr *)addr->addr, (int)addr->len) ==
+ SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
}
@@ -212,14 +226,15 @@ static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
goto failure;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(sock, (struct sockaddr *)&sockname_temp, &sockname_len) ==
- SOCKET_ERROR) {
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
+ if (getsockname(sock, (struct sockaddr *)sockname_temp.addr,
+ &sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
goto failure;
}
+ sockname_temp.len = sockname_temp_len;
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ *port = grpc_sockaddr_get_port(&sockname_temp);
return GRPC_ERROR_NONE;
failure:
@@ -315,15 +330,16 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
- struct sockaddr_storage peer_name;
+ grpc_resolved_address peer_name;
char *peer_name_string;
char *fd_name;
- int peer_name_len = sizeof(peer_name);
DWORD transfered_bytes;
DWORD flags;
BOOL wsa_success;
int err;
+ peer_name.len = sizeof(struct sockaddr_storage);
+
/* The general mechanism for shutting down is to queue abortion calls. While
this is necessary in the read/write case, it's useless for the accept
case. We only need to adjust the pending callback count */
@@ -361,9 +377,12 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
gpr_free(utf8_message);
}
- err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
+ int peer_name_len = (int)peer_name.len;
+ err =
+ getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
+ peer_name.len = peer_name_len;
if (!err) {
- peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
+ peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);
@@ -371,7 +390,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
- peer_name_string);
+ sp->server->resource_quota, peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} else {
@@ -393,8 +412,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
- const struct sockaddr *addr,
- size_t addr_len, unsigned port_index,
+ const grpc_resolved_address *addr,
+ unsigned port_index,
grpc_tcp_listener **listener) {
grpc_tcp_listener *sp = NULL;
int port = -1;
@@ -418,7 +437,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
return NULL;
}
- error = prepare_socket(sock, addr, addr_len, &port);
+ error = prepare_socket(sock, addr, &port);
if (error != GRPC_ERROR_NONE) {
return error;
}
@@ -449,15 +468,15 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *port) {
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *port) {
grpc_tcp_listener *sp = NULL;
SOCKET sock;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in6 wildcard;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wildcard;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
unsigned port_index = 0;
grpc_error *error = GRPC_ERROR_NONE;
@@ -469,13 +488,15 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
- sockname_len = sizeof(sockname_temp);
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
if (0 == getsockname(sp->socket->socket,
- (struct sockaddr *)&sockname_temp, &sockname_len)) {
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ (struct sockaddr *)sockname_temp.addr,
+ &sockname_temp_len)) {
+ sockname_temp.len = sockname_temp_len;
+ *port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
break;
@@ -485,16 +506,14 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
if (grpc_sockaddr_is_wildcard(addr, port)) {
grpc_sockaddr_make_wildcard6(*port, &wildcard);
- addr = (struct sockaddr *)&wildcard;
- addr_len = sizeof(wildcard);
+ addr = &wildcard;
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
@@ -504,7 +523,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
goto done;
}
- error = add_socket_to_server(s, sock, addr, addr_len, port_index, &sp);
+ error = add_socket_to_server(s, sock, addr, port_index, &sp);
done:
gpr_free(allocated_addr);
@@ -543,4 +562,4 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s) {}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
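[editor's note] All three servers (posix, uv, windows) normalize the caller's address identically before binding: convert an IPv4 address to its v4-mapped IPv6 form when possible, then widen :: or 0.0.0.0 into an IPv6 wildcard on the requested port. A sketch of that shared step in isolation, using the helpers named in the hunks above:

/* 'addr' may end up pointing at one of the two scratch addresses, exactly as
   in the add_port implementations above. */
static const grpc_resolved_address *normalize_for_bind(
    const grpc_resolved_address *addr, grpc_resolved_address *v4mapped_scratch,
    grpc_resolved_address *wildcard_scratch, int *port) {
  if (grpc_sockaddr_to_v4mapped(addr, v4mapped_scratch)) {
    addr = v4mapped_scratch;
  }
  if (grpc_sockaddr_is_wildcard(addr, port)) {
    grpc_sockaddr_make_wildcard6(*port, wildcard_scratch);
    addr = wildcard_scratch;
  }
  return addr;
}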
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
new file mode 100644
index 0000000000..8e74c9e863
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -0,0 +1,379 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/support/string.h"
+
+int grpc_tcp_trace = 0;
+
+typedef struct {
+ grpc_endpoint base;
+ gpr_refcount refcount;
+
+ uv_write_t write_req;
+ uv_shutdown_t shutdown_req;
+
+ uv_tcp_t *handle;
+
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+
+ gpr_slice read_slice;
+ gpr_slice_buffer *read_slices;
+ gpr_slice_buffer *write_slices;
+ uv_buf_t *write_buffers;
+
+ grpc_resource_user resource_user;
+
+ bool shutting_down;
+ bool resource_user_shutting_down;
+
+ char *peer_string;
+ grpc_pollset *pollset;
+} grpc_tcp;
+
+static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void tcp_free(grpc_tcp *tcp) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user);
+ gpr_free(tcp);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+/*#define GRPC_TCP_REFCOUNT_DEBUG*/
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp);
+ }
+}
+
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count + 1);
+ gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(grpc_tcp *tcp) {
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp);
+ }
+}
+
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
+ uv_buf_t *buf) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp *tcp = handle->data;
+ (void)suggested_size;
+ tcp->read_slice = grpc_resource_user_slice_malloc(
+ &exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+ buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
+ buf->len = GPR_SLICE_LENGTH(tcp->read_slice);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void read_callback(uv_stream_t *stream, ssize_t nread,
+ const uv_buf_t *buf) {
+ gpr_slice sub;
+ grpc_error *error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp *tcp = stream->data;
+ grpc_closure *cb = tcp->read_cb;
+ if (nread == 0) {
+ // Nothing happened. Wait for the next callback
+ return;
+ }
+ TCP_UNREF(tcp, "read");
+ tcp->read_cb = NULL;
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_read_stop(stream);
+ if (nread == UV_EOF) {
+ error = GRPC_ERROR_CREATE("EOF");
+ } else if (nread > 0) {
+ // Successful read
+ sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
+ gpr_slice_buffer_add(tcp->read_slices, sub);
+ error = GRPC_ERROR_NONE;
+ if (grpc_tcp_trace) {
+ size_t i;
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "read: error=%s", str);
+ grpc_error_free_string(str);
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ char *dump = gpr_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+ dump);
+ gpr_free(dump);
+ }
+ }
+ } else {
+ // nread < 0: Error
+ error = GRPC_ERROR_CREATE("TCP Read failed");
+ }
+ grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *read_slices, grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ int status;
+ grpc_error *error = GRPC_ERROR_NONE;
+ GPR_ASSERT(tcp->read_cb == NULL);
+ tcp->read_cb = cb;
+ tcp->read_slices = read_slices;
+ gpr_slice_buffer_reset_and_unref(read_slices);
+ TCP_REF(tcp, "read");
+ // TODO(murgatroid99): figure out what the return value here means
+ status =
+ uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("TCP Read failed at start");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ }
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
+ }
+}
+
+static void write_callback(uv_write_t *req, int status) {
+ grpc_tcp *tcp = req->data;
+ grpc_error *error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure *cb = tcp->write_cb;
+ tcp->write_cb = NULL;
+ TCP_UNREF(tcp, "write");
+ if (status == 0) {
+ error = GRPC_ERROR_NONE;
+ } else {
+ error = GRPC_ERROR_CREATE("TCP Write failed");
+ }
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
+ }
+ gpr_free(tcp->write_buffers);
+ grpc_resource_user_free(&exec_ctx, &tcp->resource_user,
+ sizeof(uv_buf_t) * tcp->write_slices->count);
+ grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *write_slices,
+ grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ uv_buf_t *buffers;
+ unsigned int buffer_count;
+ unsigned int i;
+ gpr_slice *slice;
+ uv_write_t *write_req;
+
+ if (grpc_tcp_trace) {
+ size_t j;
+
+ for (j = 0; j < write_slices->count; j++) {
+ char *data = gpr_dump_slice(write_slices->slices[j],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+ gpr_free(data);
+ }
+ }
+
+ if (tcp->shutting_down) {
+ grpc_exec_ctx_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ return;
+ }
+
+ GPR_ASSERT(tcp->write_cb == NULL);
+ tcp->write_slices = write_slices;
+ GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
+ if (tcp->write_slices->count == 0) {
+ // No slices means we don't have to do anything,
+ // and libuv doesn't like empty writes
+ grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ return;
+ }
+
+ tcp->write_cb = cb;
+ buffer_count = (unsigned int)tcp->write_slices->count;
+ buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ grpc_resource_user_alloc(exec_ctx, &tcp->resource_user,
+ sizeof(uv_buf_t) * buffer_count, NULL);
+ for (i = 0; i < buffer_count; i++) {
+ slice = &tcp->write_slices->slices[i];
+ buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice);
+ buffers[i].len = GPR_SLICE_LENGTH(*slice);
+ }
+ tcp->write_buffers = buffers;
+ write_req = &tcp->write_req;
+ write_req->data = tcp;
+ TCP_REF(tcp, "write");
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_write(write_req, (uv_stream_t *)tcp->handle, buffers, buffer_count,
+ write_callback);
+}
+
+static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
+ (void)ep;
+ (void)pollset;
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp->pollset = pollset;
+}
+
+static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
+ (void)ep;
+ (void)pollset;
+}
+
+static void shutdown_callback(uv_shutdown_t *req, int status) {}
+
+static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(arg, "resource_user");
+}
+
+static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (!tcp->resource_user_shutting_down) {
+ tcp->resource_user_shutting_down = true;
+ TCP_REF(tcp, "resource_user");
+ grpc_resource_user_shutdown(
+ exec_ctx, &tcp->resource_user,
+ grpc_closure_create(resource_user_shutdown_done, tcp));
+ }
+}
+
+static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ if (!tcp->shutting_down) {
+ tcp->shutting_down = true;
+ uv_shutdown_t *req = &tcp->shutdown_req;
+ uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
+ }
+}
+
+static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_network_status_unregister_endpoint(ep);
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
+ uv_resource_user_maybe_shutdown(exec_ctx, tcp);
+ TCP_UNREF(tcp, "destroy");
+}
+
+static char *uv_get_peer(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return gpr_strdup(tcp->peer_string);
+}
+
+static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
+static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
+static grpc_endpoint_vtable vtable = {
+ uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
+ uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
+ uv_destroy, uv_get_resource_user, uv_get_peer};
+
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
+ grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
+
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
+ }
+
+ /* Disable Nagle's Algorithm */
+ uv_tcp_nodelay(handle, 1);
+
+ memset(tcp, 0, sizeof(grpc_tcp));
+ tcp->base.vtable = &vtable;
+ tcp->handle = handle;
+ handle->data = tcp;
+ gpr_ref_init(&tcp->refcount, 1);
+ tcp->peer_string = gpr_strdup(peer_string);
+ tcp->shutting_down = false;
+ tcp->resource_user_shutting_down = false;
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
+ /* Tell network status tracking code about the new endpoint */
+ grpc_network_status_register_endpoint(&tcp->base);
+
+#ifndef GRPC_UV_TCP_HOLD_LOOP
+ uv_unref((uv_handle_t *)handle);
+#endif
+
+ return &tcp->base;
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
new file mode 100644
index 0000000000..970fcafe4a
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_UV_H
+#define GRPC_CORE_LIB_IOMGR_TCP_UV_H
+/*
+ Low level TCP "bottom half" implementation, for use by transports built on
+ top of a TCP connection.
+
+ Note that this file does not (yet) include APIs for creating the socket in
+ the first place.
+
+ All calls passing slice transfer ownership of a slice refcount unless
+ otherwise specified.
+*/
+
+#include "src/core/lib/iomgr/endpoint.h"
+
+#include <uv.h>
+
+extern int grpc_tcp_trace;
+
+#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
+
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
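Note: a minimal usage sketch (illustrative, not part of this patch) for the libuv endpoint API declared above. It assumes an already-connected uv_tcp_t and the public grpc_resource_quota_create() constructor; the quota name and peer string are placeholders.

    /* Sketch: wrap a connected libuv TCP handle in a grpc_endpoint.
       Assumes <grpc/support/alloc.h> and that the handle was accepted/connected elsewhere. */
    uv_tcp_t *handle = gpr_malloc(sizeof(uv_tcp_t));
    uv_tcp_init(uv_default_loop(), handle);
    /* ... uv_accept() or uv_tcp_connect() completes on 'handle' ... */
    grpc_resource_quota *quota = grpc_resource_quota_create("tcp_uv_example");
    grpc_endpoint *ep = grpc_tcp_create(handle, quota, "ipv4:127.0.0.1:1234");
    /* Subsequent I/O then goes through the endpoint vtable, e.g.
       grpc_endpoint_read(exec_ctx, ep, &slices, &on_read_done); */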
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 448a72671c..46f0491d10 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <limits.h>
@@ -109,14 +109,29 @@ typedef struct grpc_tcp {
gpr_slice_buffer *write_slices;
gpr_slice_buffer *read_slices;
+ grpc_resource_user resource_user;
+
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
gpr_mu mu;
int shutting_down;
+ gpr_atm resource_user_shutdown_count;
+
char *peer_string;
} grpc_tcp;
+static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void win_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (gpr_atm_full_fetch_add(&tcp->resource_user_shutdown_count, 1) == 0) {
+ grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user,
+ grpc_closure_create(win_unref_closure, tcp));
+ }
+}
+
static void tcp_free(grpc_tcp *tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
@@ -155,6 +170,11 @@ static void tcp_unref(grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(arg, "resource_user");
+}
+
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
grpc_tcp *tcp = tcpp;
@@ -376,12 +396,14 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
callback. See the comments in on_read and on_write. */
tcp->shutting_down = 1;
grpc_winsocket_shutdown(tcp->socket);
+ win_maybe_shutdown_resource_user(exec_ctx, tcp);
gpr_mu_unlock(&tcp->mu);
}
static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
+ win_maybe_shutdown_resource_user(exec_ctx, tcp);
TCP_UNREF(tcp, "destroy");
}
@@ -392,6 +414,11 @@ static char *win_get_peer(grpc_endpoint *ep) {
static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
+static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_get_workqueue,
@@ -399,22 +426,26 @@ static grpc_endpoint_vtable vtable = {win_read,
win_add_to_pollset_set,
win_shutdown,
win_destroy,
+ win_get_resource_user,
win_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
- gpr_ref_init(&tcp->refcount, 1);
+ gpr_ref_init(&tcp->refcount, 2);
grpc_closure_init(&tcp->on_read, on_read, tcp);
grpc_closure_init(&tcp->on_write, on_write, tcp);
tcp->peer_string = gpr_strdup(peer_string);
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
return &tcp->base;
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 86d777235e..4402de1c38 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -50,7 +50,9 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index a825d2a28b..20fe98c4a7 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -34,26 +34,27 @@
#ifndef GRPC_CORE_LIB_IOMGR_TIMER_H
#define GRPC_CORE_LIB_IOMGR_TIMER_H
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+#include "src/core/lib/iomgr/timer_uv.h"
+#else
+#include "src/core/lib/iomgr/timer_generic.h"
+#endif /* GRPC_UV */
+
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
-typedef struct grpc_timer {
- gpr_timespec deadline;
- uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
- int triggered;
- struct grpc_timer *next;
- struct grpc_timer *prev;
- grpc_closure closure;
-} grpc_timer;
+typedef struct grpc_timer grpc_timer;
/* Initialize *timer. When expired or canceled, timer_cb will be called with
- *timer_cb_arg and status to indicate if it expired (SUCCESS) or was
- canceled (CANCELLED). timer_cb is guaranteed to be called exactly once,
- and application code should check the status to determine how it was
- invoked. The application callback is also responsible for maintaining
- information about when to free up any user-level state. */
+ *timer_cb_arg and error set to indicate if it expired (GRPC_ERROR_NONE) or
+ was canceled (GRPC_ERROR_CANCELLED). timer_cb is guaranteed to be called
+ exactly once, and application code should check the error to determine
+ how it was invoked. The application callback is also responsible for
+ maintaining information about when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
void *timer_cb_arg, gpr_timespec now);
@@ -74,8 +75,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
In all of these cases, the cancellation is still considered successful.
They are essentially distinguished in that the timer_cb will be run
- exactly once from either the cancellation (with status CANCELLED)
- or from the activation (with status SUCCESS)
+ exactly once from either the cancellation (with error GRPC_ERROR_CANCELLED)
+ or from the activation (with error GRPC_ERROR_NONE).
Note carefully that the callback function MAY occur in the same callstack
as grpc_timer_cancel. It's expected that most timers will be cancelled (their
@@ -83,14 +84,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
that cancellation costs as little as possible. Making callbacks run inline
matches this aim.
- Requires: cancel() must happen after add() on a given timer */
+ Requires: cancel() must happen after init() on a given timer */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
/* iomgr internal api for dealing with timers */
/* Check for timers to be run, and run them.
Return true if timer callbacks were executed.
- Drops drop_mu if it is non-null before executing callbacks.
If next is non-null, TRY to update *next with the next running timer
IF that timer occurs before *next current value.
*next is never guaranteed to be updated on any given execution; however,
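Note: a hedged sketch of the init/cancel contract documented above, using only functions declared in this header; the callback body and the 100 ms deadline are illustrative.

    /* Callback is invoked exactly once, with GRPC_ERROR_NONE on expiry or
       GRPC_ERROR_CANCELLED on cancellation. */
    static void on_timer(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      if (error == GRPC_ERROR_NONE) {
        /* deadline reached */
      } else if (error == GRPC_ERROR_CANCELLED) {
        /* cancelled before firing */
      }
    }

    /* Inside some initialization path: */
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_timer timer;
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    grpc_timer_init(&exec_ctx, &timer,
                    gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN)),
                    on_timer, NULL, now);
    /* ... later, if the guarded work finishes early (after init, per the requirement above): */
    grpc_timer_cancel(&exec_ctx, &timer);
    grpc_exec_ctx_finish(&exec_ctx);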
diff --git a/src/core/lib/iomgr/timer.c b/src/core/lib/iomgr/timer_generic.c
index 9975fa1671..00058f9d86 100644
--- a/src/core/lib/iomgr/timer.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -31,6 +31,10 @@
*
*/
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_TIMER_USE_GENERIC
+
#include "src/core/lib/iomgr/timer.h"
#include <grpc/support/log.h>
@@ -382,3 +386,5 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE("Shutting down timer system"));
}
+
+#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
new file mode 100644
index 0000000000..e4494adb5f
--- /dev/null
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H
+#define GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H
+
+#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+struct grpc_timer {
+ gpr_timespec deadline;
+ uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
+ int triggered;
+ struct grpc_timer *next;
+ struct grpc_timer *prev;
+ grpc_closure closure;
+};
+
+#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.c
index 2ad9bb9cd2..f736d335e6 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.c
@@ -31,6 +31,10 @@
*
*/
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_TIMER_USE_GENERIC
+
#include "src/core/lib/iomgr/timer_heap.h"
#include <string.h>
@@ -144,3 +148,5 @@ grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap) {
void grpc_timer_heap_pop(grpc_timer_heap *heap) {
grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap));
}
+
+#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
new file mode 100644
index 0000000000..cfcb89268b
--- /dev/null
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/timer.h"
+
+#include <uv.h>
+
+static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void stop_uv_timer(uv_timer_t *handle) {
+ uv_timer_stop(handle);
+ uv_unref((uv_handle_t *)handle);
+ uv_close((uv_handle_t *)handle, timer_close_callback);
+}
+
+void run_expired_timer(uv_timer_t *handle) {
+ grpc_timer *timer = (grpc_timer *)handle->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(!timer->triggered);
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ stop_uv_timer(handle);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
+ gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
+ void *timer_cb_arg, gpr_timespec now) {
+ uint64_t timeout;
+ uv_timer_t *uv_timer;
+ grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+ if (gpr_time_cmp(deadline, now) <= 0) {
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ return;
+ }
+ timer->triggered = 0;
+ timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ uv_timer = gpr_malloc(sizeof(uv_timer_t));
+ uv_timer_init(uv_default_loop(), uv_timer);
+ uv_timer->data = timer;
+ timer->uv_timer = uv_timer;
+ uv_timer_start(uv_timer, run_expired_timer, timeout, 0);
+}
+
+void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+ if (!timer->triggered) {
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+ stop_uv_timer((uv_timer_t *)timer->uv_timer);
+ }
+}
+
+bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
+ gpr_timespec *next) {
+ return false;
+}
+
+void grpc_timer_list_init(gpr_timespec now) {}
+void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
new file mode 100644
index 0000000000..3de383ebd5
--- /dev/null
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TIMER_UV_H
+#define GRPC_CORE_LIB_IOMGR_TIMER_UV_H
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+struct grpc_timer {
+ grpc_closure closure;
+ /* This is actually a uv_timer_t*, but we want to keep platform-specific
+ types out of headers */
+ void *uv_timer;
+ int triggered;
+};
+
+#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index edf7b133e9..fd0c7a0f9d 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -36,9 +36,9 @@
#define _GNU_SOURCE
#endif
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/udp_server.h"
@@ -62,32 +62,30 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
+#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
-#define INIT_PORT_CAP 2
-
/* one listening port */
-typedef struct {
+typedef struct grpc_udp_listener grpc_udp_listener;
+struct grpc_udp_listener {
int fd;
grpc_fd *emfd;
grpc_udp_server *server;
- union {
- uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
- struct sockaddr sockaddr;
- } addr;
- size_t addr_len;
+ grpc_resolved_address addr;
grpc_closure read_closure;
grpc_closure destroyed_closure;
grpc_udp_server_read_cb read_cb;
grpc_udp_server_orphan_cb orphan_cb;
-} server_port;
+
+ struct grpc_udp_listener *next;
+};
/* the overall server */
struct grpc_udp_server {
gpr_mu mu;
- gpr_cv cv;
/* active port count: how many ports are actually still listening */
size_t active_ports;
@@ -97,10 +95,10 @@ struct grpc_udp_server {
/* is this server shutting down? (boolean) */
int shutdown;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_udp_listener *head;
+ grpc_udp_listener *tail;
+ unsigned nports;
/* shutdown callback */
grpc_closure *shutdown_complete;
@@ -116,24 +114,29 @@ struct grpc_udp_server {
grpc_udp_server *grpc_udp_server_create(void) {
grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
- gpr_cv_init(&s->cv);
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
+ s->head = NULL;
+ s->tail = NULL;
s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
return s;
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ }
gpr_mu_destroy(&s->mu);
- gpr_cv_destroy(&s->cv);
- gpr_free(s->ports);
+ while (s->head) {
+ grpc_udp_listener *sp = s->head;
+ s->head = sp->next;
+ gpr_free(sp);
+ }
+
gpr_free(s);
}
@@ -154,8 +157,6 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
- size_t i;
-
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -164,9 +165,11 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
return;
}
- if (s->nports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ if (s->head) {
+ grpc_udp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ grpc_unlink_if_unix_domain_socket(&sp->addr);
+
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
@@ -187,7 +190,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_closure *on_done) {
- size_t i;
+ grpc_udp_listener *sp;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -197,14 +200,10 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
/* shutdown all fd's */
if (s->active_ports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
- /* Call the orphan_cb to signal that the FD is about to be closed and
- * should no longer be used. */
+ for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
sp->orphan_cb(sp->emfd);
-
- grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
+ grpc_fd_shutdown(exec_ctx, sp->emfd);
}
gpr_mu_unlock(&s->mu);
} else {
@@ -214,10 +213,9 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
}
/* Prepare a recently-created socket for listening. */
-static int prepare_socket(int fd, const struct sockaddr *addr,
- size_t addr_len) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static int prepare_socket(int fd, const grpc_resolved_address *addr) {
+ grpc_resolved_address sockname_temp;
+ struct sockaddr *addr_ptr = (struct sockaddr *)addr->addr;
/* Set send/receive socket buffers to 1 MB */
int buffer_size_bytes = 1024 * 1024;
@@ -237,15 +235,15 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
if (grpc_set_socket_ip_pktinfo_if_possible(fd) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Unable to set ip_pktinfo.");
goto error;
- } else if (addr->sa_family == AF_INET6) {
+ } else if (addr_ptr->sa_family == AF_INET6) {
if (grpc_set_socket_ipv6_recvpktinfo_if_possible(fd) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Unable to set ipv6_recvpktinfo.");
goto error;
}
}
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- if (bind(fd, addr, (socklen_t)addr_len) < 0) {
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ if (bind(fd, (struct sockaddr *)addr, (socklen_t)addr->len) < 0) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
@@ -253,8 +251,10 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+
+ if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len) < 0) {
goto error;
}
@@ -270,7 +270,7 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ return grpc_sockaddr_get_port(&sockname_temp);
error:
if (fd >= 0) {
@@ -281,10 +281,10 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- server_port *sp = arg;
+ grpc_udp_listener *sp = arg;
+ gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
- gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(exec_ctx, sp->server);
@@ -300,34 +300,37 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+ gpr_mu_unlock(&sp->server->mu);
}
static int add_socket_to_server(grpc_udp_server *s, int fd,
- const struct sockaddr *addr, size_t addr_len,
+ const grpc_resolved_address *addr,
grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb) {
- server_port *sp;
+ grpc_udp_listener *sp;
int port;
char *addr_str;
char *name;
- port = prepare_socket(fd, addr, addr_len);
+ port = prepare_socket(fd, addr);
if (port >= 0) {
- grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ s->nports++;
+ sp = gpr_malloc(sizeof(grpc_udp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
}
- sp = &s->ports[s->nports++];
+ s->tail = sp;
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, addr, addr_len);
- sp->addr_len = addr_len;
+ memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
sp->read_cb = read_cb;
sp->orphan_cb = orphan_cb;
GPR_ASSERT(sp->emfd);
@@ -338,34 +341,34 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
return port;
}
-int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
- size_t addr_len, grpc_udp_server_read_cb read_cb,
+int grpc_udp_server_add_port(grpc_udp_server *s,
+ const grpc_resolved_address *addr,
+ grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb) {
+ grpc_udp_listener *sp;
int allocated_port1 = -1;
int allocated_port2 = -1;
- unsigned i;
int fd;
grpc_dualstack_mode dsmode;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in wild4;
- struct sockaddr_in6 wild6;
- struct sockaddr_in addr4_copy;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wild4;
+ grpc_resolved_address wild6;
+ grpc_resolved_address addr4_copy;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
int port;
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
- sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
- &sockname_len)) {
- port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ for (sp = s->head; sp; sp = sp->next) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len)) {
+ port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
break;
@@ -375,8 +378,7 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
}
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
@@ -384,22 +386,19 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
/* Try listening on IPv6 first. */
- addr = (struct sockaddr *)&wild6;
- addr_len = sizeof(wild6);
+ addr = &wild6;
// TODO(rjshade): Test and propagate the returned grpc_error*:
grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd);
- allocated_port1 =
- add_socket_to_server(s, fd, addr, addr_len, read_cb, orphan_cb);
+ allocated_port1 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && allocated_port1 > 0) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
+ grpc_sockaddr_set_port(&wild4, allocated_port1);
}
- addr = (struct sockaddr *)&wild4;
- addr_len = sizeof(wild4);
+ addr = &wild4;
}
// TODO(rjshade): Test and propagate the returned grpc_error*:
@@ -409,11 +408,9 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
}
if (dsmode == GRPC_DSMODE_IPV4 &&
grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
- allocated_port2 =
- add_socket_to_server(s, fd, addr, addr_len, read_cb, orphan_cb);
+ allocated_port2 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb);
done:
gpr_free(allocated_addr);
@@ -421,27 +418,40 @@ done:
}
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
- return (port_index < s->nports) ? s->ports[port_index].fd : -1;
+ grpc_udp_listener *sp;
+ if (port_index >= s->nports) {
+ return -1;
+ }
+
+ for (sp = s->head; sp && port_index != 0; sp = sp->next) {
+ --port_index;
+ }
+ return sp->fd;
}
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
grpc_server *server) {
- size_t i, j;
+ size_t i;
gpr_mu_lock(&s->mu);
+ grpc_udp_listener *sp;
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
s->grpc_server = server;
- for (i = 0; i < s->nports; i++) {
- for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
+
+ sp = s->head;
+ while (sp != NULL) {
+ for (i = 0; i < pollset_count; i++) {
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- s->ports[i].read_closure.cb = on_read;
- s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
- &s->ports[i].read_closure);
+ sp->read_closure.cb = on_read;
+ sp->read_closure.cb_arg = sp;
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+
s->active_ports++;
+ sp = sp->next;
}
+
gpr_mu_unlock(&s->mu);
}
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index 33c5ce11cd..f3c466a031 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Forward decl of struct grpc_server */
/* This is not typedef'ed to avoid a typedef-redefinition error */
@@ -59,7 +60,7 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
grpc_pollset **pollsets, size_t pollset_count,
struct grpc_server *server);
-int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
+int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
/* Add a port to the server, returning port number on success, or negative
on failure.
@@ -71,8 +72,9 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
- size_t addr_len, grpc_udp_server_read_cb read_cb,
+int grpc_udp_server_add_port(grpc_udp_server *s,
+ const grpc_resolved_address *addr,
+ grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb);
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c
index 0e7670e5a5..030acd9811 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.c
@@ -30,16 +30,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+#include "src/core/lib/iomgr/port.h"
-#include "src/core/lib/iomgr/unix_sockets_posix.h"
+#ifdef GRPC_HAVE_UNIX_SOCKET
-#ifdef GPR_HAVE_UNIX_SOCKET
+#include "src/core/lib/iomgr/sockaddr.h"
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
+#include "src/core/lib/iomgr/unix_sockets_posix.h"
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -61,15 +64,18 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
return GRPC_ERROR_NONE;
}
-int grpc_is_unix_socket(const struct sockaddr *addr) {
+int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
return addr->sa_family == AF_UNIX;
}
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {
+void grpc_unlink_if_unix_domain_socket(
+ const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return;
}
- struct sockaddr_un *un = (struct sockaddr_un *)addr;
+ struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
struct stat st;
if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
@@ -77,7 +83,9 @@ void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {
}
}
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return NULL;
}
diff --git a/src/core/lib/iomgr/unix_sockets_posix.h b/src/core/lib/iomgr/unix_sockets_posix.h
index db0516d945..21afd3aa15 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.h
+++ b/src/core/lib/iomgr/unix_sockets_posix.h
@@ -34,22 +34,23 @@
#ifndef GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H
#define GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/sockaddr.h"
void grpc_create_socketpair_if_unix(int sv[2]);
grpc_error *grpc_resolve_unix_domain_address(
const char *name, grpc_resolved_addresses **addresses);
-int grpc_is_unix_socket(const struct sockaddr *addr);
+int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr);
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr);
+void grpc_unlink_if_unix_domain_socket(
+ const grpc_resolved_address *resolved_addr);
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr);
+char *grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address *resolved_addr);
#endif /* GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H */
diff --git a/src/core/lib/iomgr/unix_sockets_posix_noop.c b/src/core/lib/iomgr/unix_sockets_posix_noop.c
index 56b47c3daf..1daf5152c1 100644
--- a/src/core/lib/iomgr/unix_sockets_posix_noop.c
+++ b/src/core/lib/iomgr/unix_sockets_posix_noop.c
@@ -33,7 +33,7 @@
#include "src/core/lib/iomgr/unix_sockets_posix.h"
-#ifndef GPR_HAVE_UNIX_SOCKET
+#ifndef GRPC_HAVE_UNIX_SOCKET
#include <grpc/support/log.h>
@@ -50,11 +50,11 @@ grpc_error *grpc_resolve_unix_domain_address(
return GRPC_ERROR_CREATE("Unix domain sockets are not supported on Windows");
}
-int grpc_is_unix_socket(const struct sockaddr *addr) { return false; }
+int grpc_is_unix_socket(const grpc_resolved_address *addr) { return false; }
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {}
+void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address *addr) {}
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address *addr) {
return NULL;
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.c
new file mode 100644
index 0000000000..da4c2870cd
--- /dev/null
+++ b/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_POSIX_WAKEUP_FD
+
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+
+#define MAX_TABLE_RESIZE 256
+
+extern cv_fd_table g_cvfds;
+
+static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
+ unsigned int i, newsize;
+ int idx;
+ gpr_mu_lock(&g_cvfds.mu);
+ if (!g_cvfds.free_fds) {
+ newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
+ g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+ for (i = g_cvfds.size; i < newsize; i++) {
+ g_cvfds.cvfds[i].is_set = 0;
+ g_cvfds.cvfds[i].cvs = NULL;
+ g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[i];
+ }
+ g_cvfds.size = newsize;
+ }
+
+ idx = (int)(g_cvfds.free_fds - g_cvfds.cvfds);
+ g_cvfds.free_fds = g_cvfds.free_fds->next_free;
+ g_cvfds.cvfds[idx].cvs = NULL;
+ g_cvfds.cvfds[idx].is_set = 0;
+ fd_info->read_fd = IDX_TO_FD(idx);
+ fd_info->write_fd = -1;
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
+ cv_node* cvn;
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+ cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+ while (cvn) {
+ gpr_cv_signal(cvn->cv);
+ cvn = cvn->next;
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_cvfds.mu);
+ // Assert that there are no active pollers
+ GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+static int cv_check_availability(void) { return 1; }
+
+const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable = {
+ cv_fd_init, cv_fd_consume, cv_fd_wakeup, cv_fd_destroy,
+ cv_check_availability};
+
+#endif /* GRPC_POSIX_WAKEUP_FD */
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
new file mode 100644
index 0000000000..ac16be1750
--- /dev/null
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * wakeup_fd_cv uses condition variables to implement wakeup fds.
+ *
+ * It is intended for use only in cases when eventfd() and pipe() are not
+ * available. It can only be used with the "poll" engine.
+ *
+ * Implementation:
+ * A global table of cv wakeup fds is maintained. A cv wakeup fd is a negative
+ * file descriptor. poll() is then run in a background thread with only the
+ * real socket fds while we wait on a condition variable triggered by either the
+ * poll() completion or a wakeup_fd() call.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
+#define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
+
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/ev_posix.h"
+
+#define FD_TO_IDX(fd) (-(fd)-1)
+#define IDX_TO_FD(idx) (-(idx)-1)
+
+typedef struct cv_node {
+ gpr_cv* cv;
+ struct cv_node* next;
+} cv_node;
+
+typedef struct fd_node {
+ int is_set;
+ cv_node* cvs;
+ struct fd_node* next_free;
+} fd_node;
+
+typedef struct cv_fd_table {
+ gpr_mu mu;
+ int pollcount;
+ int shutdown;
+ gpr_cv shutdown_complete;
+ fd_node* cvfds;
+ fd_node* free_fds;
+ unsigned int size;
+ grpc_poll_function_type poll;
+} cv_fd_table;
+
+#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */
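Note: the negative-fd encoding described in the comment above round-trips through FD_TO_IDX/IDX_TO_FD; a small sanity sketch (illustrative only, assumes <grpc/support/log.h> for GPR_ASSERT):

    int fake_fd = IDX_TO_FD(5);      /* table slot 5 is exposed as fd -6 */
    GPR_ASSERT(fake_fd < 0);         /* cv wakeup fds are always negative */
    GPR_ASSERT(FD_TO_IDX(fake_fd) == 5);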
diff --git a/src/core/lib/iomgr/wakeup_fd_eventfd.c b/src/core/lib/iomgr/wakeup_fd_eventfd.c
index 95f6102330..373e21d3e1 100644
--- a/src/core/lib/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/lib/iomgr/wakeup_fd_eventfd.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_LINUX_EVENTFD
+#ifdef GRPC_LINUX_EVENTFD
#include <errno.h>
#include <sys/eventfd.h>
@@ -94,4 +94,4 @@ const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
eventfd_check_availability};
-#endif /* GPR_LINUX_EVENTFD */
+#endif /* GRPC_LINUX_EVENTFD */
diff --git a/src/core/lib/iomgr/wakeup_fd_nospecial.c b/src/core/lib/iomgr/wakeup_fd_nospecial.c
index cb2f707dc5..611bced029 100644
--- a/src/core/lib/iomgr/wakeup_fd_nospecial.c
+++ b/src/core/lib/iomgr/wakeup_fd_nospecial.c
@@ -36,9 +36,9 @@
* systems without anything better than pipe.
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_NO_SPECIAL_WAKEUP_FD
+#ifdef GRPC_POSIX_NO_SPECIAL_WAKEUP_FD
#include <stddef.h>
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
@@ -48,4 +48,4 @@ static int check_availability_invalid(void) { return 0; }
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
NULL, NULL, NULL, NULL, check_availability_invalid};
-#endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */
+#endif /* GRPC_POSIX_NO_SPECIAL_WAKEUP_FD */
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.c b/src/core/lib/iomgr/wakeup_fd_pipe.c
index 4e5dbdcb73..183f0eb930 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.c
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_WAKEUP_FD
+#ifdef GRPC_POSIX_WAKEUP_FD
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
@@ -47,11 +47,10 @@
static grpc_error* pipe_init(grpc_wakeup_fd* fd_info) {
int pipefd[2];
- /* TODO(klempner): Make this nonfatal */
int r = pipe(pipefd);
if (0 != r) {
gpr_log(GPR_ERROR, "pipe creation failed (%d): %s", errno, strerror(errno));
- abort();
+ return GRPC_OS_ERROR(errno, "pipe");
}
grpc_error* err;
err = grpc_set_socket_nonblocking(pipefd[0], 1);
@@ -95,8 +94,13 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) {
}
static int pipe_check_availability(void) {
- /* Assume that pipes are always available. */
- return 1;
+ grpc_wakeup_fd fd;
+ if (pipe_init(&fd) == GRPC_ERROR_NONE) {
+ pipe_destroy(&fd);
+ return 1;
+ } else {
+ return 0;
+ }
}
const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.c b/src/core/lib/iomgr/wakeup_fd_posix.c
index 046208abc8..85526402bd 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.c
+++ b/src/core/lib/iomgr/wakeup_fd_posix.c
@@ -31,42 +31,71 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_WAKEUP_FD
+#ifdef GRPC_POSIX_WAKEUP_FD
#include <stddef.h>
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+extern grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
+
int grpc_allow_specialized_wakeup_fd = 1;
+int grpc_allow_pipe_wakeup_fd = 1;
+
+int has_real_wakeup_fd = 1;
+int cv_wakeup_fds_enabled = 0;
void grpc_wakeup_fd_global_init(void) {
if (grpc_allow_specialized_wakeup_fd &&
grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
- } else {
+ } else if (grpc_allow_pipe_wakeup_fd &&
+ grpc_pipe_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
+ } else {
+ has_real_wakeup_fd = 0;
}
}
void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
+int grpc_has_wakeup_fd(void) { return has_real_wakeup_fd; }
+
+int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; }
+
+void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; }
+
grpc_error *grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.init(fd_info);
+ }
return wakeup_fd_vtable->init(fd_info);
}
grpc_error *grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.consume(fd_info);
+ }
return wakeup_fd_vtable->consume(fd_info);
}
grpc_error *grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.wakeup(fd_info);
+ }
return wakeup_fd_vtable->wakeup(fd_info);
}
void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
- wakeup_fd_vtable->destroy(fd_info);
+ if (cv_wakeup_fds_enabled) {
+ grpc_cv_wakeup_fd_vtable.destroy(fd_info);
+ } else {
+ wakeup_fd_vtable->destroy(fd_info);
+ }
}
-#endif /* GPR_POSIX_WAKEUP_FD */
+#endif /* GRPC_POSIX_WAKEUP_FD */
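Note: wakeup-fd selection is now a three-step fallback: the specialized mechanism (eventfd on Linux) if allowed and available, then pipes if allowed and available, otherwise no real wakeup fd at all, in which case has_real_wakeup_fd is cleared and a poller can opt into the condition-variable implementation explicitly. A hedged caller-side sketch of that opt-in (the actual call site is not part of this hunk):

    grpc_wakeup_fd_global_init();
    if (!grpc_has_wakeup_fd()) {
      /* no eventfd and no usable pipe: route init/consume/wakeup/destroy
         through grpc_cv_wakeup_fd_vtable instead */
      grpc_enable_cv_wakeup_fds(1);
    }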
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.h b/src/core/lib/iomgr/wakeup_fd_posix.h
index e269f242d8..71d32d97ba 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.h
+++ b/src/core/lib/iomgr/wakeup_fd_posix.h
@@ -71,6 +71,10 @@ void grpc_wakeup_fd_global_destroy(void);
* purposes only.*/
void grpc_wakeup_fd_global_init_force_fallback(void);
+int grpc_has_wakeup_fd(void);
+int grpc_cv_wakeup_fds_enabled(void);
+void grpc_enable_cv_wakeup_fds(int enable);
+
typedef struct grpc_wakeup_fd grpc_wakeup_fd;
typedef struct grpc_wakeup_fd_vtable {
@@ -88,6 +92,7 @@ struct grpc_wakeup_fd {
};
extern int grpc_allow_specialized_wakeup_fd;
+extern int grpc_allow_pipe_wakeup_fd;
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index b2805dc66c..73d9849843 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -39,10 +39,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
-
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/lib/iomgr/workqueue_posix.h"
-#endif
+#include "src/core/lib/iomgr/port.h"
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
@@ -58,20 +55,20 @@
string will be printed alongside the refcount. When it is not defined, the
string will be discarded at compilation time. */
-//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
- (grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
+ grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason);
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
-#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
+#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
-void grpc_workqueue_ref(grpc_workqueue *workqueue);
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
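Note: grpc_workqueue_ref (in both the debug and non-debug variants) now returns the workqueue pointer, so GRPC_WORKQUEUE_REF no longer needs the comma-operator trick to stay expression-valued. An assumed call site, for illustration only:

    /* the reason string is hypothetical; any caller-chosen label works */
    grpc_workqueue *owned = GRPC_WORKQUEUE_REF(q, "example_ref");
    GRPC_WORKQUEUE_UNREF(exec_ctx, owned, "example_ref");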
diff --git a/src/core/lib/iomgr/workqueue_posix.c b/src/core/lib/iomgr/workqueue_posix.c
deleted file mode 100644
index ecfea68f56..0000000000
--- a/src/core/lib/iomgr/workqueue_posix.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_SOCKET
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-#include <stdio.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/profiling/timers.h"
-
-static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
- grpc_workqueue **workqueue) {
- char name[32];
- *workqueue = gpr_malloc(sizeof(grpc_workqueue));
- gpr_ref_init(&(*workqueue)->refs, 1);
- gpr_atm_no_barrier_store(&(*workqueue)->state, 1);
- grpc_error *err = grpc_wakeup_fd_init(&(*workqueue)->wakeup_fd);
- if (err != GRPC_ERROR_NONE) {
- gpr_free(*workqueue);
- return err;
- }
- sprintf(name, "workqueue:%p", (void *)(*workqueue));
- (*workqueue)->wakeup_read_fd = grpc_fd_create(
- GRPC_WAKEUP_FD_GET_READ_FD(&(*workqueue)->wakeup_fd), name);
- gpr_mpscq_init(&(*workqueue)->queue);
- grpc_closure_init(&(*workqueue)->read_closure, on_readable, *workqueue);
- grpc_fd_notify_on_read(exec_ctx, (*workqueue)->wakeup_read_fd,
- &(*workqueue)->read_closure);
- return GRPC_ERROR_NONE;
-}
-
-static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
-}
-
-static void workqueue_orphan(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (gpr_atm_full_fetch_add(&workqueue->state, -1) == 1) {
- workqueue_destroy(exec_ctx, workqueue);
- }
-}
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason) {
- if (workqueue == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
- workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
- reason);
- gpr_ref(&workqueue->refs);
-}
-#else
-void grpc_workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue == NULL) return;
- gpr_ref(&workqueue->refs);
-}
-#endif
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
- workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
- reason);
- if (gpr_unref(&workqueue->refs)) {
- workqueue_orphan(exec_ctx, workqueue);
- }
-}
-#else
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- if (workqueue == NULL) return;
- if (gpr_unref(&workqueue->refs)) {
- workqueue_orphan(exec_ctx, workqueue);
- }
-}
-#endif
-
-static void drain(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- abort();
-}
-
-static void wakeup(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- GPR_TIMER_MARK("workqueue.wakeup", 0);
- grpc_error *err = grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
- if (!GRPC_LOG_IF_ERROR("wakeupfd_wakeup", err)) {
- drain(exec_ctx, workqueue);
- }
-}
-
-static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.on_readable", 0);
-
- grpc_workqueue *workqueue = arg;
-
- if (error != GRPC_ERROR_NONE) {
- /* HACK: let wakeup_fd code know that we stole the fd */
- workqueue->wakeup_fd.read_fd = 0;
- grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
- grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
- GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
- gpr_free(workqueue);
- } else {
- error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
- gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
- if (error == GRPC_ERROR_NONE) {
- grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
- &workqueue->read_closure);
- } else {
- /* recurse to get error handling */
- on_readable(exec_ctx, arg, error);
- }
- if (n == NULL) {
- /* try again - queue in an inconsistant state */
- wakeup(exec_ctx, workqueue);
- } else {
- switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
- case 3: // had one count, one unorphaned --> done, unorphaned
- break;
- case 2: // had one count, one orphaned --> done, orphaned
- workqueue_destroy(exec_ctx, workqueue);
- break;
- case 1:
- case 0:
- // these values are illegal - representing an already done or
- // deleted workqueue
- GPR_UNREACHABLE_CODE(break);
- default:
- // schedule a wakeup since there's more to do
- wakeup(exec_ctx, workqueue);
- }
- grpc_closure *cl = (grpc_closure *)n;
- grpc_error *clerr = cl->error;
- cl->cb(exec_ctx, cl->cb_arg, clerr);
- GRPC_ERROR_UNREF(clerr);
- }
- }
-
- GPR_TIMER_END("workqueue.on_readable", 0);
-}
-
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
- GPR_ASSERT(last & 1);
- closure->error = error;
- gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next);
- if (last == 1) {
- wakeup(exec_ctx, workqueue);
- }
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-#endif /* GPR_POSIX_SOCKET */
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
new file mode 100644
index 0000000000..e58ca476cc
--- /dev/null
+++ b/src/core/lib/iomgr/workqueue_uv.c
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/iomgr/workqueue.h"
+
+// Minimal implementation of grpc_workqueue for libuv
+// Works by directly enqueuing workqueue items onto the current execution
+// context, which is at least correct, if not performant or in the spirit of
+// workqueues.
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return workqueue;
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+#endif /* GRPC_UV */
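Note: on the libuv path there is no wakeup fd to build a workqueue around, so this stub runs enqueued closures on the current exec_ctx and makes ref/unref no-ops that hand back the same pointer. A behavioural sketch, with the callback and closure names invented for illustration:

    static void on_item(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      gpr_log(GPR_INFO, "workqueue item ran");
    }

    grpc_closure item;
    grpc_closure_init(&item, on_item, NULL);
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_workqueue_enqueue(&exec_ctx, NULL /* ignored by this implementation */,
                           &item, GRPC_ERROR_NONE);
    grpc_exec_ctx_finish(&exec_ctx); /* on_item fires during the flush */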
diff --git a/src/core/lib/iomgr/workqueue_uv.h b/src/core/lib/iomgr/workqueue_uv.h
new file mode 100644
index 0000000000..be3f8e4d93
--- /dev/null
+++ b/src/core/lib/iomgr/workqueue_uv.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
+#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
+
+#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index ee81dc248e..5c93d3c59e 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -43,12 +43,16 @@
// workqueues.
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason) {}
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return workqueue;
+}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
-void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
diff --git a/src/core/lib/profiling/basic_timers.c b/src/core/lib/profiling/basic_timers.c
index 51813d0461..bdf9af2339 100644
--- a/src/core/lib/profiling/basic_timers.c
+++ b/src/core/lib/profiling/basic_timers.c
@@ -83,6 +83,7 @@ static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
+static int g_writing_enabled = 1;
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
@@ -177,7 +178,7 @@ static void flush_logs(gpr_timer_log_list *list) {
}
}
-static void finish_writing() {
+static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
@@ -230,6 +231,10 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
+ if (!g_writing_enabled) {
+ return;
+ }
+
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
@@ -261,6 +266,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
gpr_timers_log_add(tagstr, END, important, file, line);
}
+void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
+
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
@@ -272,4 +279,6 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
+
+void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */
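Note: gpr_timer_set_enabled gives callers a runtime switch over the basic profiler: gpr_timers_log_add returns early while g_writing_enabled is 0, and the non-GRPC_BASIC_PROFILER build gets a no-op stub so call sites need no #ifdefs. Assumed usage, with the helper name invented:

    gpr_timer_set_enabled(0);   /* suppress GPR_TIMER_* markers */
    run_latency_sensitive_benchmark();
    gpr_timer_set_enabled(1);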
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index c8567e8137..621cdbf656 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -50,6 +50,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
void gpr_timers_set_log_filename(const char *filename);
+void gpr_timer_set_enabled(int enabled);
+
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index 850e41e646..d55d00b7b6 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -242,8 +242,17 @@ static grpc_security_status composite_channel_create_security_connector(
return status;
}
+static grpc_channel_credentials *
+composite_channel_duplicate_without_call_credentials(
+ grpc_channel_credentials *creds) {
+ grpc_composite_channel_credentials *c =
+ (grpc_composite_channel_credentials *)creds;
+ return grpc_channel_credentials_ref(c->inner_creds);
+}
+
static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
- composite_channel_destruct, composite_channel_create_security_connector};
+ composite_channel_destruct, composite_channel_create_security_connector,
+ composite_channel_duplicate_without_call_credentials};
grpc_channel_credentials *grpc_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index 0d8966f464..f8425c2b76 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -53,7 +53,7 @@ grpc_call_credentials *grpc_credentials_contains_type(
grpc_call_credentials *creds, const char *type,
grpc_call_credentials **composite_creds);
-/* -- Channel composite credentials. -- */
+/* -- Composite channel credentials. -- */
typedef struct {
grpc_channel_credentials base;
@@ -61,7 +61,7 @@ typedef struct {
grpc_call_credentials *call_creds;
} grpc_composite_channel_credentials;
-/* -- Composite credentials. -- */
+/* -- Composite call credentials. -- */
typedef struct {
grpc_call_credentials base;
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index 029a357261..1149e5c2ed 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -138,6 +138,18 @@ grpc_security_status grpc_channel_credentials_create_security_connector(
channel_creds, NULL, target, args, sc, new_args);
}
+grpc_channel_credentials *
+grpc_channel_credentials_duplicate_without_call_credentials(
+ grpc_channel_credentials *channel_creds) {
+ if (channel_creds != NULL && channel_creds->vtable != NULL &&
+ channel_creds->vtable->duplicate_without_call_credentials != NULL) {
+ return channel_creds->vtable->duplicate_without_call_credentials(
+ channel_creds);
+ } else {
+ return grpc_channel_credentials_ref(channel_creds);
+ }
+}
+
grpc_server_credentials *grpc_server_credentials_ref(
grpc_server_credentials *creds) {
if (creds == NULL) return NULL;
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 8e9d842ead..6fb5b5b15a 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -107,6 +107,9 @@ typedef struct {
grpc_channel_credentials *c, grpc_call_credentials *call_creds,
const char *target, const grpc_channel_args *args,
grpc_channel_security_connector **sc, grpc_channel_args **new_args);
+
+ grpc_channel_credentials *(*duplicate_without_call_credentials)(
+ grpc_channel_credentials *c);
} grpc_channel_credentials_vtable;
struct grpc_channel_credentials {
@@ -128,6 +131,13 @@ grpc_security_status grpc_channel_credentials_create_security_connector(
const grpc_channel_args *args, grpc_channel_security_connector **sc,
grpc_channel_args **new_args);
+/* Creates a version of the channel credentials without any attached call
+ credentials. This can be used in order to open a channel to a non-trusted
+ gRPC load balancer. */
+grpc_channel_credentials *
+grpc_channel_credentials_duplicate_without_call_credentials(
+ grpc_channel_credentials *creds);
+
/* --- grpc_credentials_md. --- */
typedef struct {
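Note: the new duplicate_without_call_credentials vtable hook, together with the grpc_channel_credentials_duplicate_without_call_credentials helper above, lets composite channel credentials be stripped back to their inner channel credentials; credential types that do not implement the hook (fake and SSL pass NULL below) are simply re-referenced, which is already correct since they carry no call credentials. A hedged sketch of the intended use when dialing a non-trusted load balancer:

    grpc_channel_credentials *lb_creds =
        grpc_channel_credentials_duplicate_without_call_credentials(creds);
    /* ... create the balancer channel with lb_creds ... */
    grpc_channel_credentials_unref(lb_creds);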
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index 51cafd986f..ea4cb76fb9 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -61,7 +61,7 @@ fake_transport_security_server_create_security_connector(
static grpc_channel_credentials_vtable
fake_transport_security_credentials_vtable = {
- NULL, fake_transport_security_create_security_connector};
+ NULL, fake_transport_security_create_security_connector, NULL};
static grpc_server_credentials_vtable
fake_transport_security_server_credentials_vtable = {
diff --git a/src/core/lib/security/credentials/google_default/credentials_posix.c b/src/core/lib/security/credentials/google_default/credentials_generic.c
index 42c9d7f997..d13d8c5200 100644
--- a/src/core/lib/security/credentials/google_default/credentials_posix.c
+++ b/src/core/lib/security/credentials/google_default/credentials_generic.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,6 @@
*
*/
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_FILE
-
#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
#include <grpc/support/alloc.h>
@@ -46,16 +42,13 @@
char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
- char *home = gpr_getenv("HOME");
- if (home == NULL) {
- gpr_log(GPR_ERROR, "Could not get HOME environment variable.");
+ char *base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
+ if (base == NULL) {
+ gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_ENV_VAR
+ " environment variable.");
return NULL;
}
- gpr_asprintf(&result, "%s/.config/%s/%s", home,
- GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY,
- GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE);
- gpr_free(home);
+ gpr_asprintf(&result, "%s/%s", base, GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX);
+ gpr_free(base);
return result;
}
-
-#endif /* GPR_POSIX_FILE */
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index 312a3d4f90..cb5ba554b0 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -124,11 +124,14 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
- &exec_ctx, &context, &detector.pollent, &request,
+ &exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector),
&detector.response);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
grpc_exec_ctx_flush(&exec_ctx);
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index fac4377e2c..b55546ded0 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -34,12 +34,26 @@
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H
#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/security/credentials/credentials.h"
#define GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY "gcloud"
#define GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE \
"application_default_credentials.json"
+#ifdef GPR_WINDOWS
+#define GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR "APPDATA"
+#define GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX \
+ GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY \
+ "/" GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE
+#else
+#define GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR "HOME"
+#define GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX \
+ ".config/" GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY \
+ "/" GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE
+#endif
+
void grpc_flush_cached_google_default_credentials(void);
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
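Note: together with the renamed credentials_generic.c above, these macros make the well-known credentials lookup platform-generic: the file is located by reading one base environment variable and appending a fixed suffix, which expands to roughly $HOME/.config/gcloud/application_default_credentials.json on POSIX systems and %APPDATA%/gcloud/application_default_credentials.json on Windows.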
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 73eb2e3258..43eb642515 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -657,11 +657,17 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
grpc_closure_create(on_keys_retrieved, ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_json_destroy(json);
gpr_free(req.host);
return;
@@ -764,10 +770,16 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
rsp_idx = HTTP_RESPONSE_OPENID;
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
http_cb, &ctx->responses[rsp_idx]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
return;
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index c22ea5c468..d980577c46 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -307,9 +307,15 @@ static void compute_engine_fetch_oauth2(
request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
- grpc_httpcli_get(exec_ctx, httpcli_context, pollent, &request, deadline,
- grpc_closure_create(response_cb, metadata_req),
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials");
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
+ deadline, grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
grpc_call_credentials *grpc_google_compute_engine_credentials_create(
@@ -357,10 +363,16 @@ static void refresh_token_fetch_oauth2(
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
- grpc_httpcli_post(exec_ctx, httpcli_context, pollent, &request, body,
- strlen(body), deadline,
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials_refresh");
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
+ &request, body, strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(body);
}
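Note: these credential fetchers all follow the same pattern: grpc_httpcli_get/grpc_httpcli_post now take a grpc_resource_quota, so each site creates a throwaway quota named after itself and drops its reference immediately after the call, the HTTP client having taken its own; the TODOs note that the quota should eventually be shared with the host channel so memory pressure can cancel these lookups. Distilled ownership sketch (names are illustrative):

    grpc_resource_quota *q = grpc_resource_quota_create("example_fetch");
    grpc_httpcli_get(exec_ctx, httpcli_context, pollent, q, &request, deadline,
                     grpc_closure_create(on_response, state), &response);
    grpc_resource_quota_internal_unref(exec_ctx, q); /* httpcli holds its own ref */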
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.c b/src/core/lib/security/credentials/ssl/ssl_credentials.c
index 545bca9d98..0dc1fccec4 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.c
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.c
@@ -95,7 +95,7 @@ static grpc_security_status ssl_create_security_connector(
}
static grpc_channel_credentials_vtable ssl_vtable = {
- ssl_destruct, ssl_create_security_connector};
+ ssl_destruct, ssl_create_security_connector, NULL};
static void ssl_build_config(const char *pem_root_certs,
grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index acb0113ea8..3924997d31 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -370,6 +370,12 @@ static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
}
+static grpc_resource_user *endpoint_get_resource_user(
+ grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_resource_user(ep->wrapped_ep);
+}
+
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_get_workqueue,
@@ -377,6 +383,7 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_add_to_pollset_set,
endpoint_shutdown,
endpoint_destroy,
+ endpoint_get_resource_user,
endpoint_get_peer};
grpc_endpoint *grpc_secure_endpoint_create(
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 0eca46eb52..ebf72a3abb 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -210,11 +210,11 @@ void grpc_security_connector_unref(grpc_security_connector *sc) {
}
static void connector_pointer_arg_destroy(void *p) {
- GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg");
+ GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg_destroy");
}
static void *connector_pointer_arg_copy(void *p) {
- return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
+ return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy");
}
static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
diff --git a/src/core/lib/support/log.c b/src/core/lib/support/log.c
index 899f1218b6..af1651dae5 100644
--- a/src/core/lib/support/log.c
+++ b/src/core/lib/support/log.c
@@ -60,8 +60,9 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
const char *message) {
- if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print))
+ if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) {
return;
+ }
gpr_log_func_args lfargs;
memset(&lfargs, 0, sizeof(lfargs));
@@ -82,11 +83,11 @@ void gpr_log_verbosity_init() {
gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
if (verbosity != NULL) {
- if (strcmp(verbosity, "DEBUG") == 0) {
+ if (gpr_stricmp(verbosity, "DEBUG") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
- } else if (strcmp(verbosity, "INFO") == 0) {
+ } else if (gpr_stricmp(verbosity, "INFO") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
- } else if (strcmp(verbosity, "ERROR") == 0) {
+ } else if (gpr_stricmp(verbosity, "ERROR") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
}
gpr_free(verbosity);
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index 30c1e67647..d17fb9da4b 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -304,3 +304,14 @@ void gpr_strvec_add(gpr_strvec *sv, char *str) {
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
}
+
+int gpr_stricmp(const char *a, const char *b) {
+ int ca, cb;
+ do {
+ ca = tolower(*a);
+ cb = tolower(*b);
+ ++a;
+ ++b;
+ } while (ca == cb && ca && cb);
+ return ca - cb;
+}
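Note: gpr_stricmp lowercases both strings character by character and returns the difference of the first mismatching pair, so only the sign (and zero) of the result is meaningful; since it relies on tolower, inputs are assumed to be plain ASCII, as with the GRPC_VERBOSITY strings it was added for. A couple of assumed checks:

    GPR_ASSERT(gpr_stricmp("DEBUG", "debug") == 0);
    GPR_ASSERT(gpr_stricmp("INFO", "ERROR") != 0);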
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index 2b6bb3eec6..9a94e9471c 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -118,6 +118,10 @@ void gpr_strvec_add(gpr_strvec *strs, char *add);
total_length as per gpr_strjoin */
char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
+/** Case insensitive string comparison... return <0 if lower(a)<lower(b), ==0 if
+ lower(a)==lower(b), >0 if lower(a)>lower(b) */
+int gpr_stricmp(const char *a, const char *b);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/support/thd.c b/src/core/lib/support/thd.c
index 41daeb5d0e..40f53a18e5 100644
--- a/src/core/lib/support/thd.c
+++ b/src/core/lib/support/thd.c
@@ -33,7 +33,7 @@
/* Posix implementation for gpr threads. */
-#include <memory.h>
+#include <string.h>
#include <grpc/support/thd.h>
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index b0f66f4f61..6c25952c0a 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -30,6 +30,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+
#include <assert.h>
#include <limits.h>
#include <stdio.h>
@@ -223,33 +224,37 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
-grpc_call *grpc_call_create(
- grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
- grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
- const void *server_transport_data, grpc_mdelem **add_initial_metadata,
- size_t add_initial_metadata_count, gpr_timespec send_deadline) {
+grpc_error *grpc_call_create(const grpc_call_create_args *args,
+ grpc_call **out_call) {
size_t i, j;
- grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
+ grpc_channel_stack *channel_stack =
+ grpc_channel_get_channel_stack(args->channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ *out_call = call;
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
- call->channel = channel;
- call->cq = cq;
- call->parent = parent_call;
+ call->channel = args->channel;
+ call->cq = args->cq;
+ call->parent = args->parent_call;
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
- call->is_client = server_transport_data == NULL;
+ call->is_client = args->server_transport_data == NULL;
+ grpc_mdstr *path = NULL;
if (call->is_client) {
- GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
- for (i = 0; i < add_initial_metadata_count; i++) {
- call->send_extra_metadata[i].md = add_initial_metadata[i];
+ GPR_ASSERT(args->add_initial_metadata_count <
+ MAX_SEND_EXTRA_METADATA_COUNT);
+ for (i = 0; i < args->add_initial_metadata_count; i++) {
+ call->send_extra_metadata[i].md = args->add_initial_metadata[i];
+ if (args->add_initial_metadata[i]->key == GRPC_MDSTR_PATH) {
+ path = GRPC_MDSTR_REF(args->add_initial_metadata[i]->value);
+ }
}
- call->send_extra_metadata_count = (int)add_initial_metadata_count;
+ call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
} else {
- GPR_ASSERT(add_initial_metadata_count == 0);
+ GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
for (i = 0; i < 2; i++) {
@@ -257,84 +262,87 @@ grpc_call *grpc_call_create(
call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
}
}
- send_deadline = gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
+ gpr_timespec send_deadline =
+ gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
- if (parent_call != NULL) {
- GRPC_CALL_INTERNAL_REF(parent_call, "child");
+ if (args->parent_call != NULL) {
+ GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!parent_call->is_client);
+ GPR_ASSERT(!args->parent_call->is_client);
- gpr_mu_lock(&parent_call->mu);
+ gpr_mu_lock(&args->parent_call->mu);
- if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
+ if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
- parent_call->send_deadline.clock_type),
- parent_call->send_deadline);
+ args->parent_call->send_deadline.clock_type),
+ args->parent_call->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
/* TODO(ctiller): This should change to use the appropriate census start_op
* call. */
- if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
- GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
- grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
- parent_call->context[GRPC_CONTEXT_TRACING].value,
- NULL);
+ if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
+ GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ grpc_call_context_set(
+ call, GRPC_CONTEXT_TRACING,
+ args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else {
- GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
}
- if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
+ if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
}
- if (parent_call->first_child == NULL) {
- parent_call->first_child = call;
+ if (args->parent_call->first_child == NULL) {
+ args->parent_call->first_child = call;
call->sibling_next = call->sibling_prev = call;
} else {
- call->sibling_next = parent_call->first_child;
- call->sibling_prev = parent_call->first_child->sibling_prev;
+ call->sibling_next = args->parent_call->first_child;
+ call->sibling_prev = args->parent_call->first_child->sibling_prev;
call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
call;
}
- gpr_mu_unlock(&parent_call->mu);
+ gpr_mu_unlock(&args->parent_call->mu);
}
call->send_deadline = send_deadline;
- GRPC_CHANNEL_INTERNAL_REF(channel, "call");
+ GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
- grpc_error *error = grpc_call_stack_init(
- &exec_ctx, channel_stack, 1, destroy_call, call, call->context,
- server_transport_data, send_deadline, CALL_STACK_FROM_CALL(call));
+ grpc_error *error =
+ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
+ call->context, args->server_transport_data, path,
+ send_deadline, CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
grpc_status_code status;
const char *error_str;
grpc_error_get_status(error, &status, &error_str);
close_with_status(&exec_ctx, call, status, error_str);
- GRPC_ERROR_UNREF(error);
}
- if (cq != NULL) {
+ if (args->cq != NULL) {
GPR_ASSERT(
- pollset_set_alternative == NULL &&
+ args->pollset_set_alternative == NULL &&
"Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
- GRPC_CQ_INTERNAL_REF(cq, "bind");
+ GRPC_CQ_INTERNAL_REF(args->cq, "bind");
call->pollent =
- grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
+ grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq));
}
- if (pollset_set_alternative != NULL) {
- call->pollent =
- grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
+ if (args->pollset_set_alternative != NULL) {
+ call->pollent = grpc_polling_entity_create_from_pollset_set(
+ args->pollset_set_alternative);
}
if (!grpc_polling_entity_is_empty(&call->pollent)) {
grpc_call_stack_set_pollset_or_pollset_set(
&exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
+ if (path != NULL) GRPC_MDSTR_UNREF(path);
+
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
- return call;
+ return error;
}
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
@@ -1038,9 +1046,14 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
+ grpc_error *error = bctl->error;
+ if (bctl->recv_final_op) {
+ GRPC_ERROR_UNREF(error);
+ error = GRPC_ERROR_NONE;
+ }
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
- grpc_exec_ctx_sched(exec_ctx, bctl->notify_tag, bctl->error, NULL);
+ grpc_closure_run(exec_ctx, bctl->notify_tag, error);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@@ -1049,7 +1062,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
- grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->error,
+ grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, error,
finish_batch_completion, bctl, &bctl->cq_completion);
}
}
@@ -1198,6 +1211,14 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
+static void add_batch_error(batch_control *bctl, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (bctl->error == GRPC_ERROR_NONE) {
+ bctl->error = GRPC_ERROR_CREATE("Call batch operation failed");
+ }
+ bctl->error = grpc_error_add_child(bctl->error, error);
+}
+
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
batch_control *bctl = bctlp;
@@ -1205,9 +1226,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
- if (error != GRPC_ERROR_NONE) {
- bctl->error = GRPC_ERROR_REF(error);
- } else {
+ add_batch_error(bctl, GRPC_ERROR_REF(error));
+ if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_initial_filter, call);
@@ -1304,8 +1324,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
- GRPC_ERROR_UNREF(bctl->error);
- bctl->error = GRPC_ERROR_REF(error);
+ add_batch_error(bctl, GRPC_ERROR_REF(error));
gpr_mu_unlock(&call->mu);
if (gpr_unref(&bctl->steps_to_complete)) {
post_batch_completion(exec_ctx, bctl);
@@ -1341,6 +1360,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
+ stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");
@@ -1496,8 +1516,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, STATUS_FROM_API_OVERRIDE,
GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
}
- set_status_code(call, STATUS_FROM_API_OVERRIDE,
- (uint32_t)op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (uint32_t)op->data.send_status_from_server.status);
+ }
if (!prepare_application_metadata(
call,
(int)op->data.send_status_from_server.trailing_metadata_count,
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 3a78fe3aa3..18af41b7fb 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -49,15 +49,29 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
-grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask,
- grpc_completion_queue *cq,
- /* if not NULL, it'll be used in lieu of \a cq */
- grpc_pollset_set *pollset_set_alternative,
- const void *server_transport_data,
- grpc_mdelem **add_initial_metadata,
- size_t add_initial_metadata_count,
- gpr_timespec send_deadline);
+typedef struct grpc_call_create_args {
+ grpc_channel *channel;
+
+ grpc_call *parent_call;
+ uint32_t propagation_mask;
+
+ grpc_completion_queue *cq;
+ /* if not NULL, it'll be used in lieu of cq */
+ grpc_pollset_set *pollset_set_alternative;
+
+ const void *server_transport_data;
+
+ grpc_mdelem **add_initial_metadata;
+ size_t add_initial_metadata_count;
+
+ gpr_timespec send_deadline;
+} grpc_call_create_args;
+
+/* Create a new call based on \a args.
+ Regardless of success or failure, always stores a valid new call in \a *call
+ */
+grpc_error *grpc_call_create(const grpc_call_create_args *args,
+ grpc_call **call);
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
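Note: grpc_call_create now takes a single grpc_call_create_args struct instead of nine positional parameters and reports failure through a returned grpc_error*, while still handing back a usable call object, as the channel.c hunk below shows. A hedged sketch of the caller-side contract (error-string helpers are from error.h):

    grpc_call *call;
    grpc_error *error = grpc_call_create(&args, &call);
    if (error != GRPC_ERROR_NONE) {
      const char *msg = grpc_error_string(error);
      gpr_log(GPR_ERROR, "call creation failed: %s", msg);
      grpc_error_free_string(msg);
      GRPC_ERROR_UNREF(error); /* the returned error is owned by the caller */
    }
    /* *call is valid either way and must still be destroyed by the caller */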
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 6adb70a987..92d783b78d 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -193,9 +193,21 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
- return grpc_call_create(channel, parent_call, propagation_mask, cq,
- pollset_set_alternative, NULL, send_metadata,
- num_metadata, deadline);
+ grpc_call_create_args args;
+ memset(&args, 0, sizeof(args));
+ args.channel = channel;
+ args.parent_call = parent_call;
+ args.propagation_mask = propagation_mask;
+ args.cq = cq;
+ args.pollset_set_alternative = pollset_set_alternative;
+ args.server_transport_data = NULL;
+ args.add_initial_metadata = send_metadata;
+ args.add_initial_metadata_count = num_metadata;
+ args.send_deadline = deadline;
+
+ grpc_call *call;
+ GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
+ return call;
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 5978884db8..4e0feb56ac 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
@@ -50,6 +51,9 @@
#include "src/core/lib/surface/event_string.h"
int grpc_trace_operation_failures;
+#ifndef NDEBUG
+int grpc_trace_pending_tags;
+#endif
typedef struct {
grpc_pollset_worker **worker;
@@ -67,6 +71,9 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
+ /** counter of how many things have ever been queued on this completion queue
+ useful for avoiding locks to check the queue */
+ gpr_atm things_queued_ever;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
@@ -121,15 +128,6 @@ void grpc_cq_global_shutdown(void) {
}
}
-struct grpc_cq_alarm {
- grpc_timer alarm;
- grpc_cq_completion completion;
- /** completion queue where events about this alarm will be posted */
- grpc_completion_queue *cq;
- /** user supplied tag */
- void *tag;
-};
-
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
@@ -166,6 +164,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
cc->is_server_cq = 0;
cc->is_non_listening_server_cq = 0;
cc->num_pluckers = 0;
+ gpr_atm_no_barrier_store(&cc->things_queued_ever, 0);
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
#endif
@@ -276,6 +275,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(found);
#endif
shutdown = gpr_unref(&cc->pending_events);
+ gpr_atm_no_barrier_fetch_add(&cc->things_queued_ever, 1);
if (!shutdown) {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@@ -313,13 +313,66 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GRPC_ERROR_UNREF(error);
}
+typedef struct {
+ gpr_atm last_seen_things_queued_ever;
+ grpc_completion_queue *cq;
+ gpr_timespec deadline;
+ grpc_cq_completion *stolen_completion;
+ void *tag; /* for pluck */
+ bool first_loop;
+} cq_is_finished_arg;
+
+static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
+ cq_is_finished_arg *a = arg;
+ grpc_completion_queue *cq = a->cq;
+ GPR_ASSERT(a->stolen_completion == NULL);
+ gpr_atm current_last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
+ gpr_mu_lock(cq->mu);
+ a->last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (cq->completed_tail != &cq->completed_head) {
+ a->stolen_completion = (grpc_cq_completion *)cq->completed_head.next;
+ cq->completed_head.next = a->stolen_completion->next & ~(uintptr_t)1;
+ if (a->stolen_completion == cq->completed_tail) {
+ cq->completed_tail = &cq->completed_head;
+ }
+ gpr_mu_unlock(cq->mu);
+ return true;
+ }
+ gpr_mu_unlock(cq->mu);
+ }
+ return !a->first_loop &&
+ gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+}
+
+#ifndef NDEBUG
+static void dump_pending_tags(grpc_completion_queue *cc) {
+ if (!grpc_trace_pending_tags) return;
+
+ gpr_strvec v;
+ gpr_strvec_init(&v);
+ gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
+ for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
+ char *s;
+ gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
+ gpr_strvec_add(&v, s);
+ }
+ char *out = gpr_strvec_flatten(&v, NULL);
+ gpr_strvec_destroy(&v);
+ gpr_log(GPR_DEBUG, "%s", out);
+ gpr_free(out);
+}
+#else
+static void dump_pending_tags(grpc_completion_queue *cc) {}
+#endif
+
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
grpc_pollset_worker *worker = NULL;
- int first_loop = 1;
gpr_timespec now;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -333,11 +386,33 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
reserved));
GPR_ASSERT(!reserved);
+ dump_pending_tags(cc);
+
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
+ cq_is_finished_arg is_finished_arg = {
+ .last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cc->things_queued_ever),
+ .cq = cc,
+ .deadline = deadline,
+ .stolen_completion = NULL,
+ .tag = NULL,
+ .first_loop = true};
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
+ cq_is_next_finished, &is_finished_arg);
for (;;) {
+ if (is_finished_arg.stolen_completion != NULL) {
+ gpr_mu_unlock(cc->mu);
+ grpc_cq_completion *c = is_finished_arg.stolen_completion;
+ is_finished_arg.stolen_completion = NULL;
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ break;
+ }
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
cc->completed_head.next = c->next & ~(uintptr_t)1;
@@ -358,13 +433,13 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
- first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -387,13 +462,16 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
}
+ is_finished_arg.first_loop = false;
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_next", 0);
@@ -424,6 +502,37 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
GPR_UNREACHABLE_CODE(return );
}
+static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
+ cq_is_finished_arg *a = arg;
+ grpc_completion_queue *cq = a->cq;
+ GPR_ASSERT(a->stolen_completion == NULL);
+ gpr_atm current_last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
+ gpr_mu_lock(cq->mu);
+ a->last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ grpc_cq_completion *c;
+ grpc_cq_completion *prev = &cq->completed_head;
+ while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ &cq->completed_head) {
+ if (c->tag == a->tag) {
+ prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
+ if (c == cq->completed_tail) {
+ cq->completed_tail = prev;
+ }
+ gpr_mu_unlock(cq->mu);
+ a->stolen_completion = c;
+ return true;
+ }
+ prev = c;
+ }
+ gpr_mu_unlock(cq->mu);
+ }
+ return !a->first_loop &&
+ gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+}
+
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
@@ -431,8 +540,6 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
- int first_loop = 1;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -448,11 +555,33 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
}
GPR_ASSERT(!reserved);
+ dump_pending_tags(cc);
+
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
+ cq_is_finished_arg is_finished_arg = {
+ .last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cc->things_queued_ever),
+ .cq = cc,
+ .deadline = deadline,
+ .stolen_completion = NULL,
+ .tag = tag,
+ .first_loop = true};
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
+ cq_is_pluck_finished, &is_finished_arg);
for (;;) {
+ if (is_finished_arg.stolen_completion != NULL) {
+ gpr_mu_unlock(cc->mu);
+ c = is_finished_arg.stolen_completion;
+ is_finished_arg.stolen_completion = NULL;
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ break;
+ }
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cc->completed_head) {
@@ -485,17 +614,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
- first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -518,15 +648,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
}
+ is_finished_arg.first_loop = false;
del_plucker(cc, tag, &worker);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 4dbf3aae63..c1cafba5f2 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -44,6 +44,9 @@
extern int grpc_cq_pluck_trace;
extern int grpc_cq_event_timeout_trace;
extern int grpc_trace_operation_failures;
+#ifndef NDEBUG
+extern int grpc_trace_pending_tags;
+#endif
typedef struct grpc_cq_completion {
/** user supplied tag */
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 289f4ce8e8..7903f57a68 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -52,6 +52,7 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -184,12 +185,17 @@ void grpc_init(void) {
grpc_register_tracer("compression", &grpc_compression_trace);
grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
grpc_register_tracer("combiner", &grpc_combiner_trace);
+ grpc_register_tracer("server_channel", &grpc_server_channel_trace);
// Default pluck trace to 1
grpc_cq_pluck_trace = 1;
grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
+ grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
+#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();
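The tracers registered above are normally switched on at run time through the GRPC_TRACE environment variable (a comma-separated list of tracer names); the `pending_tags` tracer exists only in non-NDEBUG builds. A minimal sketch, assuming a POSIX environment and that GRPC_TRACE is read during grpc_init():

/* Sketch only: turn on the new tracers before initializing the library. */
#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  setenv("GRPC_TRACE", "pending_tags,resource_quota,server_channel", 1);
  grpc_init();
  /* ... run client or server ... */
  grpc_shutdown();
  return 0;
}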
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 7300d79b9f..3a90308058 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -71,6 +71,8 @@ typedef struct registered_method registered_method;
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
+int grpc_server_channel_trace = 0;
+
typedef struct requested_call {
requested_call_type type;
size_t cq_idx;
@@ -280,6 +282,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->send_goaway = send_goaway;
+ op->set_accept_stream = true;
sc->slice = gpr_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
@@ -439,6 +442,13 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
+ if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_INFO, "Disconnected client: %s", msg);
+ grpc_error_free_string(msg);
+ }
+ GRPC_ERROR_UNREF(error);
+
grpc_transport_op *op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
@@ -446,13 +456,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
op);
-
- if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_INFO, "Disconnected client: %s", msg);
- grpc_error_free_string(msg);
- }
- GRPC_ERROR_UNREF(error);
}
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
@@ -773,8 +776,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
}
- grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv_initial_metadata, error,
- NULL);
+ grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -829,11 +831,20 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
- grpc_call *call = grpc_call_create(chand->channel, NULL, 0, NULL, NULL,
- transport_server_data, NULL, 0,
- gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call_create_args args;
+ memset(&args, 0, sizeof(args));
+ args.channel = chand->channel;
+ args.server_transport_data = transport_server_data;
+ args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_call *call;
+ grpc_error *error = grpc_call_create(&args, &call);
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ if (error != GRPC_ERROR_NONE) {
+ got_initial_metadata(exec_ctx, elem, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index 551a40a4ff..a85d9f4964 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -40,6 +40,9 @@
extern const grpc_channel_filter grpc_server_top_filter;
+/** Lightweight tracing of server channel state */
+extern int grpc_server_channel_trace;
+
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
void grpc_server_add_listener(
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 68d05e3a85..fdb5307814 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -180,7 +180,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
*w->current = tracker->current_state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify);
+ gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
+ w->notify);
}
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);
diff --git a/src/core/lib/transport/mdstr_hash_table.c b/src/core/lib/transport/mdstr_hash_table.c
new file mode 100644
index 0000000000..8e914c420b
--- /dev/null
+++ b/src/core/lib/transport/mdstr_hash_table.c
@@ -0,0 +1,157 @@
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/transport/metadata.h"
+
+struct grpc_mdstr_hash_table {
+ gpr_refcount refs;
+ size_t num_entries;
+ size_t size;
+ grpc_mdstr_hash_table_entry* entries;
+};
+
+// Helper function for insert and get operations that performs quadratic
+// probing (https://en.wikipedia.org/wiki/Quadratic_probing).
+static size_t grpc_mdstr_hash_table_find_index(
+ const grpc_mdstr_hash_table* table, const grpc_mdstr* key,
+ bool find_empty) {
+ for (size_t i = 0; i < table->size; ++i) {
+ const size_t idx = (key->hash + i * i) % table->size;
+ if (table->entries[idx].key == NULL) return find_empty ? idx : table->size;
+ if (table->entries[idx].key == key) return idx;
+ }
+ return table->size; // Not found.
+}
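As a worked illustration of the probe sequence computed by the helper above (the hash value and table size are made up for the example):

/* Illustration only: slots visited for hash 3 in a table of size 10. */
#include <stdio.h>

int main(void) {
  const size_t size = 10, hash = 3;
  for (size_t i = 0; i < size; ++i) {
    printf("probe %zu -> slot %zu\n", i, (hash + i * i) % size);
  }
  return 0; /* visits slots 3, 4, 7, 2, 9, 8, 9, 2, 7, 4 */
}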
+
+static void grpc_mdstr_hash_table_add(
+ grpc_mdstr_hash_table* table, grpc_mdstr* key, void* value,
+ const grpc_mdstr_hash_table_vtable* vtable) {
+ GPR_ASSERT(value != NULL);
+ const size_t idx =
+ grpc_mdstr_hash_table_find_index(table, key, true /* find_empty */);
+ GPR_ASSERT(idx != table->size); // Table should never be full.
+ grpc_mdstr_hash_table_entry* entry = &table->entries[idx];
+ entry->key = GRPC_MDSTR_REF(key);
+ entry->value = vtable->copy_value(value);
+ entry->vtable = vtable;
+}
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
+ size_t num_entries, grpc_mdstr_hash_table_entry* entries) {
+ grpc_mdstr_hash_table* table = gpr_malloc(sizeof(*table));
+ memset(table, 0, sizeof(*table));
+ gpr_ref_init(&table->refs, 1);
+ table->num_entries = num_entries;
+ // Quadratic probing gets best performance when the table is no more
+ // than half full.
+ table->size = num_entries * 2;
+ const size_t entry_size = sizeof(grpc_mdstr_hash_table_entry) * table->size;
+ table->entries = gpr_malloc(entry_size);
+ memset(table->entries, 0, entry_size);
+ for (size_t i = 0; i < num_entries; ++i) {
+ grpc_mdstr_hash_table_entry* entry = &entries[i];
+ grpc_mdstr_hash_table_add(table, entry->key, entry->value, entry->vtable);
+ }
+ return table;
+}
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table) {
+ if (table != NULL) gpr_ref(&table->refs);
+ return table;
+}
+
+int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
+ if (table != NULL && gpr_unref(&table->refs)) {
+ for (size_t i = 0; i < table->size; ++i) {
+ grpc_mdstr_hash_table_entry* entry = &table->entries[i];
+ if (entry->key != NULL) {
+ GRPC_MDSTR_UNREF(entry->key);
+ entry->vtable->destroy_value(entry->value);
+ }
+ }
+ gpr_free(table->entries);
+ gpr_free(table);
+ return 1;
+ }
+ return 0;
+}
+
+size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table) {
+ return table->num_entries;
+}
+
+void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* key) {
+ const size_t idx =
+ grpc_mdstr_hash_table_find_index(table, key, false /* find_empty */);
+ if (idx == table->size) return NULL; // Not found.
+ return table->entries[idx].value;
+}
+
+int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
+ const grpc_mdstr_hash_table* table2) {
+ // Compare by num_entries.
+ if (table1->num_entries < table2->num_entries) return -1;
+ if (table1->num_entries > table2->num_entries) return 1;
+ for (size_t i = 0; i < table1->num_entries; ++i) {
+ grpc_mdstr_hash_table_entry* e1 = &table1->entries[i];
+ grpc_mdstr_hash_table_entry* e2 = &table2->entries[i];
+ // Compare keys by hash value.
+ if (e1->key->hash < e2->key->hash) return -1;
+ if (e1->key->hash > e2->key->hash) return 1;
+ // Compare by vtable (pointer equality).
+ if (e1->vtable < e2->vtable) return -1;
+ if (e1->vtable > e2->vtable) return 1;
+ // Compare values via vtable.
+ const int value_result = e1->vtable->compare_value(e1->value, e2->value);
+ if (value_result != 0) return value_result;
+ }
+ return 0;
+}
+
+void grpc_mdstr_hash_table_iterate(
+ const grpc_mdstr_hash_table* table,
+ void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
+ void* user_data) {
+ for (size_t i = 0; i < table->size; ++i) {
+ if (table->entries[i].key != NULL) {
+ func(&table->entries[i], user_data);
+ }
+ }
+}
diff --git a/src/core/lib/transport/mdstr_hash_table.h b/src/core/lib/transport/mdstr_hash_table.h
new file mode 100644
index 0000000000..bceb4df93d
--- /dev/null
+++ b/src/core/lib/transport/mdstr_hash_table.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
+#define GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
+
+#include "src/core/lib/transport/metadata.h"
+
+/** Hash table implementation.
+ *
+ * This implementation uses open addressing
+ * (https://en.wikipedia.org/wiki/Open_addressing) with quadratic
+ * probing (https://en.wikipedia.org/wiki/Quadratic_probing).
+ *
+ * The keys are \a grpc_mdstr objects. The values are arbitrary pointers
+ * with a common vtable.
+ *
+ * Hash tables are intentionally immutable, to avoid the need for locking.
+ */
+
+typedef struct grpc_mdstr_hash_table grpc_mdstr_hash_table;
+
+typedef struct grpc_mdstr_hash_table_vtable {
+ void (*destroy_value)(void* value);
+ void* (*copy_value)(void* value);
+ int (*compare_value)(void* value1, void* value2);
+} grpc_mdstr_hash_table_vtable;
+
+typedef struct grpc_mdstr_hash_table_entry {
+ grpc_mdstr* key;
+ void* value; /* Must not be NULL. */
+ const grpc_mdstr_hash_table_vtable* vtable;
+} grpc_mdstr_hash_table_entry;
+
+/** Creates a new hash table containing \a entries, which is an array
+ of length \a num_entries.
+ Creates its own copy of all keys and values from \a entries. */
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
+ size_t num_entries, grpc_mdstr_hash_table_entry* entries);
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table);
+/** Returns 1 when \a table is destroyed. */
+int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table);
+
+/** Returns the number of entries in \a table. */
+size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table);
+
+/** Returns the value from \a table associated with \a key.
+ Returns NULL if \a key is not found. */
+void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* key);
+
+/** Compares two hash tables.
+ The sort order is stable but undefined. */
+int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
+ const grpc_mdstr_hash_table* table2);
+
+/** Iterates over the entries in \a table, calling \a func for each entry. */
+void grpc_mdstr_hash_table_iterate(
+ const grpc_mdstr_hash_table* table,
+ void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
+ void* user_data);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H */
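A minimal usage sketch for the new hash table (not part of the patch; the key string and the value vtable are invented for the example, and grpc_init() is assumed to have been called so mdstr interning is available):

#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/transport/mdstr_hash_table.h"

static void str_destroy(void* v) { gpr_free(v); }
static void* str_copy(void* v) { return gpr_strdup(v); }
static int str_cmp(void* a, void* b) { return strcmp(a, b); }
static const grpc_mdstr_hash_table_vtable str_vtable = {str_destroy, str_copy,
                                                        str_cmp};

static void example(void) {
  grpc_mdstr* key = grpc_mdstr_from_string("/pkg.Service/Method");
  grpc_mdstr_hash_table_entry entry = {key, "some value", &str_vtable};
  grpc_mdstr_hash_table* table = grpc_mdstr_hash_table_create(1, &entry);
  const char* value = grpc_mdstr_hash_table_get(table, key); /* table's copy */
  (void)value;
  GRPC_MDSTR_UNREF(key); /* the table holds its own reference to the key */
  grpc_mdstr_hash_table_unref(table);
}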
diff --git a/src/core/lib/transport/method_config.c b/src/core/lib/transport/method_config.c
new file mode 100644
index 0000000000..57d97700bf
--- /dev/null
+++ b/src/core/lib/transport/method_config.c
@@ -0,0 +1,340 @@
+//
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "src/core/lib/transport/method_config.h"
+
+#include <string.h>
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/transport/metadata.h"
+
+//
+// grpc_method_config
+//
+
+// bool vtable
+
+static void* bool_copy(void* valuep) {
+ bool value = *(bool*)valuep;
+ bool* new_value = gpr_malloc(sizeof(bool));
+ *new_value = value;
+ return new_value;
+}
+
+static int bool_cmp(void* v1, void* v2) {
+ bool b1 = *(bool*)v1;
+ bool b2 = *(bool*)v2;
+ if (!b1 && b2) return -1;
+ if (b1 && !b2) return 1;
+ return 0;
+}
+
+static grpc_mdstr_hash_table_vtable bool_vtable = {gpr_free, bool_copy,
+ bool_cmp};
+
+// timespec vtable
+
+static void* timespec_copy(void* valuep) {
+ gpr_timespec value = *(gpr_timespec*)valuep;
+ gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
+ *new_value = value;
+ return new_value;
+}
+
+static int timespec_cmp(void* v1, void* v2) {
+ return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
+}
+
+static grpc_mdstr_hash_table_vtable timespec_vtable = {gpr_free, timespec_copy,
+ timespec_cmp};
+
+// int32 vtable
+
+static void* int32_copy(void* valuep) {
+ int32_t value = *(int32_t*)valuep;
+ int32_t* new_value = gpr_malloc(sizeof(int32_t));
+ *new_value = value;
+ return new_value;
+}
+
+static int int32_cmp(void* v1, void* v2) {
+ int32_t i1 = *(int32_t*)v1;
+ int32_t i2 = *(int32_t*)v2;
+ if (i1 < i2) return -1;
+ if (i1 > i2) return 1;
+ return 0;
+}
+
+static grpc_mdstr_hash_table_vtable int32_vtable = {gpr_free, int32_copy,
+ int32_cmp};
+
+// Hash table keys.
+#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
+#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
+#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
+ "grpc.max_request_message_bytes" // int32
+#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
+ "grpc.max_response_message_bytes" // int32
+
+struct grpc_method_config {
+ grpc_mdstr_hash_table* table;
+ grpc_mdstr* wait_for_ready_key;
+ grpc_mdstr* timeout_key;
+ grpc_mdstr* max_request_message_bytes_key;
+ grpc_mdstr* max_response_message_bytes_key;
+};
+
+grpc_method_config* grpc_method_config_create(
+ bool* wait_for_ready, gpr_timespec* timeout,
+ int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
+ grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
+ memset(method_config, 0, sizeof(grpc_method_config));
+ method_config->wait_for_ready_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
+ method_config->timeout_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
+ method_config->max_request_message_bytes_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
+ method_config->max_response_message_bytes_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
+ grpc_mdstr_hash_table_entry entries[4];
+ size_t num_entries = 0;
+ if (wait_for_ready != NULL) {
+ entries[num_entries].key = method_config->wait_for_ready_key;
+ entries[num_entries].value = wait_for_ready;
+ entries[num_entries].vtable = &bool_vtable;
+ ++num_entries;
+ }
+ if (timeout != NULL) {
+ entries[num_entries].key = method_config->timeout_key;
+ entries[num_entries].value = timeout;
+ entries[num_entries].vtable = &timespec_vtable;
+ ++num_entries;
+ }
+ if (max_request_message_bytes != NULL) {
+ entries[num_entries].key = method_config->max_request_message_bytes_key;
+ entries[num_entries].value = max_request_message_bytes;
+ entries[num_entries].vtable = &int32_vtable;
+ ++num_entries;
+ }
+ if (max_response_message_bytes != NULL) {
+ entries[num_entries].key = method_config->max_response_message_bytes_key;
+ entries[num_entries].value = max_response_message_bytes;
+ entries[num_entries].vtable = &int32_vtable;
+ ++num_entries;
+ }
+ method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
+ return method_config;
+}
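A short creation/teardown sketch for grpc_method_config_create (the values are invented; grpc_init() is assumed to have been called so the mdstr keys can be interned):

#include <stdbool.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/method_config.h"

static void example_config(void) {
  bool wait_for_ready = true;
  gpr_timespec timeout = gpr_time_from_seconds(30, GPR_TIMESPAN);
  /* Pass NULL to leave the message-size limits unset. */
  grpc_method_config* config =
      grpc_method_config_create(&wait_for_ready, &timeout, NULL, NULL);
  const gpr_timespec* stored = grpc_method_config_get_timeout(config);
  (void)stored; /* points at the config's own copy of `timeout` */
  grpc_method_config_unref(config);
}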
+
+grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
+ grpc_mdstr_hash_table_ref(method_config->table);
+ return method_config;
+}
+
+void grpc_method_config_unref(grpc_method_config* method_config) {
+ if (grpc_mdstr_hash_table_unref(method_config->table)) {
+ GRPC_MDSTR_UNREF(method_config->wait_for_ready_key);
+ GRPC_MDSTR_UNREF(method_config->timeout_key);
+ GRPC_MDSTR_UNREF(method_config->max_request_message_bytes_key);
+ GRPC_MDSTR_UNREF(method_config->max_response_message_bytes_key);
+ gpr_free(method_config);
+ }
+}
+
+int grpc_method_config_cmp(const grpc_method_config* method_config1,
+ const grpc_method_config* method_config2) {
+ return grpc_mdstr_hash_table_cmp(method_config1->table,
+ method_config2->table);
+}
+
+const bool* grpc_method_config_get_wait_for_ready(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(method_config->table,
+ method_config->wait_for_ready_key);
+}
+
+const gpr_timespec* grpc_method_config_get_timeout(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(method_config->table,
+ method_config->timeout_key);
+}
+
+const int32_t* grpc_method_config_get_max_request_message_bytes(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(
+ method_config->table, method_config->max_request_message_bytes_key);
+}
+
+const int32_t* grpc_method_config_get_max_response_message_bytes(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(
+ method_config->table, method_config->max_response_message_bytes_key);
+}
+
+//
+// grpc_method_config_table
+//
+
+static void method_config_unref(void* valuep) {
+ grpc_method_config_unref(valuep);
+}
+
+static void* method_config_ref(void* valuep) {
+ return grpc_method_config_ref(valuep);
+}
+
+static int method_config_cmp(void* valuep1, void* valuep2) {
+ return grpc_method_config_cmp(valuep1, valuep2);
+}
+
+static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
+ method_config_unref, method_config_ref, method_config_cmp};
+
+grpc_method_config_table* grpc_method_config_table_create(
+ size_t num_entries, grpc_method_config_table_entry* entries) {
+ grpc_mdstr_hash_table_entry* hash_table_entries =
+ gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
+ for (size_t i = 0; i < num_entries; ++i) {
+ hash_table_entries[i].key = entries[i].method_name;
+ hash_table_entries[i].value = entries[i].method_config;
+ hash_table_entries[i].vtable = &method_config_table_vtable;
+ }
+ grpc_method_config_table* method_config_table =
+ grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
+ gpr_free(hash_table_entries);
+ return method_config_table;
+}
+
+grpc_method_config_table* grpc_method_config_table_ref(
+ grpc_method_config_table* table) {
+ return grpc_mdstr_hash_table_ref(table);
+}
+
+void grpc_method_config_table_unref(grpc_method_config_table* table) {
+ grpc_mdstr_hash_table_unref(table);
+}
+
+int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
+ const grpc_method_config_table* table2) {
+ return grpc_mdstr_hash_table_cmp(table1, table2);
+}
+
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path) {
+ void* value = grpc_mdstr_hash_table_get(table, path);
+ // If we didn't find a match for the path, try looking for a wildcard
+ // entry (i.e., change "/service/method" to "/service/*").
+ if (value == NULL) {
+ const char* path_str = grpc_mdstr_as_c_string(path);
+ const char* sep = strrchr(path_str, '/') + 1;
+ const size_t len = (size_t)(sep - path_str);
+ char* buf = gpr_malloc(len + 2); // '*' and NUL
+ memcpy(buf, path_str, len);
+ buf[len] = '*';
+ buf[len + 1] = '\0';
+ grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
+ gpr_free(buf);
+ value = grpc_mdstr_hash_table_get(table, wildcard_path);
+ GRPC_MDSTR_UNREF(wildcard_path);
+ }
+ return value;
+}
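For orientation, a lookup sketch for the wildcard fallback above (the table and the service/method names are invented for the example):

/* An exact "/EchoService/Echo" entry wins; otherwise the lookup falls back
   to an "/EchoService/*" entry if one was registered. */
static grpc_method_config* lookup_config(const grpc_mdstr_hash_table* table) {
  grpc_mdstr* path = grpc_mdstr_from_string("/EchoService/Echo");
  grpc_method_config* config = grpc_method_config_table_get(table, path);
  GRPC_MDSTR_UNREF(path);
  return config; /* NULL if neither the exact nor the wildcard entry exists */
}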
+
+static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
+
+static void destroy_arg(void* p) { grpc_method_config_table_unref(p); }
+
+static int cmp_arg(void* p1, void* p2) {
+ return grpc_method_config_table_cmp(p1, p2);
+}
+
+static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
+
+grpc_arg grpc_method_config_table_create_channel_arg(
+ grpc_method_config_table* table) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_SERVICE_CONFIG;
+ arg.value.pointer.p = table;
+ arg.value.pointer.vtable = &arg_vtable;
+ return arg;
+}
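A usage sketch for the channel-arg helper above (the surrounding args and table are assumed to exist; grpc_channel_args_copy_and_add copies the arg and, via the pointer vtable, takes its own reference on the table):

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/transport/method_config.h"

static grpc_channel_args* add_service_config(
    const grpc_channel_args* existing_args, grpc_method_config_table* table) {
  grpc_arg arg = grpc_method_config_table_create_channel_arg(table);
  return grpc_channel_args_copy_and_add(existing_args, &arg, 1);
}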
+
+// State used by convert_entry() below.
+typedef struct conversion_state {
+ void* (*convert_value)(const grpc_method_config* method_config);
+ const grpc_mdstr_hash_table_vtable* vtable;
+ size_t num_entries;
+ grpc_mdstr_hash_table_entry* entries;
+} conversion_state;
+
+// A function to be passed to grpc_mdstr_hash_table_iterate() to create
+// a copy of the entries.
+static void convert_entry(const grpc_mdstr_hash_table_entry* entry,
+ void* user_data) {
+ conversion_state* state = user_data;
+ state->entries[state->num_entries].key = GRPC_MDSTR_REF(entry->key);
+ state->entries[state->num_entries].value = state->convert_value(entry->value);
+ state->entries[state->num_entries].vtable = state->vtable;
+ ++state->num_entries;
+}
+
+grpc_mdstr_hash_table* grpc_method_config_table_convert(
+ const grpc_method_config_table* table,
+ void* (*convert_value)(const grpc_method_config* method_config),
+ const grpc_mdstr_hash_table_vtable* vtable) {
+ // Create an array of the entries in the table with converted values.
+ conversion_state state;
+ state.convert_value = convert_value;
+ state.vtable = vtable;
+ state.num_entries = 0;
+ state.entries = gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) *
+ grpc_mdstr_hash_table_num_entries(table));
+ grpc_mdstr_hash_table_iterate(table, convert_entry, &state);
+ // Create a new table based on the array we just constructed.
+ grpc_mdstr_hash_table* new_table =
+ grpc_mdstr_hash_table_create(state.num_entries, state.entries);
+ // Clean up the array.
+ for (size_t i = 0; i < state.num_entries; ++i) {
+ GRPC_MDSTR_UNREF(state.entries[i].key);
+ vtable->destroy_value(state.entries[i].value);
+ }
+ gpr_free(state.entries);
+ // Return the new table.
+ return new_table;
+}
diff --git a/src/core/lib/transport/method_config.h b/src/core/lib/transport/method_config.h
new file mode 100644
index 0000000000..58fedd9436
--- /dev/null
+++ b/src/core/lib/transport/method_config.h
@@ -0,0 +1,136 @@
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
+#define GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
+
+#include <stdbool.h>
+
+#include <grpc/impl/codegen/gpr_types.h>
+#include <grpc/impl/codegen/grpc_types.h>
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/transport/metadata.h"
+
+/// Per-method configuration.
+typedef struct grpc_method_config grpc_method_config;
+
+/// Creates a grpc_method_config with the specified parameters.
+/// Any parameter may be NULL to indicate that the value is unset.
+///
+/// \a wait_for_ready indicates whether the client should wait until the
+/// request deadline for the channel to become ready, even if there is a
+/// temporary failure before the deadline while attempting to connect.
+///
+/// \a timeout indicates the timeout for calls.
+///
+/// \a max_request_message_bytes and \a max_response_message_bytes
+/// indicate the maximum sizes of the request (checked when sending) and
+/// response (checked when receiving) messages.
+grpc_method_config* grpc_method_config_create(
+ bool* wait_for_ready, gpr_timespec* timeout,
+ int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
+
+grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
+void grpc_method_config_unref(grpc_method_config* method_config);
+
+/// Compares two grpc_method_configs.
+/// The sort order is stable but undefined.
+int grpc_method_config_cmp(const grpc_method_config* method_config1,
+ const grpc_method_config* method_config2);
+
+/// These methods return NULL if the requested field is unset.
+/// The caller does NOT take ownership of the result.
+const bool* grpc_method_config_get_wait_for_ready(
+ const grpc_method_config* method_config);
+const gpr_timespec* grpc_method_config_get_timeout(
+ const grpc_method_config* method_config);
+const int32_t* grpc_method_config_get_max_request_message_bytes(
+ const grpc_method_config* method_config);
+const int32_t* grpc_method_config_get_max_response_message_bytes(
+ const grpc_method_config* method_config);
+
+/// A table of method configs.
+typedef grpc_mdstr_hash_table grpc_method_config_table;
+
+typedef struct grpc_method_config_table_entry {
+ /// The name is of one of the following forms:
+ /// service/method -- specifies exact service and method name
+ /// service/* -- matches all methods for the specified service
+ grpc_mdstr* method_name;
+ grpc_method_config* method_config;
+} grpc_method_config_table_entry;
+
+/// Takes new references to all keys and values in \a entries.
+grpc_method_config_table* grpc_method_config_table_create(
+ size_t num_entries, grpc_method_config_table_entry* entries);
+
+grpc_method_config_table* grpc_method_config_table_ref(
+ grpc_method_config_table* table);
+void grpc_method_config_table_unref(grpc_method_config_table* table);
+
+/// Compares two grpc_method_config_tables.
+/// The sort order is stable but undefined.
+int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
+ const grpc_method_config_table* table2);
+
+/// Gets the method config for the specified \a path, which should be of
+/// the form "/service/method".
+/// Returns NULL if the method has no config.
+/// Caller does NOT own a reference to the result.
+///
+/// Note: This returns a void* instead of a grpc_method_config* so that
+/// it can also be used for tables constructed via
+/// grpc_method_config_table_convert().
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path);
+
+/// Returns a channel arg containing \a table.
+grpc_arg grpc_method_config_table_create_channel_arg(
+ grpc_method_config_table* table);
+
+/// Generates a new table from \a table whose values are converted to a
+/// new form via the \a convert_value function. The new table will use
+/// \a vtable for its values.
+///
+/// This is generally used to convert the table's value type from
+/// grpc_method_config to a simple struct containing only the parameters
+/// relevant to a particular filter, thus avoiding the need for a hash
+/// table lookup on the fast path. In that scenario, \a convert_value
+/// will return a new instance of the struct containing the values from
+/// the grpc_method_config, and \a vtable provides the methods for
+/// operating on the struct type.
+grpc_mdstr_hash_table* grpc_method_config_table_convert(
+ const grpc_method_config_table* table,
+ void* (*convert_value)(const grpc_method_config* method_config),
+ const grpc_mdstr_hash_table_vtable* vtable);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H */
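A minimal sketch of the conversion hook described above, assuming a hypothetical per-filter struct that only cares about the timeout (none of the names below come from the patch):

#include <grpc/support/alloc.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/method_config.h"

typedef struct {
  gpr_timespec timeout;
} timeout_only_config;

static void* convert_timeout_only(const grpc_method_config* method_config) {
  timeout_only_config* result = gpr_malloc(sizeof(*result));
  const gpr_timespec* timeout = grpc_method_config_get_timeout(method_config);
  result->timeout =
      timeout != NULL ? *timeout : gpr_inf_future(GPR_CLOCK_MONOTONIC);
  return result;
}

static void timeout_only_destroy(void* v) { gpr_free(v); }
static void* timeout_only_copy(void* v) {
  timeout_only_config* copy = gpr_malloc(sizeof(*copy));
  *copy = *(timeout_only_config*)v;
  return copy;
}
static int timeout_only_cmp(void* v1, void* v2) {
  return gpr_time_cmp(((timeout_only_config*)v1)->timeout,
                      ((timeout_only_config*)v2)->timeout);
}
static const grpc_mdstr_hash_table_vtable timeout_only_vtable = {
    timeout_only_destroy, timeout_only_copy, timeout_only_cmp};

/* Usage: grpc_mdstr_hash_table* fast_table = grpc_method_config_table_convert(
       method_config_table, convert_timeout_only, &timeout_only_vtable); */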
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 5e0352a467..8b22592b45 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -126,9 +126,9 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"if-range",
"if-unmodified-since",
"last-modified",
+ "lb-cost-bin",
+ "lb-token",
"link",
- "load-reporting-initial",
- "load-reporting-trailing",
"location",
"max-forwards",
":method",
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 5b9ee1a60a..28ad6f2961 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -175,12 +175,12 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
/* "last-modified" */
#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
+/* "lb-cost-bin" */
+#define GRPC_MDSTR_LB_COST_BIN (&grpc_static_mdstr_table[64])
+/* "lb-token" */
+#define GRPC_MDSTR_LB_TOKEN (&grpc_static_mdstr_table[65])
/* "link" */
-#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[64])
-/* "load-reporting-initial" */
-#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[65])
-/* "load-reporting-trailing" */
-#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[66])
/* "location" */
#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
/* "max-forwards" */
@@ -337,13 +337,12 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
/* "last-modified": "" */
#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
+/* "lb-cost-bin": "" */
+#define GRPC_MDELEM_LB_COST_BIN_EMPTY (&grpc_static_mdelem_table[46])
+/* "lb-token": "" */
+#define GRPC_MDELEM_LB_TOKEN_EMPTY (&grpc_static_mdelem_table[47])
/* "link": "" */
-#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
-/* "load-reporting-initial": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY (&grpc_static_mdelem_table[47])
-/* "load-reporting-trailing": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_TRAILING_EMPTY \
- (&grpc_static_mdelem_table[48])
+#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[48])
/* "location": "" */
#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[49])
/* "max-forwards": "" */
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 82fc605218..75aec7a5b4 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -46,8 +46,9 @@
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
- refcount, refcount->destroy.cb_arg, (int)val, (int)val + 1, reason);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
@@ -58,8 +59,9 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) {
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
- refcount, refcount->destroy.cb_arg, (int)val, (int)val - 1, reason);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -274,3 +276,28 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
op->op.on_consumed = &op->outer_on_complete;
return &op->op;
}
+
+typedef struct {
+ grpc_closure outer_on_complete;
+ grpc_closure *inner_on_complete;
+ grpc_transport_stream_op op;
+} made_transport_stream_op;
+
+static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ made_transport_stream_op *op = arg;
+ grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
+ NULL);
+ gpr_free(op);
+}
+
+grpc_transport_stream_op *grpc_make_transport_stream_op(
+ grpc_closure *on_complete) {
+ made_transport_stream_op *op = gpr_malloc(sizeof(*op));
+ grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
+ op);
+ op->inner_on_complete = on_complete;
+ memset(&op->op, 0, sizeof(op->op));
+ op->op.on_complete = &op->outer_on_complete;
+ return &op->op;
+}
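A caller-side sketch of the new helper (the callback, transport, and stream below are placeholders, not part of the patch):

#include "src/core/lib/transport/transport.h"

/* Sketch only: build a self-freeing stream op that cancels the stream and
   notifies `on_cancelled` once the transport has finished with the op. */
static void on_cancelled(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* ... */
}

static void cancel_stream(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
                          grpc_stream *stream) {
  grpc_transport_stream_op *op =
      grpc_make_transport_stream_op(grpc_closure_create(on_cancelled, NULL));
  op->cancel_error = GRPC_ERROR_CANCELLED;
  grpc_transport_perform_stream_op(exec_ctx, transport, stream, op);
}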
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 8dc393fd61..50253ebad1 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -113,6 +113,10 @@ typedef struct grpc_transport_stream_op {
have been completed. */
grpc_closure *on_complete;
+ /** Is the completion of this op covered by a poller (if false: the op should
+ complete independently of some pollset being polled) */
+ bool covered_by_poller;
+
/** Send initial metadata to the peer, from the provided metadata batch.
idempotent_request MUST be set if this is non-null */
grpc_metadata_batch *send_initial_metadata;
@@ -252,6 +256,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
gpr_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
+char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@@ -293,6 +298,10 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
+/* Allocate a grpc_transport_stream_op, and preconfigure its on_complete
+   closure to call \a on_consumed and then delete the returned op */
+grpc_transport_stream_op *grpc_make_transport_stream_op(
+ grpc_closure *on_consumed);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 138591db2a..533ec52077 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -41,6 +41,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/connectivity_state.h"
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
@@ -72,56 +73,51 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
- int first = 1;
gpr_strvec b;
gpr_strvec_init(&b);
+ gpr_strvec_add(
+ &b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
+
if (op->send_initial_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
put_metadata_list(&b, *op->send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->send_message != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
}
if (op->send_trailing_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
put_metadata_list(&b, *op->send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->recv_initial_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
if (op->recv_message != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
if (op->recv_trailing_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_error != GRPC_ERROR_NONE) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
grpc_error_free_string(msg);
@@ -129,8 +125,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
}
if (op->close_error != GRPC_ERROR_NONE) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->close_error);
gpr_asprintf(&tmp, "CLOSE:%s", msg);
grpc_error_free_string(msg);
@@ -143,6 +138,82 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
return out;
}
+char *grpc_transport_op_string(grpc_transport_op *op) {
+ char *tmp;
+ char *out;
+ bool first = true;
+
+ gpr_strvec b;
+ gpr_strvec_init(&b);
+
+ if (op->on_connectivity_state_change != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ if (op->connectivity_state != NULL) {
+ gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s",
+ op->on_connectivity_state_change,
+ grpc_connectivity_state_name(*op->connectivity_state));
+ gpr_strvec_add(&b, tmp);
+ } else {
+ gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:unsubscribe",
+ op->on_connectivity_state_change);
+ gpr_strvec_add(&b, tmp);
+ }
+ }
+
+ if (op->disconnect_with_error != GRPC_ERROR_NONE) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ const char *err = grpc_error_string(op->disconnect_with_error);
+ gpr_asprintf(&tmp, "DISCONNECT:%s", err);
+ gpr_strvec_add(&b, tmp);
+ grpc_error_free_string(err);
+ }
+
+ if (op->send_goaway) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ char *msg = op->goaway_message == NULL
+ ? "null"
+ : gpr_dump_slice(*op->goaway_message,
+ GPR_DUMP_ASCII | GPR_DUMP_HEX);
+ gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
+ if (op->goaway_message != NULL) gpr_free(msg);
+ gpr_strvec_add(&b, tmp);
+ }
+
+ if (op->set_accept_stream) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_asprintf(&tmp, "SET_ACCEPT_STREAM:%p(%p,...)", op->set_accept_stream_fn,
+ op->set_accept_stream_user_data);
+ gpr_strvec_add(&b, tmp);
+ }
+
+ if (op->bind_pollset != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET"));
+ }
+
+ if (op->bind_pollset_set != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
+ }
+
+ if (op->send_ping != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
+ }
+
+ out = gpr_strvec_flatten(&b, NULL);
+ gpr_strvec_destroy(&b);
+
+ return out;
+}
+
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op) {
char *str = grpc_transport_stream_op_string(op);
diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c
index e91c6316e7..366dca9507 100644
--- a/src/core/lib/tsi/ssl_transport_security.c
+++ b/src/core/lib/tsi/ssl_transport_security.c
@@ -39,7 +39,9 @@
#include <string.h>
/* TODO(jboeuf): refactor inet_ntop into a portability header. */
-#ifdef GPR_WINSOCK_SOCKET
+/* Note: for whoever reads this and tries to refactor it, this
+   can't be in grpc, it has to be in gpr. */
+#ifdef GPR_WINDOWS
#include <ws2tcpip.h>
#else
#include <arpa/inet.h>
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
index d0b5f5c702..d339ed327f 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -35,12 +35,12 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
}
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.c
index 7a7a9ce477..2efd9cd1ad 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.c
@@ -35,8 +35,8 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
extern void grpc_lb_policy_grpclb_init(void);
extern void grpc_lb_policy_grpclb_shutdown(void);
extern void grpc_lb_policy_pick_first_init(void);
@@ -55,8 +55,8 @@ extern void census_grpc_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
grpc_register_plugin(grpc_lb_policy_grpclb_init,
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
index ad4ddf0ff4..8b18af699d 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
@@ -35,8 +35,8 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
@@ -55,8 +55,8 @@ extern void census_grpc_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
grpc_register_plugin(grpc_resolver_dns_native_init,
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,