author    Yash Tibrewal <yashkt@google.com>    2018-03-14 18:41:33 -0700
committer Yash Tibrewal <yashkt@google.com>    2018-03-14 18:41:33 -0700
commit    44a1588b5488bd3f57dfc959af3aa7cc02480e41 (patch)
tree      c7ad5ded604de4cbbf98fe9e54b426e2bd402413 /src
parent    0dee3d93c91bb27df25ad418c8bd0029a27360d1 (diff)
parent    7e24da469c2a931ee988d35bf99b9e47e8fa5940 (diff)
Merge master into yashykt:socklent
Diffstat (limited to 'src')
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 15
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc | 26
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 16
-rw-r--r--  src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc | 1
-rw-r--r--  src/core/lib/gpr/cpu_linux.cc | 4
-rw-r--r--  src/core/lib/iomgr/endpoint.cc | 2
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_windows.cc | 9
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc | 20
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc | 52
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h | 28
-rw-r--r--  src/core/lib/iomgr/iomgr.cc | 1
-rw-r--r--  src/core/lib/iomgr/iomgr_custom.cc | 63
-rw-r--r--  src/core/lib/iomgr/iomgr_custom.h (renamed from src/core/lib/iomgr/iomgr_uv.h) | 24
-rw-r--r--  src/core/lib/iomgr/iomgr_internal.cc | 43
-rw-r--r--  src/core/lib/iomgr/iomgr_internal.h | 12
-rw-r--r--  src/core/lib/iomgr/iomgr_posix.cc | 31
-rw-r--r--  src/core/lib/iomgr/iomgr_uv.cc | 35
-rw-r--r--  src/core/lib/iomgr/iomgr_windows.cc | 30
-rw-r--r--  src/core/lib/iomgr/pollset.cc | 56
-rw-r--r--  src/core/lib/iomgr/pollset.h | 18
-rw-r--r--  src/core/lib/iomgr/pollset_custom.cc | 106
-rw-r--r--  src/core/lib/iomgr/pollset_custom.h | 35
-rw-r--r--  src/core/lib/iomgr/pollset_set.cc (renamed from src/core/lib/iomgr/pollset_set_uv.cc) | 36
-rw-r--r--  src/core/lib/iomgr/pollset_set.h | 11
-rw-r--r--  src/core/lib/iomgr/pollset_set_custom.cc | 48
-rw-r--r--  src/core/lib/iomgr/pollset_set_custom.h (renamed from src/core/lib/iomgr/timer_uv.h) | 18
-rw-r--r--  src/core/lib/iomgr/pollset_set_windows.cc | 25
-rw-r--r--  src/core/lib/iomgr/pollset_uv.cc | 145
-rw-r--r--  src/core/lib/iomgr/pollset_uv.h | 9
-rw-r--r--  src/core/lib/iomgr/pollset_windows.cc | 28
-rw-r--r--  src/core/lib/iomgr/port.h | 27
-rw-r--r--  src/core/lib/iomgr/resolve_address.cc | 50
-rw-r--r--  src/core/lib/iomgr/resolve_address.h | 33
-rw-r--r--  src/core/lib/iomgr/resolve_address_custom.cc | 187
-rw-r--r--  src/core/lib/iomgr/resolve_address_custom.h | 43
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.cc | 28
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.cc | 286
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.cc | 28
-rw-r--r--  src/core/lib/iomgr/resource_quota.h | 4
-rw-r--r--  src/core/lib/iomgr/sockaddr.h | 14
-rw-r--r--  src/core/lib/iomgr/sockaddr_custom.h | 54
-rw-r--r--  src/core/lib/iomgr/sockaddr_posix.h | 24
-rw-r--r--  src/core/lib/iomgr/sockaddr_utils.cc | 157
-rw-r--r--  src/core/lib/iomgr/sockaddr_utils.h | 2
-rw-r--r--  src/core/lib/iomgr/sockaddr_windows.h | 19
-rw-r--r--  src/core/lib/iomgr/socket_utils.h | 9
-rw-r--r--  src/core/lib/iomgr/socket_utils_common_posix.cc | 18
-rw-r--r--  src/core/lib/iomgr/socket_utils_linux.cc | 3
-rw-r--r--  src/core/lib/iomgr/socket_utils_posix.cc | 2
-rw-r--r--  src/core/lib/iomgr/socket_utils_uv.cc | 17
-rw-r--r--  src/core/lib/iomgr/socket_utils_windows.cc | 8
-rw-r--r--  src/core/lib/iomgr/tcp_client.cc | 36
-rw-r--r--  src/core/lib/iomgr/tcp_client.h | 11
-rw-r--r--  src/core/lib/iomgr/tcp_client_custom.cc | 151
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc | 30
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.cc | 177
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.cc | 31
-rw-r--r--  src/core/lib/iomgr/tcp_custom.cc | 365
-rw-r--r--  src/core/lib/iomgr/tcp_custom.h | 83
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc | 2
-rw-r--r--  src/core/lib/iomgr/tcp_server.cc | 73
-rw-r--r--  src/core/lib/iomgr/tcp_server.h | 22
-rw-r--r--  src/core/lib/iomgr/tcp_server_custom.cc | 479
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc | 49
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_common.cc | 5
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc | 10
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.cc | 473
-rw-r--r--  src/core/lib/iomgr/tcp_server_windows.cc | 62
-rw-r--r--  src/core/lib/iomgr/tcp_uv.cc | 627
-rw-r--r--  src/core/lib/iomgr/tcp_uv.h | 53
-rw-r--r--  src/core/lib/iomgr/tcp_windows.cc | 2
-rw-r--r--  src/core/lib/iomgr/timer.cc | 45
-rw-r--r--  src/core/lib/iomgr/timer.h | 48
-rw-r--r--  src/core/lib/iomgr/timer_custom.cc | 93
-rw-r--r--  src/core/lib/iomgr/timer_custom.h | 43
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc | 20
-rw-r--r--  src/core/lib/iomgr/timer_heap.cc | 4
-rw-r--r--  src/core/lib/iomgr/timer_uv.cc | 62
-rw-r--r--  src/core/lib/iomgr/udp_server.cc | 15
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix.cc | 12
-rw-r--r--  src/core/lib/surface/call.cc | 34
-rwxr-xr-x  src/csharp/build_packages_dotnetcli.bat | 8
-rwxr-xr-x  src/csharp/build_packages_dotnetcli.sh | 6
-rw-r--r--  src/php/ext/grpc/call_credentials.c | 3
-rw-r--r--  src/python/grpcio/grpc_core_dependencies.py | 19
-rw-r--r--  src/python/grpcio_tests/tests/tests.json | 168
-rw-r--r--  src/python/grpcio_tests/tests/unit/_compression_test.py | 6
-rw-r--r--  src/python/grpcio_tests/tests/unit/_early_ok_test.py | 206
-rw-r--r--  src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py | 6
-rw-r--r--  src/python/grpcio_tests/tests/unit/_metadata_test.py | 9
-rw-r--r--  src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py | 9
-rw-r--r--  src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py | 132
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py | 13
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py | 21
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py | 13
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py | 287
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py | 432
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py | 508
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py | 198
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py | 304
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py | 390
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py | 53
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py | 212
-rw-r--r--  src/ruby/lib/grpc/core/time_consts.rb | 2
-rw-r--r--  src/ruby/lib/grpc/generic/bidi_call.rb | 2
-rw-r--r--  src/ruby/lib/grpc/generic/client_stub.rb | 16
-rw-r--r--  src/ruby/lib/grpc/generic/interceptors.rb | 2
-rw-r--r--  src/ruby/lib/grpc/generic/rpc_server.rb | 4
108 files changed, 3556 insertions, 4550 deletions
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 101b631945..e805593dd8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -61,6 +61,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
#include <inttypes.h>
#include <limits.h>
@@ -417,20 +418,20 @@ void ParseServer(const grpc_grpclb_server* server,
grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
- const uint16_t netorder_port = htons((uint16_t)server->port);
+ const uint16_t netorder_port = grpc_htons((uint16_t)server->port);
/* the addresses are given in binary format (a in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
- addr->len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
- struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
- addr4->sin_family = AF_INET;
+ addr->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in));
+ grpc_sockaddr_in* addr4 = reinterpret_cast<grpc_sockaddr_in*>(&addr->addr);
+ addr4->sin_family = GRPC_AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
- addr->len = static_cast<socklen_t>(sizeof(struct sockaddr_in6));
- struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
- addr6->sin6_family = AF_INET6;
+ addr->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
+ grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)&addr->addr;
+ addr6->sin6_family = GRPC_AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
}
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index 2414227e22..b3900114ad 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -20,6 +20,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
#include <stdio.h>
#include <string.h>
@@ -71,10 +72,10 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
- addr->len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
- struct sockaddr_in* in = reinterpret_cast<struct sockaddr_in*>(addr->addr);
- in->sin_family = AF_INET;
- if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
+ addr->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in));
+ grpc_sockaddr_in* in = reinterpret_cast<grpc_sockaddr_in*>(addr->addr);
+ in->sin_family = GRPC_AF_INET;
+ if (grpc_inet_pton(GRPC_AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
goto done;
}
@@ -88,7 +89,7 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
goto done;
}
- in->sin_port = htons(static_cast<uint16_t>(port_num));
+ in->sin_port = grpc_htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
@@ -117,19 +118,20 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
- addr->len = static_cast<socklen_t>(sizeof(struct sockaddr_in6));
- struct sockaddr_in6* in6 = reinterpret_cast<struct sockaddr_in6*>(addr->addr);
- in6->sin6_family = AF_INET6;
+ addr->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
+ grpc_sockaddr_in6* in6 = reinterpret_cast<grpc_sockaddr_in6*>(addr->addr);
+ in6->sin6_family = GRPC_AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
char* host_end = static_cast<char*>(gpr_memrchr(host, '%', strlen(host)));
if (host_end != nullptr) {
GPR_ASSERT(host_end >= host);
- char host_without_scope[INET6_ADDRSTRLEN];
+ char host_without_scope[GRPC_INET6_ADDRSTRLEN];
size_t host_without_scope_len = static_cast<size_t>(host_end - host);
uint32_t sin6_scope_id = 0;
strncpy(host_without_scope, host, host_without_scope_len);
host_without_scope[host_without_scope_len] = '\0';
- if (inet_pton(AF_INET6, host_without_scope, &in6->sin6_addr) == 0) {
+ if (grpc_inet_pton(GRPC_AF_INET6, host_without_scope, &in6->sin6_addr) ==
+ 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host_without_scope);
goto done;
}
@@ -142,7 +144,7 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
// Handle "sin6_scope_id" being type "u_long". See grpc issue #10027.
in6->sin6_scope_id = sin6_scope_id;
} else {
- if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
+ if (grpc_inet_pton(GRPC_AF_INET6, host, &in6->sin6_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
goto done;
}
@@ -157,7 +159,7 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
goto done;
}
- in6->sin6_port = htons(static_cast<uint16_t>(port_num));
+ in6->sin6_port = grpc_htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index aa93e5d8de..c63de3c509 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -440,6 +440,19 @@ class AresDnsResolverFactory : public ResolverFactory {
} // namespace grpc_core
+extern grpc_address_resolver_vtable* grpc_resolve_address_impl;
+static grpc_address_resolver_vtable* default_resolver;
+
+static grpc_error* blocking_resolve_address_ares(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) {
+ return default_resolver->blocking_resolve_address(name, default_port,
+ addresses);
+}
+
+static grpc_address_resolver_vtable ares_resolver = {
+ grpc_resolve_address_ares, blocking_resolve_address_ares};
+
void grpc_resolver_dns_ares_init() {
char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
@@ -450,7 +463,8 @@ void grpc_resolver_dns_ares_init() {
GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
return;
}
- grpc_resolve_address = grpc_resolve_address_ares;
+ default_resolver = grpc_resolve_address_impl;
+ grpc_set_resolver_impl(&ares_resolver);
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::UniquePtr<grpc_core::ResolverFactory>(
grpc_core::New<grpc_core::AresDnsResolverFactory>()));
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
index 822236dd2d..99f18cdf39 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
@@ -41,6 +41,5 @@ int grpc_server_add_insecure_http2_port(grpc_server* server, const char* addr) {
GRPC_ERROR_UNREF(err);
}
-
return port_num;
}
diff --git a/src/core/lib/gpr/cpu_linux.cc b/src/core/lib/gpr/cpu_linux.cc
index fda28916f8..9fc2f0b141 100644
--- a/src/core/lib/gpr/cpu_linux.cc
+++ b/src/core/lib/gpr/cpu_linux.cc
@@ -71,6 +71,10 @@ unsigned gpr_cpu_current_cpu(void) {
gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
return 0;
}
+ if (static_cast<unsigned>(cpu) >= gpr_cpu_num_cores()) {
+ gpr_log(GPR_ERROR, "Cannot handle hot-plugged CPUs");
+ return 0;
+ }
return static_cast<unsigned>(cpu);
#endif
}
diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc
index e22c21e4bd..92e7930111 100644
--- a/src/core/lib/iomgr/endpoint.cc
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -20,6 +20,8 @@
#include "src/core/lib/iomgr/endpoint.h"
+grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
+
void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
ep->vtable->read(ep, slices, cb);
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.cc b/src/core/lib/iomgr/endpoint_pair_windows.cc
index 416c9d88a1..177331d681 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.cc
+++ b/src/core/lib/iomgr/endpoint_pair_windows.cc
@@ -22,6 +22,7 @@
#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/endpoint_pair.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include <errno.h>
@@ -46,19 +47,19 @@ static void create_sockets(SOCKET sv[2]) {
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_family = AF_INET;
- GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) !=
+ GPR_ASSERT(bind(lst_sock, (grpc_sockaddr*)&addr, sizeof(addr)) !=
SOCKET_ERROR);
GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
- GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) !=
+ GPR_ASSERT(getsockname(lst_sock, (grpc_sockaddr*)&addr, &addr_len) !=
SOCKET_ERROR);
cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
- GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL,
+ GPR_ASSERT(WSAConnect(cli_sock, (grpc_sockaddr*)&addr, addr_len, NULL, NULL,
NULL, NULL) == 0);
- svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
+ svr_sock = accept(lst_sock, (grpc_sockaddr*)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);
closesocket(lst_sock);
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index d3cbaf9d0a..0ef7c03056 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -59,7 +59,7 @@
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1
#define MAX_EPOLL_EVENTS 100
-#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 16
+#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 1
grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
"pollable_refcount");
@@ -198,7 +198,6 @@ struct grpc_pollset_worker {
struct grpc_pollset {
gpr_mu mu;
- gpr_atm worker_count;
pollable* active_pollable;
bool kicked_without_poller;
grpc_closure* shutdown_closure;
@@ -686,7 +685,6 @@ static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
gpr_mu_init(&pollset->mu);
- gpr_atm_no_barrier_store(&pollset->worker_count, 0);
pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
pollset->kicked_without_poller = false;
pollset->shutdown_closure = nullptr;
@@ -760,20 +758,8 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
pollable* pollable_obj, bool drain) {
GPR_TIMER_SCOPE("pollable_process_events", 0);
static const char* err_desc = "pollset_process_events";
- // Use a simple heuristic to determine how many fd events to process
- // per loop iteration. (events/workers)
- int handle_count = 1;
- int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count);
- GPR_ASSERT(worker_count > 0);
- handle_count =
- (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count;
- if (handle_count == 0) {
- handle_count = 1;
- } else if (handle_count > MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) {
- handle_count = MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL;
- }
grpc_error* error = GRPC_ERROR_NONE;
- for (int i = 0; (drain || i < handle_count) &&
+ for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
pollable_obj->event_cursor != pollable_obj->event_count;
i++) {
int n = pollable_obj->event_cursor++;
@@ -898,7 +884,6 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
GPR_TIMER_SCOPE("begin_worker", 0);
bool do_poll =
(pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
- gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1);
if (worker_hdl != nullptr) *worker_hdl = worker;
worker->initialized_cv = false;
worker->kicked = false;
@@ -979,7 +964,6 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
- gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1);
}
#ifndef NDEBUG
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 39ce459f1e..8b80070265 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -224,26 +224,26 @@ void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
g_event_engine->fd_notify_on_write(fd, closure);
}
-size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
+static size_t pollset_size(void) { return g_event_engine->pollset_size; }
-void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
GRPC_POLLING_API_TRACE("pollset_init(%p)", pollset);
g_event_engine->pollset_init(pollset, mu);
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
GRPC_POLLING_API_TRACE("pollset_shutdown(%p)", pollset);
g_event_engine->pollset_shutdown(pollset, closure);
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_destroy(%p)", pollset);
g_event_engine->pollset_destroy(pollset);
}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
- grpc_pollset_worker** worker,
- grpc_millis deadline) {
+static grpc_error* pollset_work(grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
+ grpc_millis deadline) {
GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") begin", pollset,
deadline);
grpc_error* err = g_event_engine->pollset_work(pollset, worker, deadline);
@@ -252,8 +252,8 @@ grpc_error* grpc_pollset_work(grpc_pollset* pollset,
return err;
}
-grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
- grpc_pollset_worker* specific_worker) {
+static grpc_error* pollset_kick(grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
GRPC_POLLING_API_TRACE("pollset_kick(%p, %p)", pollset, specific_worker);
return g_event_engine->pollset_kick(pollset, specific_worker);
}
@@ -264,43 +264,57 @@ void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd) {
g_event_engine->pollset_add_fd(pollset, fd);
}
-grpc_pollset_set* grpc_pollset_set_create(void) {
+void pollset_global_init() {}
+void pollset_global_shutdown() {}
+
+grpc_pollset_vtable grpc_posix_pollset_vtable = {
+ pollset_global_init, pollset_global_shutdown,
+ pollset_init, pollset_shutdown,
+ pollset_destroy, pollset_work,
+ pollset_kick, pollset_size};
+
+static grpc_pollset_set* pollset_set_create(void) {
grpc_pollset_set* pss = g_event_engine->pollset_set_create();
GRPC_POLLING_API_TRACE("pollset_set_create(%p)", pss);
return pss;
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {
+static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
GRPC_POLLING_API_TRACE("pollset_set_destroy(%p)", pollset_set);
g_event_engine->pollset_set_destroy(pollset_set);
}
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {
+static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_set_add_pollset(%p, %p)", pollset_set,
pollset);
g_event_engine->pollset_set_add_pollset(pollset_set, pollset);
}
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {
+static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_set_del_pollset(%p, %p)", pollset_set,
pollset);
g_event_engine->pollset_set_del_pollset(pollset_set, pollset);
}
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
GRPC_POLLING_API_TRACE("pollset_set_add_pollset_set(%p, %p)", bag, item);
g_event_engine->pollset_set_add_pollset_set(bag, item);
}
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
GRPC_POLLING_API_TRACE("pollset_set_del_pollset_set(%p, %p)", bag, item);
g_event_engine->pollset_set_del_pollset_set(bag, item);
}
+grpc_pollset_set_vtable grpc_posix_pollset_set_vtable = {
+ pollset_set_create, pollset_set_destroy,
+ pollset_set_add_pollset, pollset_set_del_pollset,
+ pollset_set_add_pollset_set, pollset_set_del_pollset_set};
+
void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
GRPC_POLLING_API_TRACE("pollset_set_add_fd(%p, %d)", pollset_set,
grpc_fd_wrapped_fd(fd));
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index de97164f02..72d0ae58c1 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -54,23 +54,32 @@ grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
namespace grpc_core {
/** Execution context.
* A bag of data that collects information along a callstack.
- * Generally created at public API entry points, and passed down as
- * pointer to child functions that manipulate it.
+ * It is created on the stack at public API entry points, and stored internally
+ * as a thread-local variable.
+ *
+ * Generally, to create an exec_ctx instance, add the following line at the top
+ * of the public API entry point or at the start of a thread's work function :
+ *
+ * grpc_core::ExecCtx exec_ctx;
+ *
+ * Access the created ExecCtx instance using :
+ * grpc_core::ExecCtx::Get()
*
* Specific responsibilities (this may grow in the future):
* - track a list of work that needs to be delayed until the top of the
* call stack (this provides a convenient mechanism to run callbacks
* without worrying about locking issues)
- * - provide a decision maker (via grpc_exec_ctx_ready_to_finish) that provides
+ * - provide a decision maker (via IsReadyToFinish) that provides a
* signal as to whether a borrowed thread should continue to do work or
* should actively try to finish up and get this thread back to its owner
*
* CONVENTIONS:
* - Instance of this must ALWAYS be constructed on the stack, never
* heap allocated.
- * - Instances and pointers to them must always be called exec_ctx.
- * - Instances are always passed as the first argument to a function that
- * takes it, and always as a pointer (grpc_exec_ctx is never copied).
+ * - Exactly one instance of ExecCtx must be created per thread. Instances must
+ * always be called exec_ctx.
+ * - Do not pass exec_ctx as a parameter to a function. Always access it using
+ * grpc_core::ExecCtx::Get()
*/
class ExecCtx {
public:
@@ -171,6 +180,10 @@ on outside context */
return reinterpret_cast<ExecCtx*>(gpr_tls_get(&exec_ctx_));
}
+ static void Set(ExecCtx* exec_ctx) {
+ gpr_tls_set(&exec_ctx_, reinterpret_cast<intptr_t>(exec_ctx));
+ }
+
protected:
/** Check if ready to finish */
virtual bool CheckReadyToFinish() { return false; }
@@ -180,9 +193,6 @@ on outside context */
private:
/** Set exec_ctx_ to exec_ctx */
- void Set(ExecCtx* exec_ctx) {
- gpr_tls_set(&exec_ctx_, reinterpret_cast<intptr_t>(exec_ctx));
- }
grpc_closure_list closure_list_ = GRPC_CLOSURE_LIST_INIT;
CombinerData combiner_data_ = {nullptr, nullptr};
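The comment block above amounts to a simple usage pattern: construct exactly one ExecCtx on the stack at the public API entry point, and have everything deeper in the call stack fetch it through the thread-local accessor rather than taking it as a parameter. A minimal sketch of that pattern, with grpc_foo_entry_point as a purely hypothetical entry-point name:

#include "src/core/lib/iomgr/exec_ctx.h"

// Hypothetical public API entry point illustrating the ExecCtx convention.
void grpc_foo_entry_point() {
  // One stack-allocated ExecCtx per thread, always named exec_ctx.
  grpc_core::ExecCtx exec_ctx;
  // Deeper code never receives exec_ctx as an argument; it reaches the
  // instance through the thread-local accessor instead.
  grpc_core::ExecCtx::Get()->Flush();
}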
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index 3c2b83a549..468814eaee 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -47,6 +47,7 @@ static grpc_iomgr_object g_root_object;
void grpc_iomgr_init() {
grpc_core::ExecCtx exec_ctx;
+ grpc_determine_iomgr_platform();
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
diff --git a/src/core/lib/iomgr/iomgr_custom.cc b/src/core/lib/iomgr/iomgr_custom.cc
new file mode 100644
index 0000000000..d34c8e7cd1
--- /dev/null
+++ b/src/core/lib/iomgr/iomgr_custom.cc
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <grpc/support/thd_id.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/pollset_custom.h"
+#include "src/core/lib/iomgr/pollset_set_custom.h"
+#include "src/core/lib/iomgr/resolve_address_custom.h"
+
+gpr_thd_id g_init_thread;
+
+static void iomgr_platform_init(void) {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_executor_set_threading(false);
+ g_init_thread = gpr_thd_currentid();
+ grpc_pollset_global_init();
+}
+static void iomgr_platform_flush(void) {}
+static void iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+
+static grpc_iomgr_platform_vtable vtable = {
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+
+void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
+ grpc_custom_resolver_vtable* resolver,
+ grpc_custom_timer_vtable* timer,
+ grpc_custom_poller_vtable* poller) {
+ grpc_custom_endpoint_init(socket);
+ grpc_custom_timer_init(timer);
+ grpc_custom_pollset_init(poller);
+ grpc_custom_pollset_set_init();
+ grpc_custom_resolver_init(resolver);
+ grpc_set_iomgr_platform_vtable(&vtable);
+}
+
+#ifdef GRPC_CUSTOM_SOCKET
+grpc_iomgr_platform_vtable* grpc_default_iomgr_platform_vtable() {
+ return &vtable;
+}
+#endif
diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_custom.h
index 4d62f00ad6..ceb6c65db2 100644
--- a/src/core/lib/iomgr/iomgr_uv.h
+++ b/src/core/lib/iomgr/iomgr_custom.h
@@ -16,24 +16,32 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_UV_H
-#define GRPC_CORE_LIB_IOMGR_IOMGR_UV_H
+#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H
#include <grpc/support/port_platform.h>
-#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/pollset_custom.h"
+#include "src/core/lib/iomgr/resolve_address_custom.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
+#include "src/core/lib/iomgr/timer_custom.h"
#include <grpc/support/thd_id.h>
/* The thread ID of the thread on which grpc was initialized. Used to verify
- * that all calls into libuv are made on that same thread */
+ * that all calls into the custom iomgr are made on that same thread */
extern gpr_thd_id g_init_thread;
-#ifdef GRPC_UV_THREAD_CHECK
-#define GRPC_UV_ASSERT_SAME_THREAD() \
+#ifdef GRPC_CUSTOM_IOMGR_THREAD_CHECK
+#define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD() \
GPR_ASSERT(gpr_thd_currentid() == g_init_thread)
#else
-#define GRPC_UV_ASSERT_SAME_THREAD()
+#define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD()
#endif /* GRPC_UV_THREAD_CHECK */
-#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_UV_H */
+void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
+ grpc_custom_resolver_vtable* resolver,
+ grpc_custom_timer_vtable* timer,
+ grpc_custom_poller_vtable* poller);
+
+#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H */
diff --git a/src/core/lib/iomgr/iomgr_internal.cc b/src/core/lib/iomgr/iomgr_internal.cc
new file mode 100644
index 0000000000..32dbabb79d
--- /dev/null
+++ b/src/core/lib/iomgr/iomgr_internal.cc
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <stddef.h>
+
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/timer_manager.h"
+
+static grpc_iomgr_platform_vtable* iomgr_platform_vtable = nullptr;
+
+void grpc_set_iomgr_platform_vtable(grpc_iomgr_platform_vtable* vtable) {
+ iomgr_platform_vtable = vtable;
+}
+
+void grpc_determine_iomgr_platform() {
+ if (iomgr_platform_vtable == nullptr) {
+ grpc_set_default_iomgr_platform();
+ }
+}
+
+void grpc_iomgr_platform_init() { iomgr_platform_vtable->init(); }
+
+void grpc_iomgr_platform_flush() { iomgr_platform_vtable->flush(); }
+
+void grpc_iomgr_platform_shutdown() { iomgr_platform_vtable->shutdown(); }
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index 644219fb4d..b011d9c7b1 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -31,9 +31,21 @@ typedef struct grpc_iomgr_object {
struct grpc_iomgr_object* prev;
} grpc_iomgr_object;
+typedef struct grpc_iomgr_platform_vtable {
+ void (*init)(void);
+ void (*flush)(void);
+ void (*shutdown)(void);
+} grpc_iomgr_platform_vtable;
+
void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name);
void grpc_iomgr_unregister_object(grpc_iomgr_object* obj);
+void grpc_determine_iomgr_platform();
+
+void grpc_set_iomgr_platform_vtable(grpc_iomgr_platform_vtable* vtable);
+
+void grpc_set_default_iomgr_platform();
+
void grpc_iomgr_platform_init(void);
/** flush any globally queued work from iomgr */
void grpc_iomgr_platform_flush(void);
diff --git a/src/core/lib/iomgr/iomgr_posix.cc b/src/core/lib/iomgr/iomgr_posix.cc
index 35b8adf01e..66c9cb7ff7 100644
--- a/src/core/lib/iomgr/iomgr_posix.cc
+++ b/src/core/lib/iomgr/iomgr_posix.cc
@@ -24,19 +24,44 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/tcp_posix.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/iomgr/timer.h"
-void grpc_iomgr_platform_init(void) {
+extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable;
+extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable;
+extern grpc_timer_vtable grpc_generic_timer_vtable;
+extern grpc_pollset_vtable grpc_posix_pollset_vtable;
+extern grpc_pollset_set_vtable grpc_posix_pollset_set_vtable;
+extern grpc_address_resolver_vtable grpc_posix_resolver_vtable;
+
+static void iomgr_platform_init(void) {
grpc_wakeup_fd_global_init();
grpc_event_engine_init();
}
-void grpc_iomgr_platform_flush(void) {}
+static void iomgr_platform_flush(void) {}
-void grpc_iomgr_platform_shutdown(void) {
+static void iomgr_platform_shutdown(void) {
grpc_event_engine_shutdown();
grpc_wakeup_fd_global_destroy();
}
+static grpc_iomgr_platform_vtable vtable = {
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+
+void grpc_set_default_iomgr_platform() {
+ grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
+ grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
+ grpc_set_timer_impl(&grpc_generic_timer_vtable);
+ grpc_set_pollset_vtable(&grpc_posix_pollset_vtable);
+ grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable);
+ grpc_set_resolver_impl(&grpc_posix_resolver_vtable);
+ grpc_set_iomgr_platform_vtable(&vtable);
+}
+
#endif /* GRPC_POSIX_SOCKET */
diff --git a/src/core/lib/iomgr/iomgr_uv.cc b/src/core/lib/iomgr/iomgr_uv.cc
index c11c37ca20..4a984446db 100644
--- a/src/core/lib/iomgr/iomgr_uv.cc
+++ b/src/core/lib/iomgr/iomgr_uv.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,26 +20,21 @@
#include "src/core/lib/iomgr/port.h"
-#ifdef GRPC_UV
+#if defined(GRPC_CUSTOM_SOCKET) && defined(GRPC_UV)
-#include <grpc/support/thd_id.h>
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/pollset_custom.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
+#include "src/core/lib/iomgr/timer_custom.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/executor.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
-#include "src/core/lib/iomgr/pollset_uv.h"
-#include "src/core/lib/iomgr/tcp_uv.h"
+extern grpc_socket_vtable grpc_uv_socket_vtable;
+extern grpc_custom_resolver_vtable uv_resolver_vtable;
+extern grpc_custom_timer_vtable uv_timer_vtable;
+extern grpc_custom_poller_vtable uv_pollset_vtable;
-gpr_thd_id g_init_thread;
-
-void grpc_iomgr_platform_init(void) {
- grpc_core::ExecCtx exec_ctx;
- grpc_pollset_global_init();
-
- grpc_executor_set_threading(false);
- g_init_thread = gpr_thd_currentid();
+void grpc_set_default_iomgr_platform() {
+ grpc_custom_iomgr_init(&grpc_uv_socket_vtable, &uv_resolver_vtable,
+ &uv_timer_vtable, &uv_pollset_vtable);
}
-void grpc_iomgr_platform_flush(void) {}
-void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
-
-#endif /* GRPC_UV */
+#endif
diff --git a/src/core/lib/iomgr/iomgr_windows.cc b/src/core/lib/iomgr/iomgr_windows.cc
index 8c4888ca97..cdef89cbf0 100644
--- a/src/core/lib/iomgr/iomgr_windows.cc
+++ b/src/core/lib/iomgr/iomgr_windows.cc
@@ -29,7 +29,18 @@
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset_windows.h"
+#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/socket_windows.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/iomgr/timer.h"
+
+extern grpc_tcp_server_vtable grpc_windows_tcp_server_vtable;
+extern grpc_tcp_client_vtable grpc_windows_tcp_client_vtable;
+extern grpc_timer_vtable grpc_generic_timer_vtable;
+extern grpc_pollset_vtable grpc_windows_pollset_vtable;
+extern grpc_pollset_set_vtable grpc_windows_pollset_set_vtable;
+extern grpc_address_resolver_vtable grpc_windows_resolver_vtable;
/* Windows' io manager is going to be fully designed using IO completion
ports. All of what we're doing here is basically make sure that
@@ -46,18 +57,31 @@ static void winsock_shutdown(void) {
GPR_ASSERT(status == 0);
}
-void grpc_iomgr_platform_init(void) {
+static void iomgr_platform_init(void) {
winsock_init();
grpc_iocp_init();
grpc_pollset_global_init();
}
-void grpc_iomgr_platform_flush(void) { grpc_iocp_flush(); }
+static void iomgr_platform_flush(void) { grpc_iocp_flush(); }
-void grpc_iomgr_platform_shutdown(void) {
+static void iomgr_platform_shutdown(void) {
grpc_pollset_global_shutdown();
grpc_iocp_shutdown();
winsock_shutdown();
}
+static grpc_iomgr_platform_vtable vtable = {
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+
+void grpc_set_default_iomgr_platform() {
+ grpc_set_tcp_client_impl(&grpc_windows_tcp_client_vtable);
+ grpc_set_tcp_server_impl(&grpc_windows_tcp_server_vtable);
+ grpc_set_timer_impl(&grpc_generic_timer_vtable);
+ grpc_set_pollset_vtable(&grpc_windows_pollset_vtable);
+ grpc_set_pollset_set_vtable(&grpc_windows_pollset_set_vtable);
+ grpc_set_resolver_impl(&grpc_windows_resolver_vtable);
+ grpc_set_iomgr_platform_vtable(&vtable);
+}
+
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset.cc b/src/core/lib/iomgr/pollset.cc
new file mode 100644
index 0000000000..ebfef1dbc7
--- /dev/null
+++ b/src/core/lib/iomgr/pollset.cc
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/pollset.h"
+
+grpc_pollset_vtable* grpc_pollset_impl;
+
+void grpc_set_pollset_vtable(grpc_pollset_vtable* vtable) {
+ grpc_pollset_impl = vtable;
+}
+
+void grpc_pollset_global_init() { grpc_pollset_impl->global_init(); }
+
+void grpc_pollset_global_shutdown() { grpc_pollset_impl->global_shutdown(); }
+
+void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
+ grpc_pollset_impl->init(pollset, mu);
+}
+
+void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+ grpc_pollset_impl->shutdown(pollset, closure);
+}
+
+void grpc_pollset_destroy(grpc_pollset* pollset) {
+ grpc_pollset_impl->destroy(pollset);
+}
+
+grpc_error* grpc_pollset_work(grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
+ grpc_millis deadline) {
+ return grpc_pollset_impl->work(pollset, worker, deadline);
+}
+
+grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
+ return grpc_pollset_impl->kick(pollset, specific_worker);
+}
+
+size_t grpc_pollset_size(void) { return grpc_pollset_impl->pollset_size(); }
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 9cc3e4c7fa..28472b360d 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -38,6 +38,24 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount;
typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker grpc_pollset_worker;
+typedef struct grpc_pollset_vtable {
+ void (*global_init)(void);
+ void (*global_shutdown)(void);
+ void (*init)(grpc_pollset* pollset, gpr_mu** mu);
+ void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
+ void (*destroy)(grpc_pollset* pollset);
+ grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker,
+ grpc_millis deadline);
+ grpc_error* (*kick)(grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker);
+ size_t (*pollset_size)(void);
+} grpc_pollset_vtable;
+
+void grpc_set_pollset_vtable(grpc_pollset_vtable* vtable);
+
+void grpc_pollset_global_init(void);
+void grpc_pollset_global_shutdown(void);
+
size_t grpc_pollset_size(void);
/* Initialize a pollset: assumes *pollset contains all zeros */
void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu);
diff --git a/src/core/lib/iomgr/pollset_custom.cc b/src/core/lib/iomgr/pollset_custom.cc
new file mode 100644
index 0000000000..04bd104055
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_custom.cc
@@ -0,0 +1,106 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <stddef.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_custom.h"
+#include "src/core/lib/iomgr/timer.h"
+
+#include "src/core/lib/debug/trace.h"
+
+static grpc_custom_poller_vtable* poller_vtable;
+
+struct grpc_pollset {
+ gpr_mu mu;
+};
+
+static size_t pollset_size() { return sizeof(grpc_pollset); }
+
+static void pollset_global_init() { poller_vtable->init(); }
+
+static void pollset_global_shutdown() { poller_vtable->shutdown(); }
+
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ gpr_mu_init(&pollset->mu);
+ *mu = &pollset->mu;
+}
+
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+}
+
+static void pollset_destroy(grpc_pollset* pollset) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ gpr_mu_destroy(&pollset->mu);
+}
+
+static grpc_error* pollset_work(grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
+ grpc_millis deadline) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ gpr_mu_unlock(&pollset->mu);
+ grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ size_t timeout = 0;
+ if (deadline > now) {
+ timeout = deadline - now;
+ }
+ // We yield here because the poll() call might yield
+ // control back to the application
+ grpc_core::ExecCtx* curr = grpc_core::ExecCtx::Get();
+ grpc_core::ExecCtx::Set(nullptr);
+ poller_vtable->poll(timeout);
+ grpc_core::ExecCtx::Set(curr);
+ grpc_core::ExecCtx::Get()->InvalidateNow();
+ if (grpc_core::ExecCtx::Get()->HasWork()) {
+ grpc_core::ExecCtx::Get()->Flush();
+ }
+ gpr_mu_lock(&pollset->mu);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* pollset_kick(grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ poller_vtable->kick();
+ return GRPC_ERROR_NONE;
+}
+
+grpc_pollset_vtable custom_pollset_vtable = {
+ pollset_global_init, pollset_global_shutdown,
+ pollset_init, pollset_shutdown,
+ pollset_destroy, pollset_work,
+ pollset_kick, pollset_size};
+
+void grpc_custom_pollset_init(grpc_custom_poller_vtable* vtable) {
+ poller_vtable = vtable;
+ grpc_set_pollset_vtable(&custom_pollset_vtable);
+}
diff --git a/src/core/lib/iomgr/pollset_custom.h b/src/core/lib/iomgr/pollset_custom.h
new file mode 100644
index 0000000000..9e2027f7f4
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_custom.h
@@ -0,0 +1,35 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H
+
+#include <grpc/support/port_platform.h>
+
+#include <stddef.h>
+
+typedef struct grpc_custom_poller_vtable {
+ void (*init)();
+ void (*poll)(size_t timeout_ms);
+ void (*kick)();
+ void (*shutdown)();
+} grpc_custom_poller_vtable;
+
+void grpc_custom_pollset_init(grpc_custom_poller_vtable* vtable);
+
+#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H */
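The grpc_custom_poller_vtable added here is the hook a custom iomgr supplies to drive polling; the libuv implementation further down in this diff (pollset_uv.cc) is the real in-tree example. As a rough illustration only, a do-nothing poller with hypothetical function names could be registered like this:

#include <stddef.h>

#include "src/core/lib/iomgr/pollset_custom.h"

// Hypothetical no-op poller: each callback required by the vtable does nothing.
static void noop_poller_init() {}
static void noop_poller_poll(size_t timeout_ms) {}
static void noop_poller_kick() {}
static void noop_poller_shutdown() {}

static grpc_custom_poller_vtable noop_poller_vtable = {
    noop_poller_init, noop_poller_poll, noop_poller_kick, noop_poller_shutdown};

// Registered once at startup, before iomgr initialization:
//   grpc_custom_pollset_init(&noop_poller_vtable);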
diff --git a/src/core/lib/iomgr/pollset_set_uv.cc b/src/core/lib/iomgr/pollset_set.cc
index 50814c1f0a..42a647a737 100644
--- a/src/core/lib/iomgr/pollset_set_uv.cc
+++ b/src/core/lib/iomgr/pollset_set.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,28 +18,38 @@
#include <grpc/support/port_platform.h>
-#include "src/core/lib/iomgr/port.h"
+#include "src/core/lib/iomgr/pollset_set.h"
-#ifdef GRPC_UV
+grpc_pollset_set_vtable* grpc_pollset_set_impl;
-#include "src/core/lib/iomgr/pollset_set.h"
+void grpc_set_pollset_set_vtable(grpc_pollset_set_vtable* vtable) {
+ grpc_pollset_set_impl = vtable;
+}
-grpc_pollset_set* grpc_pollset_set_create(void) {
- return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+grpc_pollset_set* grpc_pollset_set_create() {
+ return grpc_pollset_set_impl->create();
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {
+ grpc_pollset_set_impl->destroy(pollset_set);
+}
void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {}
+ grpc_pollset* pollset) {
+ grpc_pollset_set_impl->add_pollset(pollset_set, pollset);
+}
void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {}
+ grpc_pollset* pollset) {
+ grpc_pollset_set_impl->del_pollset(pollset_set, pollset);
+}
void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {}
+ grpc_pollset_set* item) {
+ grpc_pollset_set_impl->add_pollset_set(bag, item);
+}
void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {}
-
-#endif /* GRPC_UV */
+ grpc_pollset_set* item) {
+ grpc_pollset_set_impl->del_pollset_set(bag, item);
+}
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index 18f30aa94e..d3355b8ff8 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -30,6 +30,17 @@
typedef struct grpc_pollset_set grpc_pollset_set;
+typedef struct grpc_pollset_set_vtable {
+ grpc_pollset_set* (*create)(void);
+ void (*destroy)(grpc_pollset_set* pollset_set);
+ void (*add_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset);
+ void (*del_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset);
+ void (*add_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item);
+ void (*del_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item);
+} grpc_pollset_set_vtable;
+
+void grpc_set_pollset_set_vtable(grpc_pollset_set_vtable* vtable);
+
grpc_pollset_set* grpc_pollset_set_create(void);
void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
diff --git a/src/core/lib/iomgr/pollset_set_custom.cc b/src/core/lib/iomgr/pollset_set_custom.cc
new file mode 100644
index 0000000000..b1ee66020d
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_set_custom.cc
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include "src/core/lib/iomgr/pollset_set.h"
+
+grpc_pollset_set* pollset_set_create(void) {
+ return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+}
+
+void pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+
+void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+static grpc_pollset_set_vtable vtable = {
+ pollset_set_create, pollset_set_destroy,
+ pollset_set_add_pollset, pollset_set_del_pollset,
+ pollset_set_add_pollset_set, pollset_set_del_pollset_set};
+
+void grpc_custom_pollset_set_init() { grpc_set_pollset_set_vtable(&vtable); }
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/pollset_set_custom.h
index 093b2d085d..80e19a1fef 100644
--- a/src/core/lib/iomgr/timer_uv.h
+++ b/src/core/lib/iomgr/pollset_set_custom.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,19 +16,11 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_TIMER_UV_H
-#define GRPC_CORE_LIB_IOMGR_TIMER_UV_H
+#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H
#include <grpc/support/port_platform.h>
-#include "src/core/lib/iomgr/exec_ctx.h"
+void grpc_custom_pollset_set_init();
-struct grpc_timer {
- grpc_closure* closure;
- /* This is actually a uv_timer_t*, but we want to keep platform-specific
- types out of headers */
- void* uv_timer;
- int pending;
-};
-
-#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
+#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H */
diff --git a/src/core/lib/iomgr/pollset_set_windows.cc b/src/core/lib/iomgr/pollset_set_windows.cc
index ff3f6a944e..bb9e7f5d28 100644
--- a/src/core/lib/iomgr/pollset_set_windows.cc
+++ b/src/core/lib/iomgr/pollset_set_windows.cc
@@ -25,22 +25,27 @@
#include "src/core/lib/iomgr/pollset_set_windows.h"
-grpc_pollset_set* grpc_pollset_set_create(void) {
+static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+static void pollset_set_destroy(grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {}
+static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
- grpc_pollset* pollset) {}
+static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {}
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
- grpc_pollset_set* item) {}
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+grpc_pollset_set_vtable grpc_windows_pollset_set_vtable = {
+ pollset_set_create, pollset_set_destroy,
+ pollset_set_add_pollset, pollset_set_del_pollset,
+ pollset_set_add_pollset_set, pollset_set_del_pollset_set};
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index c6a2f43bf1..bade6eae6c 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -22,137 +22,72 @@
#ifdef GRPC_UV
-#include <uv.h>
-
-#include <string.h>
-
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-
-#include "src/core/lib/iomgr/iomgr_uv.h"
-#include "src/core/lib/iomgr/pollset.h"
-#include "src/core/lib/iomgr/pollset_uv.h"
+#include "src/core/lib/iomgr/pollset_custom.h"
-#include "src/core/lib/debug/trace.h"
-
-grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount(false, "fd_refcount");
-
-struct grpc_pollset {
- uv_timer_t* timer;
- int shutting_down;
-};
+#include <uv.h>
/* Indicates that grpc_pollset_work should run an iteration of the UV loop
before running callbacks. This defaults to 1, and should be disabled if
grpc_pollset_work will be called within the callstack of uv_run */
-int grpc_pollset_work_run_loop;
-
-gpr_mu grpc_polling_mu;
+int grpc_pollset_work_run_loop = 1;
-/* This is used solely to kick the uv loop, by setting a callback to be run
- immediately in the next loop iteration.
- Note: In the future, if there is a bug that involves missing wakeups in the
- future, try adding a uv_async_t to kick the loop differently */
-uv_timer_t* dummy_uv_handle;
+static bool g_kicked = false;
-size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
+typedef struct uv_poller_handle {
+ uv_timer_t poll_timer;
+ uv_timer_t kick_timer;
+ int refs;
+} uv_poller_handle;
-void dummy_timer_cb(uv_timer_t* handle) {}
+static uv_poller_handle* g_handle;
-void dummy_handle_close_cb(uv_handle_t* handle) { gpr_free(handle); }
-
-void grpc_pollset_global_init(void) {
- gpr_mu_init(&grpc_polling_mu);
- dummy_uv_handle = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
- uv_timer_init(uv_default_loop(), dummy_uv_handle);
- grpc_pollset_work_run_loop = 1;
-}
-
-void grpc_pollset_global_shutdown(void) {
- GRPC_UV_ASSERT_SAME_THREAD();
- gpr_mu_destroy(&grpc_polling_mu);
- uv_close((uv_handle_t*)dummy_uv_handle, dummy_handle_close_cb);
+static void init() {
+ g_handle = (uv_poller_handle*)gpr_malloc(sizeof(uv_poller_handle));
+ g_handle->refs = 2;
+ uv_timer_init(uv_default_loop(), &g_handle->poll_timer);
+ uv_timer_init(uv_default_loop(), &g_handle->kick_timer);
}
-static void timer_run_cb(uv_timer_t* timer) {}
+static void empty_timer_cb(uv_timer_t* handle) {}
-static void timer_close_cb(uv_handle_t* handle) {
- handle->data = (void*)1;
- gpr_free(handle);
-}
+static void kick_timer_cb(uv_timer_t* handle) { g_kicked = false; }
-void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
- GRPC_UV_ASSERT_SAME_THREAD();
- *mu = &grpc_polling_mu;
- pollset->timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
- uv_timer_init(uv_default_loop(), pollset->timer);
- pollset->shutting_down = 0;
+static void run_loop(size_t timeout) {
+ if (grpc_pollset_work_run_loop) {
+ if (timeout == 0) {
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ } else {
+ uv_timer_start(&g_handle->poll_timer, empty_timer_cb, timeout, 0);
+ uv_run(uv_default_loop(), UV_RUN_ONCE);
+ uv_timer_stop(&g_handle->poll_timer);
+ }
+ }
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
- GPR_ASSERT(!pollset->shutting_down);
- GRPC_UV_ASSERT_SAME_THREAD();
- pollset->shutting_down = 1;
- if (grpc_pollset_work_run_loop) {
- // Drain any pending UV callbacks without blocking
- uv_run(uv_default_loop(), UV_RUN_NOWAIT);
- } else {
- // kick the loop once
- uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
+static void kick() {
+ if (!g_kicked) {
+ g_kicked = true;
+ uv_timer_start(&g_handle->kick_timer, kick_timer_cb, 0, 0);
}
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {
- GRPC_UV_ASSERT_SAME_THREAD();
- uv_close((uv_handle_t*)pollset->timer, timer_close_cb);
- // timer.data is a boolean indicating that the timer has finished closing
- pollset->timer->data = (void*)0;
- if (grpc_pollset_work_run_loop) {
- while (!pollset->timer->data) {
- uv_run(uv_default_loop(), UV_RUN_NOWAIT);
- }
+static void close_timer_cb(uv_handle_t* handle) {
+ g_handle->refs--;
+ if (g_handle->refs == 0) {
+ gpr_free(g_handle);
}
}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
- grpc_pollset_worker** worker_hdl,
- grpc_millis deadline) {
- uint64_t timeout;
- GRPC_UV_ASSERT_SAME_THREAD();
- gpr_mu_unlock(&grpc_polling_mu);
+static void shutdown() {
+ uv_close((uv_handle_t*)&g_handle->poll_timer, close_timer_cb);
+ uv_close((uv_handle_t*)&g_handle->kick_timer, close_timer_cb);
if (grpc_pollset_work_run_loop) {
- grpc_millis now = grpc_core::ExecCtx::Get()->Now();
- if (deadline >= now) {
- timeout = deadline - now;
- } else {
- timeout = 0;
- }
- /* We special-case timeout=0 so that we don't bother with the timer when
- the loop won't block anyway */
- if (timeout > 0) {
- uv_timer_start(pollset->timer, timer_run_cb, timeout, 0);
- /* Run until there is some I/O activity or the timer triggers. It doesn't
- matter which happens */
- uv_run(uv_default_loop(), UV_RUN_ONCE);
- uv_timer_stop(pollset->timer);
- } else {
- uv_run(uv_default_loop(), UV_RUN_NOWAIT);
- }
- }
- if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) {
- grpc_core::ExecCtx::Get()->Flush();
+ GPR_ASSERT(uv_run(uv_default_loop(), UV_RUN_DEFAULT) == 0);
}
- gpr_mu_lock(&grpc_polling_mu);
- return GRPC_ERROR_NONE;
}
-grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
- grpc_pollset_worker* specific_worker) {
- GRPC_UV_ASSERT_SAME_THREAD();
- uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
- return GRPC_ERROR_NONE;
-}
+grpc_custom_poller_vtable uv_pollset_vtable = {init, run_loop, kick, shutdown};
#endif /* GRPC_UV */
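The hunk above collapses the per-pollset libuv code into a single four-entry poller vtable (init, run_loop, kick, shutdown) that drives the default uv loop. As a rough sketch of what another single-threaded event loop would have to supply, assuming the grpc_custom_poller_vtable declared in pollset_custom.h has exactly the four slots used by uv_pollset_vtable above (names prefixed my_ are hypothetical, not part of this patch):

#include "src/core/lib/iomgr/pollset_custom.h"

static void my_poller_init(void) {
  /* One-time setup of the embedder's event loop. */
}

static void my_poller_run_loop(size_t timeout_ms) {
  /* Run one iteration of the loop; timeout_ms == 0 means "poll without
     blocking", mirroring run_loop() in pollset_uv.cc above. */
}

static void my_poller_kick(void) {
  /* Wake the loop so pending gRPC closures get a chance to run. */
}

static void my_poller_shutdown(void) {
  /* Close any handles and let the loop drain. */
}

grpc_custom_poller_vtable my_poller_vtable = {
    my_poller_init, my_poller_run_loop, my_poller_kick, my_poller_shutdown};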
diff --git a/src/core/lib/iomgr/pollset_uv.h b/src/core/lib/iomgr/pollset_uv.h
index 566c110ca6..de82bcc1d3 100644
--- a/src/core/lib/iomgr/pollset_uv.h
+++ b/src/core/lib/iomgr/pollset_uv.h
@@ -21,7 +21,12 @@
extern int grpc_pollset_work_run_loop;
-void grpc_pollset_global_init(void);
-void grpc_pollset_global_shutdown(void);
+typedef struct grpc_custom_poller_vtable {
+ void (*init)(void);
+ void (*run_loop)(int blocking);
+} grpc_custom_poller_vtable;
+
+void grpc_custom_pollset_global_init(grpc_custom_poller_vtable* vtable);
+void grpc_custom_pollset_global_shutdown(void);
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index c1b83ddc14..e9a808d8ad 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -38,7 +38,7 @@ gpr_mu grpc_polling_mu;
static grpc_pollset_worker* g_active_poller;
static grpc_pollset_worker g_global_root_worker;
-void grpc_pollset_global_init(void) {
+static void pollset_global_init(void) {
gpr_mu_init(&grpc_polling_mu);
g_active_poller = NULL;
g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
@@ -46,7 +46,7 @@ void grpc_pollset_global_init(void) {
&g_global_root_worker;
}
-void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
+static void pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
static void remove_worker(grpc_pollset_worker* worker,
grpc_pollset_worker_link_type type) {
@@ -80,21 +80,21 @@ static void push_front_worker(grpc_pollset_worker* root,
worker->links[type].next->links[type].prev = worker;
}
-size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
+static size_t pollset_size(void) { return sizeof(grpc_pollset); }
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. We're still going to provide a minimal
set of features for the sake of the rest of grpc. But grpc_pollset_work
won't actually do any polling, and will return as quickly as possible. */
-void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
*mu = &grpc_polling_mu;
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
&pollset->root_worker;
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
@@ -104,11 +104,11 @@ void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
}
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {}
+static void pollset_destroy(grpc_pollset* pollset) {}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
- grpc_pollset_worker** worker_hdl,
- grpc_millis deadline) {
+static grpc_error* pollset_work(grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
@@ -182,8 +182,8 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error* grpc_pollset_kick(grpc_pollset* p,
- grpc_pollset_worker* specific_worker) {
+static grpc_error* pollset_kick(grpc_pollset* p,
+ grpc_pollset_worker* specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
for (specific_worker =
@@ -220,4 +220,10 @@ grpc_error* grpc_pollset_kick(grpc_pollset* p,
return GRPC_ERROR_NONE;
}
+grpc_pollset_vtable grpc_windows_pollset_vtable = {
+ pollset_global_init, pollset_global_shutdown,
+ pollset_init, pollset_shutdown,
+ pollset_destroy, pollset_work,
+ pollset_kick, pollset_size};
+
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index 25090898ed..c1dcc52618 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -21,8 +21,11 @@
#ifndef GRPC_CORE_LIB_IOMGR_PORT_H
#define GRPC_CORE_LIB_IOMGR_PORT_H
-#if defined(GRPC_UV)
-// Do nothing
+#ifdef GRPC_UV
+#define GRPC_CUSTOM_SOCKET
+#endif
+#if defined(GRPC_CUSTOM_SOCKET)
+// Do nothing
#elif defined(GPR_MANYLINUX1)
#define GRPC_HAVE_ARPA_NAMESER 1
#define GRPC_HAVE_IFADDRS 1
@@ -33,13 +36,10 @@
#define GRPC_POSIX_FORK 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#define GRPC_LINUX_EPOLL 1
#elif defined(GPR_WINDOWS)
-#define GRPC_TIMER_USE_GENERIC 1
#define GRPC_WINSOCK_SOCKET 1
#define GRPC_WINDOWS_SOCKETUTILS 1
#elif defined(GPR_ANDROID)
@@ -49,10 +49,8 @@
#define GRPC_HAVE_UNIX_SOCKET 1
#define GRPC_LINUX_EVENTFD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_LINUX)
#define GRPC_HAVE_ARPA_NAMESER 1
#define GRPC_HAVE_IFADDRS 1
@@ -64,9 +62,7 @@
#define GRPC_POSIX_FORK 1
#define GRPC_POSIX_HOST_NAME_MAX 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#ifdef __GLIBC_PREREQ
#if __GLIBC_PREREQ(2, 4)
#define GRPC_LINUX_EPOLL 1
@@ -100,11 +96,9 @@
#define GRPC_POSIX_FORK 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_SYSCONF 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_FREEBSD)
#define GRPC_HAVE_ARPA_NAMESER 1
#define GRPC_HAVE_IFADDRS 1
@@ -114,36 +108,31 @@
#define GRPC_POSIX_FORK 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_OPENBSD)
#define GRPC_HAVE_IFADDRS 1
#define GRPC_HAVE_IPV6_RECVPKTINFO 1
#define GRPC_HAVE_UNIX_SOCKET 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_NACL)
#define GRPC_HAVE_ARPA_NAMESER 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#define GRPC_POSIX_SOCKET 1
-#define GRPC_POSIX_SOCKETADDR 1
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
-#define GRPC_TIMER_USE_GENERIC 1
#elif !defined(GPR_NO_AUTODETECT_PLATFORM)
#error "Platform not recognized"
#endif
#if defined(GRPC_POSIX_SOCKET) + defined(GRPC_WINSOCK_SOCKET) + \
- defined(GRPC_CUSTOM_SOCKET) + defined(GRPC_UV) != \
+ defined(GRPC_CUSTOM_SOCKET) != \
1
-#error Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GPR_CUSTOM_SOCKET
+#error \
+ "Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GRPC_CUSTOM_SOCKET"
#endif
#if defined(GRPC_POSIX_HOST_NAME_MAX) && defined(GRPC_POSIX_SYSCONF)
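After this change GRPC_UV is no longer its own socket backend; it simply implies GRPC_CUSTOM_SOCKET, and the check above requires exactly one of the three backends. Illustrative only, assuming the macro is supplied by the build (normally -DGRPC_UV on the compiler line rather than a #define in source):

#define GRPC_UV 1  /* hypothetical; usually passed by the build system */
#include "src/core/lib/iomgr/port.h"
#if !defined(GRPC_CUSTOM_SOCKET)
#error "GRPC_UV should have implied GRPC_CUSTOM_SOCKET"
#endif
#if defined(GRPC_POSIX_SOCKET) || defined(GRPC_WINSOCK_SOCKET)
#error "a custom-socket build must not also select a native backend"
#endif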
diff --git a/src/core/lib/iomgr/resolve_address.cc b/src/core/lib/iomgr/resolve_address.cc
new file mode 100644
index 0000000000..f2a4676369
--- /dev/null
+++ b/src/core/lib/iomgr/resolve_address.cc
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/support/alloc.h>
+#include "src/core/lib/iomgr/resolve_address.h"
+
+grpc_address_resolver_vtable* grpc_resolve_address_impl;
+
+void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable) {
+ grpc_resolve_address_impl = vtable;
+}
+
+void grpc_resolve_address(const char* addr, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses) {
+ grpc_resolve_address_impl->resolve_address(
+ addr, default_port, interested_parties, on_done, addresses);
+}
+
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
+ if (addrs != nullptr) {
+ gpr_free(addrs->addrs);
+ }
+ gpr_free(addrs);
+}
+
+grpc_error* grpc_blocking_resolve_address(const char* name,
+ const char* default_port,
+ grpc_resolved_addresses** addresses) {
+ return grpc_resolve_address_impl->blocking_resolve_address(name, default_port,
+ addresses);
+}
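The new resolve_address.cc turns the old per-platform function pointers into a small dispatcher: each iomgr backend fills in a grpc_address_resolver_vtable and registers it once, after which callers use grpc_resolve_address / grpc_blocking_resolve_address unchanged. A minimal sketch of the calling side (my_blocking_lookup is hypothetical; the real registration calls live in the iomgr platform files touched elsewhere in this commit):

#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/resolve_address.h"

/* Defined in resolve_address_posix.cc in a hunk further below. */
extern grpc_address_resolver_vtable grpc_posix_resolver_vtable;

static void my_blocking_lookup(void) {
  /* Somewhere during iomgr startup the backend registers itself: */
  grpc_set_resolver_impl(&grpc_posix_resolver_vtable);

  grpc_resolved_addresses* addrs = nullptr;
  grpc_error* err = grpc_blocking_resolve_address("localhost", "50051", &addrs);
  if (err == GRPC_ERROR_NONE) {
    grpc_resolved_addresses_destroy(addrs);
  } else {
    GRPC_ERROR_UNREF(err);
  }
}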
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 7bd1983abd..fe0d834582 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -29,11 +29,11 @@
#include <uv.h>
#endif
-#ifdef GPR_WINDOWS
+#ifdef GRPC_WINSOCK_SOCKET
#include <ws2tcpip.h>
#endif
-#ifdef GRPC_POSIX_SOCKETADDR
+#ifdef GRPC_POSIX_SOCKET
#include <sys/socket.h>
#endif
@@ -51,20 +51,33 @@ typedef struct {
grpc_resolved_address* addrs;
} grpc_resolved_addresses;
+typedef struct grpc_address_resolver_vtable {
+ void (*resolve_address)(const char* addr, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses);
+ grpc_error* (*blocking_resolve_address)(const char* name,
+ const char* default_port,
+ grpc_resolved_addresses** addresses);
+} grpc_address_resolver_vtable;
+
+void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable);
+
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
-extern void (*grpc_resolve_address)(const char* addr, const char* default_port,
- grpc_pollset_set* interested_parties,
- grpc_closure* on_done,
- grpc_resolved_addresses** addresses);
+void grpc_resolve_address(const char* addr, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses);
+
/* Destroy resolved addresses */
void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addresses);
-/* Resolve addr in a blocking fashion. Returns NULL on failure. On success,
+/* Resolve addr in a blocking fashion. On success,
result must be freed with grpc_resolved_addresses_destroy. */
-extern grpc_error* (*grpc_blocking_resolve_address)(
- const char* name, const char* default_port,
- grpc_resolved_addresses** addresses);
+grpc_error* grpc_blocking_resolve_address(const char* name,
+ const char* default_port,
+ grpc_resolved_addresses** addresses);
#endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H */
diff --git a/src/core/lib/iomgr/resolve_address_custom.cc b/src/core/lib/iomgr/resolve_address_custom.cc
new file mode 100644
index 0000000000..9cf7817f66
--- /dev/null
+++ b/src/core/lib/iomgr/resolve_address_custom.cc
@@ -0,0 +1,187 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+
+#include <grpc/support/log.h>
+#include "src/core/lib/gpr/host_port.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gpr/useful.h"
+
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/resolve_address_custom.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+
+#include <string.h>
+
+typedef struct grpc_custom_resolver {
+ grpc_closure* on_done;
+ grpc_resolved_addresses** addresses;
+ char* host;
+ char* port;
+} grpc_custom_resolver;
+
+static grpc_custom_resolver_vtable* resolve_address_vtable = nullptr;
+
+static int retry_named_port_failure(grpc_custom_resolver* r,
+ grpc_resolved_addresses** res) {
+ // This loop is copied from resolve_address_posix.c
+ const char* svc[][2] = {{"http", "80"}, {"https", "443"}};
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
+ if (strcmp(r->port, svc[i][0]) == 0) {
+ gpr_free(r->port);
+ r->port = gpr_strdup(svc[i][1]);
+ if (res) {
+ grpc_error* error =
+ resolve_address_vtable->resolve(r->host, r->port, res);
+ if (error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(error);
+ return 0;
+ }
+ } else {
+ resolve_address_vtable->resolve_async(r, r->host, r->port);
+ }
+ return 1;
+ }
+ }
+ return 0;
+}
+
+void grpc_custom_resolve_callback(grpc_custom_resolver* r,
+ grpc_resolved_addresses* result,
+ grpc_error* error) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ grpc_core::ExecCtx exec_ctx;
+ if (error == GRPC_ERROR_NONE) {
+ *r->addresses = result;
+ } else if (retry_named_port_failure(r, nullptr)) {
+ return;
+ }
+ if (r->on_done) {
+ GRPC_CLOSURE_SCHED(r->on_done, error);
+ }
+ gpr_free(r->host);
+ gpr_free(r->port);
+ gpr_free(r);
+}
+
+static grpc_error* try_split_host_port(const char* name,
+ const char* default_port, char** host,
+ char** port) {
+ /* parse name, splitting it into host and port parts */
+ grpc_error* error;
+ gpr_split_host_port(name, host, port);
+ if (*host == nullptr) {
+ char* msg;
+ gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ return error;
+ }
+ if (*port == nullptr) {
+ // TODO(murgatroid99): add tests for this case
+ if (default_port == nullptr) {
+ char* msg;
+ gpr_asprintf(&msg, "no port in name '%s'", name);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ return error;
+ }
+ *port = gpr_strdup(default_port);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* blocking_resolve_address_impl(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) {
+ char* host;
+ char* port;
+ grpc_error* err;
+
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ gpr_free(host);
+ gpr_free(port);
+ return err;
+ }
+
+ /* Resolve via the registered custom resolver vtable */
+ grpc_custom_resolver resolver;
+ resolver.host = host;
+ resolver.port = port;
+
+ grpc_resolved_addresses* addrs;
+ grpc_core::ExecCtx* curr = grpc_core::ExecCtx::Get();
+ grpc_core::ExecCtx::Set(nullptr);
+ err = resolve_address_vtable->resolve(host, port, &addrs);
+ if (err != GRPC_ERROR_NONE) {
+ if (retry_named_port_failure(&resolver, &addrs)) {
+ GRPC_ERROR_UNREF(err);
+ err = GRPC_ERROR_NONE;
+ }
+ }
+ grpc_core::ExecCtx::Set(curr);
+ if (err == GRPC_ERROR_NONE) {
+ *addresses = addrs;
+ }
+ gpr_free(resolver.host);
+ gpr_free(resolver.port);
+ return err;
+}
+
+static void resolve_address_impl(const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
+ grpc_custom_resolver* r = nullptr;
+ char* host = nullptr;
+ char* port = nullptr;
+ grpc_error* err;
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ GRPC_CLOSURE_SCHED(on_done, err);
+ gpr_free(host);
+ gpr_free(port);
+ return;
+ }
+ r = (grpc_custom_resolver*)gpr_malloc(sizeof(grpc_custom_resolver));
+ r->on_done = on_done;
+ r->addresses = addrs;
+ r->host = host;
+ r->port = port;
+
+ /* Kick off the asynchronous resolve via the custom vtable */
+ resolve_address_vtable->resolve_async(r, r->host, r->port);
+}
+
+static grpc_address_resolver_vtable custom_resolver_vtable = {
+ resolve_address_impl, blocking_resolve_address_impl};
+
+void grpc_custom_resolver_init(grpc_custom_resolver_vtable* impl) {
+ resolve_address_vtable = impl;
+ grpc_set_resolver_impl(&custom_resolver_vtable);
+}
diff --git a/src/core/lib/iomgr/resolve_address_custom.h b/src/core/lib/iomgr/resolve_address_custom.h
new file mode 100644
index 0000000000..e0c6714087
--- /dev/null
+++ b/src/core/lib/iomgr/resolve_address_custom.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+
+typedef struct grpc_custom_resolver grpc_custom_resolver;
+
+typedef struct grpc_custom_resolver_vtable {
+ grpc_error* (*resolve)(char* host, char* port, grpc_resolved_addresses** res);
+ void (*resolve_async)(grpc_custom_resolver* resolver, char* host, char* port);
+} grpc_custom_resolver_vtable;
+
+void grpc_custom_resolve_callback(grpc_custom_resolver* resolver,
+ grpc_resolved_addresses* result,
+ grpc_error* error);
+
+/* Internal APIs */
+void grpc_custom_resolver_init(grpc_custom_resolver_vtable* impl);
+
+#endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H */
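The header above is the seam a custom iomgr uses to plug in name resolution: supply a synchronous resolve and an asynchronous resolve_async, and report async results through grpc_custom_resolve_callback. A hedged sketch, assuming the embedder only ever sees numeric address literals (everything prefixed my_ is hypothetical; a real backend would call into its own DNS machinery):

#include <stdlib.h>
#include <string.h>

#include <grpc/support/alloc.h>

#include "src/core/lib/iomgr/resolve_address_custom.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"

static grpc_error* my_resolve_sync(char* host, char* port,
                                   grpc_resolved_addresses** res) {
  *res = (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
  (*res)->naddrs = 1;
  (*res)->addrs =
      (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
  memset(&(*res)->addrs[0], 0, sizeof(grpc_resolved_address));
  /* Uses the helper added in sockaddr_utils.cc below; it asserts on anything
     that is not an IPv4/IPv6 literal, so this sketch handles literals only. */
  grpc_string_to_sockaddr(&(*res)->addrs[0], host, atoi(port));
  return GRPC_ERROR_NONE;
}

static void my_resolve_async(grpc_custom_resolver* r, char* host, char* port) {
  grpc_resolved_addresses* res = nullptr;
  grpc_error* err = my_resolve_sync(host, port, &res);
  /* Hand the result (or error) back to iomgr, which schedules the closure. */
  grpc_custom_resolve_callback(r, res, err);
}

static grpc_custom_resolver_vtable my_resolver_vtable = {my_resolve_sync,
                                                         my_resolve_async};
/* Registered once at startup: grpc_custom_resolver_init(&my_resolver_vtable); */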
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index 2f68dbe214..a82075542f 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -42,7 +42,7 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
-static grpc_error* blocking_resolve_address_impl(
+static grpc_error* posix_blocking_resolve_address(
const char* name, const char* default_port,
grpc_resolved_addresses** addresses) {
grpc_core::ExecCtx exec_ctx;
@@ -141,10 +141,6 @@ done:
return err;
}
-grpc_error* (*grpc_blocking_resolve_address)(
- const char* name, const char* default_port,
- grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
-
typedef struct {
char* name;
char* default_port;
@@ -165,17 +161,10 @@ static void do_request_thread(void* rp, grpc_error* error) {
gpr_free(r);
}
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
- if (addrs != nullptr) {
- gpr_free(addrs->addrs);
- }
- gpr_free(addrs);
-}
-
-static void resolve_address_impl(const char* name, const char* default_port,
- grpc_pollset_set* interested_parties,
- grpc_closure* on_done,
- grpc_resolved_addresses** addrs) {
+static void posix_resolve_address(const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
request* r = static_cast<request*>(gpr_malloc(sizeof(request)));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
@@ -186,9 +175,6 @@ static void resolve_address_impl(const char* name, const char* default_port,
GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
}
-void (*grpc_resolve_address)(
- const char* name, const char* default_port,
- grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_resolved_addresses** addrs) = resolve_address_impl;
-
+grpc_address_resolver_vtable grpc_posix_resolver_vtable = {
+ posix_resolve_address, posix_blocking_resolve_address};
#endif
diff --git a/src/core/lib/iomgr/resolve_address_uv.cc b/src/core/lib/iomgr/resolve_address_uv.cc
deleted file mode 100644
index 4d8ea596f3..0000000000
--- a/src/core/lib/iomgr/resolve_address_uv.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/iomgr/port.h"
-#ifdef GRPC_UV
-
-#include <uv.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/gpr/host_port.h"
-#include "src/core/lib/gpr/useful.h"
-#include "src/core/lib/iomgr/closure.h"
-#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-
-#include <string.h>
-
-typedef struct request {
- grpc_closure* on_done;
- grpc_resolved_addresses** addresses;
- struct addrinfo* hints;
- char* host;
- char* port;
-} request;
-
-static int retry_named_port_failure(int status, request* r,
- uv_getaddrinfo_cb getaddrinfo_cb) {
- if (status != 0) {
- // This loop is copied from resolve_address_posix.c
- const char* svc[][2] = {{"http", "80"}, {"https", "443"}};
- for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
- if (strcmp(r->port, svc[i][0]) == 0) {
- int retry_status;
- uv_getaddrinfo_t* req =
- (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t));
- req->data = r;
- r->port = gpr_strdup(svc[i][1]);
- retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
- r->host, r->port, r->hints);
- if (retry_status < 0 || getaddrinfo_cb == NULL) {
- // The callback will not be called
- gpr_free(req);
- }
- return retry_status;
- }
- }
- }
- /* If this function calls uv_getaddrinfo, it will return that function's
- return value. That function only returns numbers <=0, so we can safely
- return 1 to indicate that we never retried */
- return 1;
-}
-
-static grpc_error* handle_addrinfo_result(int status, struct addrinfo* result,
- grpc_resolved_addresses** addresses) {
- struct addrinfo* resp;
- size_t i;
- if (status != 0) {
- grpc_error* error;
- *addresses = NULL;
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- return error;
- }
- (*addresses) =
- (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
- (*addresses)->naddrs = 0;
- for (resp = result; resp != NULL; resp = resp->ai_next) {
- (*addresses)->naddrs++;
- }
- (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc(
- sizeof(grpc_resolved_address) * (*addresses)->naddrs);
- i = 0;
- for (resp = result; resp != NULL; resp = resp->ai_next) {
- memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
- (*addresses)->addrs[i].len = resp->ai_addrlen;
- i++;
- }
-
- {
- for (i = 0; i < (*addresses)->naddrs; i++) {
- char* buf;
- grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
- gpr_free(buf);
- }
- }
- return GRPC_ERROR_NONE;
-}
-
-static void getaddrinfo_callback(uv_getaddrinfo_t* req, int status,
- struct addrinfo* res) {
- request* r = (request*)req->data;
- grpc_core::ExecCtx exec_ctx;
- grpc_error* error;
- int retry_status;
- char* port = r->port;
-
- gpr_free(req);
- retry_status = retry_named_port_failure(status, r, getaddrinfo_callback);
- if (retry_status == 0) {
- /* The request is being retried. It is using its own port string, so we free
- * the original one */
- gpr_free(port);
- return;
- }
- /* Either no retry was attempted, or the retry failed. Either way, the
- original error probably has more interesting information */
- error = handle_addrinfo_result(status, res, r->addresses);
- GRPC_CLOSURE_SCHED(r->on_done, error);
-
- gpr_free(r->hints);
- gpr_free(r->host);
- gpr_free(r->port);
- gpr_free(r);
- uv_freeaddrinfo(res);
-}
-
-static grpc_error* try_split_host_port(const char* name,
- const char* default_port, char** host,
- char** port) {
- /* parse name, splitting it into host and port parts */
- grpc_error* error;
- gpr_split_host_port(name, host, port);
- if (*host == NULL) {
- char* msg;
- gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
- error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
- gpr_free(msg);
- return error;
- }
- if (*port == NULL) {
- // TODO(murgatroid99): add tests for this case
- if (default_port == NULL) {
- char* msg;
- gpr_asprintf(&msg, "no port in name '%s'", name);
- error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
- gpr_free(msg);
- return error;
- }
- *port = gpr_strdup(default_port);
- }
- return GRPC_ERROR_NONE;
-}
-
-static grpc_error* blocking_resolve_address_impl(
- const char* name, const char* default_port,
- grpc_resolved_addresses** addresses) {
- char* host;
- char* port;
- struct addrinfo hints;
- uv_getaddrinfo_t req;
- int s;
- grpc_error* err;
- int retry_status;
- request r;
-
- GRPC_UV_ASSERT_SAME_THREAD();
-
- req.addrinfo = NULL;
-
- err = try_split_host_port(name, default_port, &host, &port);
- if (err != GRPC_ERROR_NONE) {
- goto done;
- }
-
- /* Call getaddrinfo */
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
- hints.ai_socktype = SOCK_STREAM; /* stream socket */
- hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
-
- s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
- r.addresses = addresses;
- r.hints = &hints;
- r.host = host;
- r.port = port;
- retry_status = retry_named_port_failure(s, &r, NULL);
- if (retry_status <= 0) {
- s = retry_status;
- }
- err = handle_addrinfo_result(s, req.addrinfo, addresses);
-
-done:
- gpr_free(host);
- gpr_free(port);
- if (req.addrinfo) {
- uv_freeaddrinfo(req.addrinfo);
- }
- return err;
-}
-
-grpc_error* (*grpc_blocking_resolve_address)(
- const char* name, const char* default_port,
- grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
-
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
- if (addrs != NULL) {
- gpr_free(addrs->addrs);
- }
- gpr_free(addrs);
-}
-
-static void resolve_address_impl(const char* name, const char* default_port,
- grpc_pollset_set* interested_parties,
- grpc_closure* on_done,
- grpc_resolved_addresses** addrs) {
- uv_getaddrinfo_t* req = NULL;
- request* r = NULL;
- struct addrinfo* hints = NULL;
- char* host = NULL;
- char* port = NULL;
- grpc_error* err;
- int s;
- GRPC_UV_ASSERT_SAME_THREAD();
- err = try_split_host_port(name, default_port, &host, &port);
- if (err != GRPC_ERROR_NONE) {
- GRPC_CLOSURE_SCHED(on_done, err);
- gpr_free(host);
- gpr_free(port);
- return;
- }
- r = (request*)gpr_malloc(sizeof(request));
- r->on_done = on_done;
- r->addresses = addrs;
- r->host = host;
- r->port = port;
- req = (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t));
- req->data = r;
-
- /* Call getaddrinfo */
- hints = (addrinfo*)gpr_malloc(sizeof(struct addrinfo));
- memset(hints, 0, sizeof(struct addrinfo));
- hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
- hints->ai_socktype = SOCK_STREAM; /* stream socket */
- hints->ai_flags = AI_PASSIVE; /* for wildcard IP address */
- r->hints = hints;
-
- s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port,
- hints);
-
- if (s != 0) {
- *addrs = NULL;
- err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
- err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(s)));
- GRPC_CLOSURE_SCHED(on_done, err);
- gpr_free(r);
- gpr_free(req);
- gpr_free(hints);
- gpr_free(host);
- gpr_free(port);
- }
-}
-
-void (*grpc_resolve_address)(
- const char* name, const char* default_port,
- grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_resolved_addresses** addrs) = resolve_address_impl;
-
-#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index 7a62c88720..71c92615ad 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -51,7 +51,7 @@ typedef struct {
grpc_resolved_addresses** addresses;
} request;
-static grpc_error* blocking_resolve_address_impl(
+static grpc_error* windows_blocking_resolve_address(
const char* name, const char* default_port,
grpc_resolved_addresses** addresses) {
grpc_core::ExecCtx exec_ctx;
@@ -130,10 +130,6 @@ done:
return error;
}
-grpc_error* (*grpc_blocking_resolve_address)(
- const char* name, const char* default_port,
- grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
-
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
static void do_request_thread(void* rp, grpc_error* error) {
@@ -150,17 +146,10 @@ static void do_request_thread(void* rp, grpc_error* error) {
gpr_free(r);
}
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
- if (addrs != NULL) {
- gpr_free(addrs->addrs);
- }
- gpr_free(addrs);
-}
-
-static void resolve_address_impl(const char* name, const char* default_port,
- grpc_pollset_set* interested_parties,
- grpc_closure* on_done,
- grpc_resolved_addresses** addresses) {
+static void windows_resolve_address(const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses) {
request* r = (request*)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
@@ -171,9 +160,6 @@ static void resolve_address_impl(const char* name, const char* default_port,
GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
}
-void (*grpc_resolve_address)(
- const char* name, const char* default_port,
- grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_resolved_addresses** addresses) = resolve_address_impl;
-
+grpc_address_resolver_vtable grpc_windows_resolver_vtable = {
+ windows_resolve_address, windows_blocking_resolve_address};
#endif
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 4e1c651278..89e8a39118 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -139,8 +139,4 @@ void grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest);
-/* Allocate one slice of length \a size synchronously. */
-grpc_slice grpc_resource_user_slice_malloc(grpc_resource_user* resource_user,
- size_t size);
-
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
diff --git a/src/core/lib/iomgr/sockaddr.h b/src/core/lib/iomgr/sockaddr.h
index 3b30da8a7d..5edf735cd1 100644
--- a/src/core/lib/iomgr/sockaddr.h
+++ b/src/core/lib/iomgr/sockaddr.h
@@ -25,18 +25,8 @@
#include <grpc/support/port_platform.h>
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-#include <uv.h>
-#endif
-
-#ifdef GPR_WINDOWS
-#include "src/core/lib/iomgr/sockaddr_windows.h"
-#endif
-
-#ifdef GRPC_POSIX_SOCKETADDR
+#include "src/core/lib/iomgr/sockaddr_custom.h"
#include "src/core/lib/iomgr/sockaddr_posix.h"
-#endif
+#include "src/core/lib/iomgr/sockaddr_windows.h"
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_H */
diff --git a/src/core/lib/iomgr/sockaddr_custom.h b/src/core/lib/iomgr/sockaddr_custom.h
new file mode 100644
index 0000000000..d85cc504d3
--- /dev/null
+++ b/src/core/lib/iomgr/sockaddr_custom.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H
+
+#include <grpc/support/port_platform.h>
+
+#include <stddef.h>
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+// TODO(kpayson) It would be nice to abstract this so we don't
+// depend on anything uv specific
+typedef struct sockaddr grpc_sockaddr;
+typedef struct sockaddr_in grpc_sockaddr_in;
+typedef struct in_addr grpc_in_addr;
+typedef struct sockaddr_in6 grpc_sockaddr_in6;
+typedef struct in6_addr grpc_in6_addr;
+
+#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN
+#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN
+
+#define GRPC_SOCK_STREAM SOCK_STREAM
+#define GRPC_SOCK_DGRAM SOCK_DGRAM
+
+#define GRPC_AF_UNSPEC AF_UNSPEC
+#define GRPC_AF_UNIX AF_UNIX
+#define GRPC_AF_INET AF_INET
+#define GRPC_AF_INET6 AF_INET6
+
+#define GRPC_AI_PASSIVE AI_PASSIVE
+
+#endif // GRPC_UV
+
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H */
diff --git a/src/core/lib/iomgr/sockaddr_posix.h b/src/core/lib/iomgr/sockaddr_posix.h
index 83981e0aa5..5b18bbc465 100644
--- a/src/core/lib/iomgr/sockaddr_posix.h
+++ b/src/core/lib/iomgr/sockaddr_posix.h
@@ -21,6 +21,9 @@
#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_POSIX_SOCKET
#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
@@ -28,4 +31,25 @@
#include <sys/types.h>
#include <unistd.h>
+typedef struct sockaddr grpc_sockaddr;
+typedef struct sockaddr_in grpc_sockaddr_in;
+typedef struct in_addr grpc_in_addr;
+typedef struct sockaddr_in6 grpc_sockaddr_in6;
+typedef struct in6_addr grpc_in6_addr;
+
+#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN
+#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN
+
+#define GRPC_SOCK_STREAM SOCK_STREAM
+#define GRPC_SOCK_DGRAM SOCK_DGRAM
+
+#define GRPC_AF_UNSPEC AF_UNSPEC
+#define GRPC_AF_UNIX AF_UNIX
+#define GRPC_AF_INET AF_INET
+#define GRPC_AF_INET6 AF_INET6
+
+#define GRPC_AI_PASSIVE AI_PASSIVE
+
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_POSIX_H */
diff --git a/src/core/lib/iomgr/sockaddr_utils.cc b/src/core/lib/iomgr/sockaddr_utils.cc
index fe341779cf..df25f7778a 100644
--- a/src/core/lib/iomgr/sockaddr_utils.cc
+++ b/src/core/lib/iomgr/sockaddr_utils.cc
@@ -40,26 +40,26 @@ static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr,
grpc_resolved_address* resolved_addr4_out) {
GPR_ASSERT(resolved_addr != resolved_addr4_out);
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
- struct sockaddr_in* addr4_out =
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
+ grpc_sockaddr_in* addr4_out =
resolved_addr4_out == nullptr
? nullptr
- : reinterpret_cast<struct sockaddr_in*>(resolved_addr4_out->addr);
- if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6* addr6 =
- reinterpret_cast<const struct sockaddr_in6*>(addr);
+ : reinterpret_cast<grpc_sockaddr_in*>(resolved_addr4_out->addr);
+ if (addr->sa_family == GRPC_AF_INET6) {
+ const grpc_sockaddr_in6* addr6 =
+ reinterpret_cast<const grpc_sockaddr_in6*>(addr);
if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix,
sizeof(kV4MappedPrefix)) == 0) {
if (resolved_addr4_out != nullptr) {
/* Normalize ::ffff:0.0.0.0/96 to IPv4. */
memset(resolved_addr4_out, 0, sizeof(*resolved_addr4_out));
- addr4_out->sin_family = AF_INET;
+ addr4_out->sin_family = GRPC_AF_INET;
/* s6_addr32 would be nice, but it's non-standard. */
memcpy(&addr4_out->sin_addr, &addr6->sin6_addr.s6_addr[12], 4);
addr4_out->sin_port = addr6->sin6_port;
resolved_addr4_out->len =
- static_cast<socklen_t>(sizeof(struct sockaddr_in));
+ static_cast<socklen_t>(sizeof(grpc_sockaddr_in));
}
return 1;
}
@@ -70,20 +70,19 @@ int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr,
int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* resolved_addr,
grpc_resolved_address* resolved_addr6_out) {
GPR_ASSERT(resolved_addr != resolved_addr6_out);
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
- struct sockaddr_in6* addr6_out =
- reinterpret_cast<struct sockaddr_in6*>(resolved_addr6_out->addr);
- if (addr->sa_family == AF_INET) {
- const struct sockaddr_in* addr4 =
- reinterpret_cast<const struct sockaddr_in*>(addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
+ grpc_sockaddr_in6* addr6_out =
+ reinterpret_cast<grpc_sockaddr_in6*>(resolved_addr6_out->addr);
+ if (addr->sa_family == GRPC_AF_INET) {
+ const grpc_sockaddr_in* addr4 =
+ reinterpret_cast<const grpc_sockaddr_in*>(addr);
memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out));
- addr6_out->sin6_family = AF_INET6;
+ addr6_out->sin6_family = GRPC_AF_INET6;
memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12);
memcpy(&addr6_out->sin6_addr.s6_addr[12], &addr4->sin_addr, 4);
addr6_out->sin6_port = addr4->sin_port;
- resolved_addr6_out->len =
- static_cast<socklen_t>(sizeof(struct sockaddr_in6));
+ resolved_addr6_out->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
return 1;
}
return 0;
@@ -91,32 +90,32 @@ int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* resolved_addr,
int grpc_sockaddr_is_wildcard(const grpc_resolved_address* resolved_addr,
int* port_out) {
- const struct sockaddr* addr;
+ const grpc_sockaddr* addr;
grpc_resolved_address addr4_normalized;
if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) {
resolved_addr = &addr4_normalized;
}
- addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
- if (addr->sa_family == AF_INET) {
+ addr = reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
+ if (addr->sa_family == GRPC_AF_INET) {
/* Check for 0.0.0.0 */
- const struct sockaddr_in* addr4 =
- reinterpret_cast<const struct sockaddr_in*>(addr);
+ const grpc_sockaddr_in* addr4 =
+ reinterpret_cast<const grpc_sockaddr_in*>(addr);
if (addr4->sin_addr.s_addr != 0) {
return 0;
}
- *port_out = ntohs(addr4->sin_port);
+ *port_out = grpc_ntohs(addr4->sin_port);
return 1;
- } else if (addr->sa_family == AF_INET6) {
+ } else if (addr->sa_family == GRPC_AF_INET6) {
/* Check for :: */
- const struct sockaddr_in6* addr6 =
- reinterpret_cast<const struct sockaddr_in6*>(addr);
+ const grpc_sockaddr_in6* addr6 =
+ reinterpret_cast<const grpc_sockaddr_in6*>(addr);
int i;
for (i = 0; i < 16; i++) {
if (addr6->sin6_addr.s6_addr[i] != 0) {
return 0;
}
}
- *port_out = ntohs(addr6->sin6_port);
+ *port_out = grpc_ntohs(addr6->sin6_port);
return 1;
} else {
return 0;
@@ -131,33 +130,33 @@ void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out,
void grpc_sockaddr_make_wildcard4(int port,
grpc_resolved_address* resolved_wild_out) {
- struct sockaddr_in* wild_out =
- reinterpret_cast<struct sockaddr_in*>(resolved_wild_out->addr);
+ grpc_sockaddr_in* wild_out =
+ reinterpret_cast<grpc_sockaddr_in*>(resolved_wild_out->addr);
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
- wild_out->sin_family = AF_INET;
- wild_out->sin_port = htons(static_cast<uint16_t>(port));
- resolved_wild_out->len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
+ wild_out->sin_family = GRPC_AF_INET;
+ wild_out->sin_port = grpc_htons(static_cast<uint16_t>(port));
+ resolved_wild_out->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in));
}
void grpc_sockaddr_make_wildcard6(int port,
grpc_resolved_address* resolved_wild_out) {
- struct sockaddr_in6* wild_out =
- reinterpret_cast<struct sockaddr_in6*>(resolved_wild_out->addr);
+ grpc_sockaddr_in6* wild_out =
+ reinterpret_cast<grpc_sockaddr_in6*>(resolved_wild_out->addr);
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
- wild_out->sin6_family = AF_INET6;
- wild_out->sin6_port = htons(static_cast<uint16_t>(port));
- resolved_wild_out->len = static_cast<socklen_t>(sizeof(struct sockaddr_in6));
+ wild_out->sin6_family = GRPC_AF_INET6;
+ wild_out->sin6_port = grpc_htons(static_cast<uint16_t>(port));
+ resolved_wild_out->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
}
int grpc_sockaddr_to_string(char** out,
const grpc_resolved_address* resolved_addr,
int normalize) {
- const struct sockaddr* addr;
+ const grpc_sockaddr* addr;
const int save_errno = errno;
grpc_resolved_address addr_normalized;
- char ntop_buf[INET6_ADDRSTRLEN];
+ char ntop_buf[GRPC_INET6_ADDRSTRLEN];
const void* ip = nullptr;
int port = 0;
uint32_t sin6_scope_id = 0;
@@ -167,17 +166,17 @@ int grpc_sockaddr_to_string(char** out,
if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
resolved_addr = &addr_normalized;
}
- addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
- if (addr->sa_family == AF_INET) {
- const struct sockaddr_in* addr4 =
- reinterpret_cast<const struct sockaddr_in*>(addr);
+ addr = reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
+ if (addr->sa_family == GRPC_AF_INET) {
+ const grpc_sockaddr_in* addr4 =
+ reinterpret_cast<const grpc_sockaddr_in*>(addr);
ip = &addr4->sin_addr;
- port = ntohs(addr4->sin_port);
- } else if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6* addr6 =
- reinterpret_cast<const struct sockaddr_in6*>(addr);
+ port = grpc_ntohs(addr4->sin_port);
+ } else if (addr->sa_family == GRPC_AF_INET6) {
+ const grpc_sockaddr_in6* addr6 =
+ reinterpret_cast<const grpc_sockaddr_in6*>(addr);
ip = &addr6->sin6_addr;
- port = ntohs(addr6->sin6_port);
+ port = grpc_ntohs(addr6->sin6_port);
sin6_scope_id = addr6->sin6_scope_id;
}
if (ip != nullptr && grpc_inet_ntop(addr->sa_family, ip, ntop_buf,
@@ -199,6 +198,22 @@ int grpc_sockaddr_to_string(char** out,
return ret;
}
+void grpc_string_to_sockaddr(grpc_resolved_address* out, char* addr, int port) {
+ grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)out->addr;
+ grpc_sockaddr_in* addr4 = (grpc_sockaddr_in*)out->addr;
+
+ if (grpc_inet_pton(GRPC_AF_INET6, addr, &addr6->sin6_addr) == 1) {
+ addr6->sin6_family = GRPC_AF_INET6;
+ out->len = sizeof(grpc_sockaddr_in6);
+ } else if (grpc_inet_pton(GRPC_AF_INET, addr, &addr4->sin_addr) == 1) {
+ addr4->sin_family = GRPC_AF_INET;
+ out->len = sizeof(grpc_sockaddr_in);
+ } else {
+ GPR_ASSERT(0);
+ }
+ grpc_sockaddr_set_port(out, port);
+}
+
char* grpc_sockaddr_to_uri(const grpc_resolved_address* resolved_addr) {
grpc_resolved_address addr_normalized;
if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
@@ -221,33 +236,33 @@ char* grpc_sockaddr_to_uri(const grpc_resolved_address* resolved_addr) {
const char* grpc_sockaddr_get_uri_scheme(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
- case AF_INET:
+ case GRPC_AF_INET:
return "ipv4";
- case AF_INET6:
+ case GRPC_AF_INET6:
return "ipv6";
- case AF_UNIX:
+ case GRPC_AF_UNIX:
return "unix";
}
return nullptr;
}
int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
return addr->sa_family;
}
int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
- case AF_INET:
- return ntohs(((struct sockaddr_in*)addr)->sin_port);
- case AF_INET6:
- return ntohs(((struct sockaddr_in6*)addr)->sin6_port);
+ case GRPC_AF_INET:
+ return grpc_ntohs(((grpc_sockaddr_in*)addr)->sin_port);
+ case GRPC_AF_INET6:
+ return grpc_ntohs(((grpc_sockaddr_in6*)addr)->sin6_port);
default:
if (grpc_is_unix_socket(resolved_addr)) {
return 1;
@@ -260,18 +275,18 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr,
int port) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
- case AF_INET:
+ case GRPC_AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in*)addr)->sin_port =
- htons(static_cast<uint16_t>(port));
+ ((grpc_sockaddr_in*)addr)->sin_port =
+ grpc_htons(static_cast<uint16_t>(port));
return 1;
- case AF_INET6:
+ case GRPC_AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in6*)addr)->sin6_port =
- htons(static_cast<uint16_t>(port));
+ ((grpc_sockaddr_in6*)addr)->sin6_port =
+ grpc_htons(static_cast<uint16_t>(port));
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index ace54a2a80..a4e90a73ab 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -71,6 +71,8 @@ int grpc_sockaddr_set_port(const grpc_resolved_address* addr, int port);
int grpc_sockaddr_to_string(char** out, const grpc_resolved_address* addr,
int normalize);
+void grpc_string_to_sockaddr(grpc_resolved_address* out, char* addr, int port);
+
/* Returns the URI string corresponding to \a addr */
char* grpc_sockaddr_to_uri(const grpc_resolved_address* addr);
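grpc_string_to_sockaddr is the inverse of grpc_sockaddr_to_string for numeric literals; the custom transports below use it to convert the embedder's string addresses into grpc_resolved_address. A small illustrative round trip (not from the patch):

#include <string.h>

#include <grpc/support/alloc.h>

#include "src/core/lib/iomgr/sockaddr_utils.h"

static void example_roundtrip(void) {
  grpc_resolved_address addr;
  memset(&addr, 0, sizeof(addr));
  grpc_string_to_sockaddr(&addr, (char*)"127.0.0.1", 8080);

  char* text = nullptr;
  grpc_sockaddr_to_string(&text, &addr, 0 /* don't normalize */);
  /* text should now read "127.0.0.1:8080"; the caller owns and frees it. */
  gpr_free(text);
}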
diff --git a/src/core/lib/iomgr/sockaddr_windows.h b/src/core/lib/iomgr/sockaddr_windows.h
index 3a4fcc9e8a..4d637251a1 100644
--- a/src/core/lib/iomgr/sockaddr_windows.h
+++ b/src/core/lib/iomgr/sockaddr_windows.h
@@ -31,6 +31,25 @@
// must be included after the above
#include <mswsock.h>
+typedef struct sockaddr grpc_sockaddr;
+typedef struct sockaddr_in grpc_sockaddr_in;
+typedef struct in_addr grpc_in_addr;
+typedef struct sockaddr_in6 grpc_sockaddr_in6;
+typedef struct in6_addr grpc_in6_addr;
+
+#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN
+#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN
+
+#define GRPC_SOCK_STREAM SOCK_STREAM
+#define GRPC_SOCK_DGRAM SOCK_DGRAM
+
+#define GRPC_AF_UNSPEC AF_UNSPEC
+#define GRPC_AF_UNIX AF_UNIX
+#define GRPC_AF_INET AF_INET
+#define GRPC_AF_INET6 AF_INET6
+
+#define GRPC_AI_PASSIVE AI_PASSIVE
+
#endif
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H */
diff --git a/src/core/lib/iomgr/socket_utils.h b/src/core/lib/iomgr/socket_utils.h
index e96eb97a7e..cf1a7be648 100644
--- a/src/core/lib/iomgr/socket_utils.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -23,6 +23,15 @@
#include <stddef.h>
+/* A wrapper for htons on POSIX and Windows */
+uint16_t grpc_htons(uint16_t hostshort);
+
+/* A wrapper for ntohs on POSIX and Windows */
+uint16_t grpc_ntohs(uint16_t netshort);
+
+/* A wrapper for inet_pton on POSIX and Windows */
+int grpc_inet_pton(int af, const char* src, void* dst);
+
/* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size);
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc
index 4fb6c7ad63..c52e237fa8 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -43,6 +43,7 @@
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
/* set a socket to non blocking mode */
@@ -215,12 +216,11 @@ static void probe_ipv6_once(void) {
if (fd < 0) {
gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed.");
} else {
- struct sockaddr_in6 addr;
+ grpc_sockaddr_in6 addr;
memset(&addr, 0, sizeof(addr));
addr.sin6_family = AF_INET6;
addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */
- if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) ==
- 0) {
+ if (bind(fd, reinterpret_cast<grpc_sockaddr*>(&addr), sizeof(addr)) == 0) {
g_ipv6_loopback_available = 1;
} else {
gpr_log(GPR_INFO,
@@ -280,8 +280,8 @@ static int create_socket(grpc_socket_factory* factory, int domain, int type,
grpc_error* grpc_create_dualstack_socket_using_factory(
grpc_socket_factory* factory, const grpc_resolved_address* resolved_addr,
int type, int protocol, grpc_dualstack_mode* dsmode, int* newfd) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
@@ -311,6 +311,14 @@ grpc_error* grpc_create_dualstack_socket_using_factory(
return error_for_fd(*newfd, resolved_addr);
}
+uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); }
+
+uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); }
+
+int grpc_inet_pton(int af, const char* src, void* dst) {
+ return inet_pton(af, src, dst);
+}
+
const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
GPR_ASSERT(size <= (socklen_t)-1);
return inet_ntop(af, src, dst, static_cast<socklen_t>(size));
diff --git a/src/core/lib/iomgr/socket_utils_linux.cc b/src/core/lib/iomgr/socket_utils_linux.cc
index a5ce9f9912..f506329f97 100644
--- a/src/core/lib/iomgr/socket_utils_linux.cc
+++ b/src/core/lib/iomgr/socket_utils_linux.cc
@@ -37,8 +37,7 @@ int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
flags |= nonblock ? SOCK_NONBLOCK : 0;
flags |= cloexec ? SOCK_CLOEXEC : 0;
- return accept4(sockfd,
- reinterpret_cast<struct sockaddr*>(resolved_addr->addr),
+ return accept4(sockfd, reinterpret_cast<grpc_sockaddr*>(resolved_addr->addr),
&resolved_addr->len, flags);
}
diff --git a/src/core/lib/iomgr/socket_utils_posix.cc b/src/core/lib/iomgr/socket_utils_posix.cc
index c856f641e3..d5d00af976 100644
--- a/src/core/lib/iomgr/socket_utils_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_posix.cc
@@ -36,7 +36,7 @@ int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
int fd, flags;
GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
- fd = accept(sockfd, (struct sockaddr*)resolved_addr->addr,
+ fd = accept(sockfd, (grpc_sockaddr*)resolved_addr->addr,
(socklen_t*)&resolved_addr->len);
if (fd >= 0) {
if (nonblock) {
diff --git a/src/core/lib/iomgr/socket_utils_uv.cc b/src/core/lib/iomgr/socket_utils_uv.cc
index 3f650eef66..8538abc7e4 100644
--- a/src/core/lib/iomgr/socket_utils_uv.cc
+++ b/src/core/lib/iomgr/socket_utils_uv.cc
@@ -22,15 +22,24 @@
#ifdef GRPC_UV
-#include <uv.h>
-
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/socket_utils.h"
#include <grpc/support/log.h>
+#include <uv.h>
+
+uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); }
+
+uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); }
+
+int grpc_inet_pton(int af, const char* src, void* dst) {
+ return inet_pton(af, src, dst);
+}
+
const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
- uv_inet_ntop(af, src, dst, size);
- return dst;
+ /* Delegate to the platform inet_ntop; the uv_inet_ntop wrapper is no longer needed */
+ return inet_ntop(af, src, dst, (socklen_t)size);
}
#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/socket_utils_windows.cc b/src/core/lib/iomgr/socket_utils_windows.cc
index 5fc3b7617e..3e7b5b812d 100644
--- a/src/core/lib/iomgr/socket_utils_windows.cc
+++ b/src/core/lib/iomgr/socket_utils_windows.cc
@@ -27,6 +27,14 @@
#include <grpc/support/log.h>
+uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); }
+
+uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); }
+
+int grpc_inet_pton(int af, const char* src, void* dst) {
+ return inet_pton(af, src, dst);
+}
+
const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void*)src, dst, size);
diff --git a/src/core/lib/iomgr/tcp_client.cc b/src/core/lib/iomgr/tcp_client.cc
new file mode 100644
index 0000000000..6c0ba40781
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client.cc
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/tcp_client.h"
+
+grpc_tcp_client_vtable* grpc_tcp_client_impl;
+
+void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
+ grpc_millis deadline) {
+ grpc_tcp_client_impl->connect(closure, ep, interested_parties, channel_args,
+ addr, deadline);
+}
+
+void grpc_set_tcp_client_impl(grpc_tcp_client_vtable* impl) {
+ grpc_tcp_client_impl = impl;
+}
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index a6b99e63c2..d209eeb8c2 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -27,6 +27,13 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
+typedef struct grpc_tcp_client_vtable {
+ void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr, grpc_millis deadline);
+} grpc_tcp_client_vtable;
+
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure).
@@ -38,4 +45,8 @@ void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
const grpc_resolved_address* addr,
grpc_millis deadline);
+void grpc_tcp_client_global_init();
+
+void grpc_set_tcp_client_impl(grpc_tcp_client_vtable* impl);
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */
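
With this change grpc_tcp_client_connect() has a single definition (tcp_client.cc above) and simply forwards to whichever grpc_tcp_client_vtable was registered at iomgr startup. A minimal sketch of how a port plugs in, assuming a hypothetical my_* implementation (all my_* names are invented for illustration, not part of this commit):

static void my_tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
                           grpc_pollset_set* interested_parties,
                           const grpc_channel_args* channel_args,
                           const grpc_resolved_address* addr,
                           grpc_millis deadline) {
  /* Platform-specific connection logic goes here; on completion the
     implementation fills *endpoint (on success) and schedules on_connect. */
}

static grpc_tcp_client_vtable my_tcp_client_vtable = {my_tcp_connect};

void my_iomgr_platform_init(void) {
  /* After this call grpc_tcp_client_connect() dispatches to my_tcp_connect. */
  grpc_set_tcp_client_impl(&my_tcp_client_vtable);
}

The posix, windows and custom implementations below each register their own vtable this same way.
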
diff --git a/src/core/lib/iomgr/tcp_client_custom.cc b/src/core/lib/iomgr/tcp_client_custom.cc
new file mode 100644
index 0000000000..55632a55a1
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client_custom.cc
@@ -0,0 +1,151 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
+#include "src/core/lib/iomgr/timer.h"
+
+extern grpc_core::TraceFlag grpc_tcp_trace;
+extern grpc_socket_vtable* grpc_custom_socket_vtable;
+
+struct grpc_custom_tcp_connect {
+ grpc_custom_socket* socket;
+ grpc_timer alarm;
+ grpc_closure on_alarm;
+ grpc_closure* closure;
+ grpc_endpoint** endpoint;
+ int refs;
+ char* addr_name;
+ grpc_resource_quota* resource_quota;
+};
+
+static void custom_tcp_connect_cleanup(grpc_custom_tcp_connect* connect) {
+ grpc_custom_socket* socket = connect->socket;
+ grpc_resource_quota_unref_internal(connect->resource_quota);
+ gpr_free(connect->addr_name);
+ gpr_free(connect);
+ socket->refs--;
+ if (socket->refs == 0) {
+ grpc_custom_socket_vtable->destroy(socket);
+ gpr_free(socket);
+ }
+}
+
+static void custom_close_callback(grpc_custom_socket* socket) {}
+
+static void on_alarm(void* acp, grpc_error* error) {
+ int done;
+ grpc_custom_socket* socket = (grpc_custom_socket*)acp;
+ grpc_custom_tcp_connect* connect = socket->connector;
+ if (grpc_tcp_trace.enabled()) {
+ const char* str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
+ connect->addr_name, str);
+ }
+ if (error == GRPC_ERROR_NONE) {
+ /* error == NONE implies that the timer ran out, and wasn't cancelled. If
+ it was cancelled, then the handler that cancelled it also should close
+ the handle, if applicable */
+ grpc_custom_socket_vtable->close(socket, custom_close_callback);
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ custom_tcp_connect_cleanup(connect);
+ }
+}
+
+static void custom_connect_callback(grpc_custom_socket* socket,
+ grpc_error* error) {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_custom_tcp_connect* connect = socket->connector;
+ int done;
+ grpc_closure* closure = connect->closure;
+ grpc_timer_cancel(&connect->alarm);
+ if (error == GRPC_ERROR_NONE) {
+ *connect->endpoint = custom_tcp_endpoint_create(
+ socket, connect->resource_quota, connect->addr_name);
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ grpc_core::ExecCtx::Get()->Flush();
+ custom_tcp_connect_cleanup(connect);
+ }
+ GRPC_CLOSURE_SCHED(closure, error);
+}
+
+static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* resolved_addr,
+ grpc_millis deadline) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ (void)channel_args;
+ (void)interested_parties;
+ grpc_custom_tcp_connect* connect;
+ grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
+ if (channel_args != nullptr) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_unref_internal(resource_quota);
+ resource_quota = grpc_resource_quota_ref_internal(
+ (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+ grpc_custom_socket* socket =
+ (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
+ socket->refs = 2;
+ grpc_custom_socket_vtable->init(socket, GRPC_AF_UNSPEC);
+ connect =
+ (grpc_custom_tcp_connect*)gpr_malloc(sizeof(grpc_custom_tcp_connect));
+ connect->closure = closure;
+ connect->endpoint = ep;
+ connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
+ connect->resource_quota = resource_quota;
+ connect->socket = socket;
+ socket->connector = connect;
+ socket->endpoint = nullptr;
+ socket->listener = nullptr;
+ connect->refs = 2;
+
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p %s: asynchronously connecting",
+ socket, connect->addr_name);
+ }
+
+ grpc_custom_socket_vtable->connect(
+ socket, (const grpc_sockaddr*)resolved_addr->addr, resolved_addr->len,
+ custom_connect_callback);
+ GRPC_CLOSURE_INIT(&connect->on_alarm, on_alarm, socket,
+ grpc_schedule_on_exec_ctx);
+ grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
+}
+
+grpc_tcp_client_vtable custom_tcp_client_vtable = {tcp_connect};
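
tcp_connect() above holds two references on the connect state: custom_connect_callback() drops one and on_alarm() drops the other when the deadline timer fires or is cancelled, so whichever path runs last performs custom_tcp_connect_cleanup(). The same handoff in isolation, with the grpc-specific types stripped out purely for illustration:

typedef struct {
  int refs; /* starts at 2: one for the connect callback, one for the alarm */
} pending_connect;

/* Each completion path calls this exactly once; the caller that sees `true`
   is the last owner and must run the cleanup. */
static bool release_one(pending_connect* pc) { return --pc->refs == 0; }
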
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 8c72f675f0..ba943d302a 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -38,6 +38,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_mutator.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
@@ -293,7 +294,7 @@ void grpc_tcp_client_create_from_prepared_fd(
async_connect* ac;
do {
GPR_ASSERT(addr->len < ~(socklen_t)0);
- err = connect(fd, reinterpret_cast<const struct sockaddr*>(addr->addr),
+ err = connect(fd, reinterpret_cast<const grpc_sockaddr*>(addr->addr),
addr->len);
} while (err < 0 && errno == EINTR);
if (err >= 0) {
@@ -336,11 +337,11 @@ void grpc_tcp_client_create_from_prepared_fd(
gpr_mu_unlock(&ac->mu);
}
-static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
+static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
+ grpc_millis deadline) {
grpc_resolved_address mapped_addr;
grpc_fd* fdobj = nullptr;
grpc_error* error;
@@ -355,20 +356,5 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
ep);
}
-// overridden by api_fuzzer.c
-void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) = tcp_client_connect_impl;
-
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
-}
-
+grpc_tcp_client_vtable grpc_posix_tcp_client_vtable = {tcp_connect};
#endif
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
deleted file mode 100644
index d29d6c8f41..0000000000
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/iomgr/tcp_client.h"
-#include "src/core/lib/iomgr/tcp_uv.h"
-#include "src/core/lib/iomgr/timer.h"
-
-extern grpc_core::TraceFlag grpc_tcp_trace;
-
-typedef struct grpc_uv_tcp_connect {
- uv_connect_t connect_req;
- grpc_timer alarm;
- grpc_closure on_alarm;
- uv_tcp_t* tcp_handle;
- grpc_closure* closure;
- grpc_endpoint** endpoint;
- int refs;
- char* addr_name;
- grpc_resource_quota* resource_quota;
-} grpc_uv_tcp_connect;
-
-static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect* connect) {
- grpc_resource_quota_unref_internal(connect->resource_quota);
- gpr_free(connect->addr_name);
- gpr_free(connect);
-}
-
-static void tcp_close_callback(uv_handle_t* handle) { gpr_free(handle); }
-
-static void uv_tc_on_alarm(void* acp, grpc_error* error) {
- int done;
- grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)acp;
- if (grpc_tcp_trace.enabled()) {
- const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
- connect->addr_name, str);
- }
- if (error == GRPC_ERROR_NONE) {
- /* error == NONE implies that the timer ran out, and wasn't cancelled. If
- it was cancelled, then the handler that cancelled it also should close
- the handle, if applicable */
- uv_close((uv_handle_t*)connect->tcp_handle, tcp_close_callback);
- }
- done = (--connect->refs == 0);
- if (done) {
- uv_tcp_connect_cleanup(connect);
- }
-}
-
-static void uv_tc_on_connect(uv_connect_t* req, int status) {
- grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)req->data;
- grpc_core::ExecCtx exec_ctx;
- grpc_error* error = GRPC_ERROR_NONE;
- int done;
- grpc_closure* closure = connect->closure;
- grpc_timer_cancel(&connect->alarm);
- if (status == 0) {
- *connect->endpoint = grpc_tcp_create(
- connect->tcp_handle, connect->resource_quota, connect->addr_name);
- } else {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to connect to remote host");
- error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- if (status == UV_ECANCELED) {
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string("Timeout occurred"));
- // This should only happen if the handle is already closed
- } else {
- error = grpc_error_set_str(
- error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- uv_close((uv_handle_t*)connect->tcp_handle, tcp_close_callback);
- }
- }
- done = (--connect->refs == 0);
- if (done) {
- grpc_core::ExecCtx::Get()->Flush();
- uv_tcp_connect_cleanup(connect);
- }
- GRPC_CLOSURE_SCHED(closure, error);
-}
-
-static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* resolved_addr,
- grpc_millis deadline) {
- grpc_uv_tcp_connect* connect;
- grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
- (void)channel_args;
- (void)interested_parties;
-
- GRPC_UV_ASSERT_SAME_THREAD();
-
- if (channel_args != NULL) {
- for (size_t i = 0; i < channel_args->num_args; i++) {
- if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(resource_quota);
- resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
- }
- }
- }
-
- connect = (grpc_uv_tcp_connect*)gpr_zalloc(sizeof(grpc_uv_tcp_connect));
- connect->closure = closure;
- connect->endpoint = ep;
- connect->tcp_handle = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
- connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
- connect->resource_quota = resource_quota;
- uv_tcp_init(uv_default_loop(), connect->tcp_handle);
- connect->connect_req.data = connect;
- connect->refs = 2; // One for the connect operation, one for the timer.
-
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
- connect->addr_name);
- }
-
- // TODO(murgatroid99): figure out what the return value here means
- uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
- (const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
- GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
- grpc_schedule_on_exec_ctx);
- grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
-}
-
-// overridden by api_fuzzer.c
-void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) = tcp_client_connect_impl;
-
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
-}
-
-#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 70c2495350..e5b5502597 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -122,12 +122,11 @@ static void on_connect(void* acp, grpc_error* error) {
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
-static void tcp_client_connect_impl(grpc_closure* on_done,
- grpc_endpoint** endpoint,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
+static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
+ grpc_millis deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
@@ -175,7 +174,7 @@ static void tcp_client_connect_impl(grpc_closure* on_done,
grpc_sockaddr_make_wildcard6(0, &local_address);
status =
- bind(sock, (struct sockaddr*)&local_address.addr, (int)local_address.len);
+ bind(sock, (grpc_sockaddr*)&local_address.addr, (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -183,7 +182,7 @@ static void tcp_client_connect_impl(grpc_closure* on_done,
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- success = ConnectEx(sock, (struct sockaddr*)&addr->addr, (int)addr->len, NULL,
+ success = ConnectEx(sock, (grpc_sockaddr*)&addr->addr, (int)addr->len, NULL,
0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
@@ -227,20 +226,6 @@ failure:
GRPC_CLOSURE_SCHED(on_done, final_error);
}
-// overridden by api_fuzzer.c
-void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) = tcp_client_connect_impl;
-
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
-}
+grpc_tcp_client_vtable grpc_windows_tcp_client_vtable = {tcp_connect};
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_custom.cc b/src/core/lib/iomgr/tcp_custom.cc
new file mode 100644
index 0000000000..2b1fc93028
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_custom.cc
@@ -0,0 +1,365 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/slice_buffer.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/resource_quota.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+
+#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
+
+extern grpc_core::TraceFlag grpc_tcp_trace;
+
+grpc_socket_vtable* grpc_custom_socket_vtable = nullptr;
+extern grpc_tcp_server_vtable custom_tcp_server_vtable;
+extern grpc_tcp_client_vtable custom_tcp_client_vtable;
+
+void grpc_custom_endpoint_init(grpc_socket_vtable* impl) {
+ grpc_custom_socket_vtable = impl;
+ grpc_set_tcp_client_impl(&custom_tcp_client_vtable);
+ grpc_set_tcp_server_impl(&custom_tcp_server_vtable);
+}
+
+typedef struct {
+ grpc_endpoint base;
+ gpr_refcount refcount;
+ grpc_custom_socket* socket;
+
+ grpc_closure* read_cb;
+ grpc_closure* write_cb;
+
+ grpc_slice_buffer* read_slices;
+ grpc_slice_buffer* write_slices;
+
+ grpc_resource_user* resource_user;
+ grpc_resource_user_slice_allocator slice_allocator;
+
+ bool shutting_down;
+
+ char* peer_string;
+} custom_tcp_endpoint;
+
+static void tcp_free(grpc_custom_socket* s) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)s->endpoint;
+ grpc_resource_user_unref(tcp->resource_user);
+ gpr_free(tcp->peer_string);
+ gpr_free(tcp);
+ s->refs--;
+ if (s->refs == 0) {
+ grpc_custom_socket_vtable->destroy(s);
+ gpr_free(s);
+ }
+}
+
+#ifndef NDEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(custom_tcp_endpoint* tcp, const char* reason,
+ const char* file, int line) {
+ if (grpc_tcp_trace.enabled()) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
+ val, val - 1);
+ }
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp->socket);
+ }
+}
+
+static void tcp_ref(custom_tcp_endpoint* tcp, const char* reason,
+ const char* file, int line) {
+ if (grpc_tcp_trace.enabled()) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
+ val, val + 1);
+ }
+ gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(custom_tcp_endpoint* tcp) {
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp->socket);
+ }
+}
+
+static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
+ grpc_closure* cb = tcp->read_cb;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
+ cb->cb_arg);
+ size_t i;
+ const char* str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "read: error=%s", str);
+
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
+ gpr_free(dump);
+ }
+ }
+ TCP_UNREF(tcp, "read");
+ tcp->read_slices = nullptr;
+ tcp->read_cb = nullptr;
+ GRPC_CLOSURE_RUN(cb, error);
+}
+
+static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
+ grpc_error* error) {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_slice_buffer garbage;
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
+ if (error == GRPC_ERROR_NONE && nread == 0) {
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
+ }
+ if (error == GRPC_ERROR_NONE) {
+ // Successful read
+ if ((size_t)nread < tcp->read_slices->length) {
+ /* TODO(murgatroid99): Instead of discarding the unused part of the read
+ * buffer, reuse it as the next read buffer. */
+ grpc_slice_buffer_init(&garbage);
+ grpc_slice_buffer_trim_end(
+ tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
+ grpc_slice_buffer_reset_and_unref_internal(&garbage);
+ }
+ } else {
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+ }
+ call_read_cb(tcp, error);
+}
+
+static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp->socket,
+ grpc_error_string(error));
+ }
+ if (error == GRPC_ERROR_NONE) {
+ /* Before calling read, we allocate a buffer with exactly one slice
+ * to tcp->read_slices and wait for the callback indicating that the
+ * allocation was successful. So slices[0] should always exist here */
+ char* buffer = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
+ size_t len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
+ grpc_custom_socket_vtable->read(tcp->socket, buffer, len,
+ custom_read_callback);
+ } else {
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+ call_read_cb(tcp, GRPC_ERROR_REF(error));
+ }
+ if (grpc_tcp_trace.enabled()) {
+ const char* str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp->socket, str);
+ }
+}
+
+static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
+ grpc_closure* cb) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ GPR_ASSERT(tcp->read_cb == nullptr);
+ tcp->read_cb = cb;
+ tcp->read_slices = read_slices;
+ grpc_slice_buffer_reset_and_unref_internal(read_slices);
+ TCP_REF(tcp, "read");
+ grpc_resource_user_alloc_slices(&tcp->slice_allocator,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
+ tcp->read_slices);
+}
+
+static void custom_write_callback(grpc_custom_socket* socket,
+ grpc_error* error) {
+ grpc_core::ExecCtx exec_ctx;
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
+ grpc_closure* cb = tcp->write_cb;
+ tcp->write_cb = nullptr;
+ if (grpc_tcp_trace.enabled()) {
+ const char* str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp->socket, str);
+ }
+ TCP_UNREF(tcp, "write");
+ GRPC_CLOSURE_SCHED(cb, error);
+}
+
+static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
+ grpc_closure* cb) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+
+ if (grpc_tcp_trace.enabled()) {
+ size_t j;
+
+ for (j = 0; j < write_slices->count; j++) {
+ char* data = grpc_dump_slice(write_slices->slices[j],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp->socket,
+ tcp->peer_string, data);
+ gpr_free(data);
+ }
+ }
+
+ if (tcp->shutting_down) {
+ GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "TCP socket is shutting down"));
+ return;
+ }
+
+ GPR_ASSERT(tcp->write_cb == nullptr);
+ tcp->write_slices = write_slices;
+ GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
+ if (tcp->write_slices->count == 0) {
+    // No slices means we don't have to do anything, and some transports
+    // (such as libuv) reject empty writes
+ GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
+ return;
+ }
+ tcp->write_cb = cb;
+ TCP_REF(tcp, "write");
+ grpc_custom_socket_vtable->write(tcp->socket, tcp->write_slices,
+ custom_write_callback);
+}
+
+static void endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)ep;
+ (void)pollset;
+}
+
+static void endpoint_add_to_pollset_set(grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)ep;
+ (void)pollset;
+}
+
+static void endpoint_delete_from_pollset_set(grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)ep;
+ (void)pollset;
+}
+
+static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ if (!tcp->shutting_down) {
+ if (grpc_tcp_trace.enabled()) {
+ const char* str = grpc_error_string(why);
+ gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->socket, str);
+ }
+ tcp->shutting_down = true;
+ // GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why));
+ // GRPC_CLOSURE_SCHED(tcp->write_cb, GRPC_ERROR_REF(why));
+ // tcp->read_cb = nullptr;
+ // tcp->write_cb = nullptr;
+ grpc_resource_user_shutdown(tcp->resource_user);
+ grpc_custom_socket_vtable->shutdown(tcp->socket);
+ }
+ GRPC_ERROR_UNREF(why);
+}
+
+static void custom_close_callback(grpc_custom_socket* socket) {
+ socket->refs--;
+ if (socket->refs == 0) {
+ grpc_custom_socket_vtable->destroy(socket);
+ gpr_free(socket);
+ } else if (socket->endpoint) {
+ grpc_core::ExecCtx exec_ctx;
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
+ TCP_UNREF(tcp, "destroy");
+ }
+}
+
+static void endpoint_destroy(grpc_endpoint* ep) {
+ grpc_network_status_unregister_endpoint(ep);
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ grpc_custom_socket_vtable->close(tcp->socket, custom_close_callback);
+}
+
+static char* endpoint_get_peer(grpc_endpoint* ep) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ return gpr_strdup(tcp->peer_string);
+}
+
+static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
+ custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
+ return tcp->resource_user;
+}
+
+static int endpoint_get_fd(grpc_endpoint* ep) { return -1; }
+
+static grpc_endpoint_vtable vtable = {endpoint_read,
+ endpoint_write,
+ endpoint_add_to_pollset,
+ endpoint_add_to_pollset_set,
+ endpoint_delete_from_pollset_set,
+ endpoint_shutdown,
+ endpoint_destroy,
+ endpoint_get_resource_user,
+ endpoint_get_peer,
+ endpoint_get_fd};
+
+grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
+ grpc_resource_quota* resource_quota,
+ char* peer_string) {
+ custom_tcp_endpoint* tcp =
+ (custom_tcp_endpoint*)gpr_malloc(sizeof(custom_tcp_endpoint));
+ grpc_core::ExecCtx exec_ctx;
+
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", socket);
+ }
+ memset(tcp, 0, sizeof(custom_tcp_endpoint));
+ socket->refs++;
+ socket->endpoint = (grpc_endpoint*)tcp;
+ tcp->socket = socket;
+ tcp->base.vtable = &vtable;
+ gpr_ref_init(&tcp->refcount, 1);
+ tcp->peer_string = gpr_strdup(peer_string);
+ tcp->shutting_down = false;
+ tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
+ grpc_resource_user_slice_allocator_init(
+ &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
+ /* Tell network status tracking code about the new endpoint */
+ grpc_network_status_register_endpoint(&tcp->base);
+
+ return &tcp->base;
+}
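
The read path in tcp_custom.cc is driven entirely by vtable callbacks: endpoint_read() allocates a single slice through the resource user, tcp_read_allocation_done() hands that buffer to grpc_custom_socket_vtable->read(), and custom_read_callback() trims the slice to the bytes actually received (nread == 0 with no error is treated as EOF). A minimal sketch of a read implementation that satisfies this assumed contract, using a hypothetical in-memory transport (my_transport and its fields are invented):

typedef struct {
  const char* pending_data; /* bytes waiting to be delivered */
  size_t pending_len;
} my_transport; /* hypothetical state stored in socket->impl */

static void my_socket_read(grpc_custom_socket* socket, char* buffer,
                           size_t length, grpc_custom_read_callback cb) {
  my_transport* t = (my_transport*)socket->impl;
  size_t n = t->pending_len < length ? t->pending_len : length;
  memcpy(buffer, t->pending_data, n);
  t->pending_data += n;
  t->pending_len -= n;
  /* Exactly one callback per read; 0 bytes with GRPC_ERROR_NONE means EOF. */
  cb(socket, n, GRPC_ERROR_NONE);
}
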
diff --git a/src/core/lib/iomgr/tcp_custom.h b/src/core/lib/iomgr/tcp_custom.h
new file mode 100644
index 0000000000..22caa149f8
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_custom.h
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+typedef struct grpc_custom_tcp_connect grpc_custom_tcp_connect;
+
+typedef struct grpc_custom_socket {
+ // Implementation defined
+ void* impl;
+ grpc_endpoint* endpoint;
+ grpc_tcp_listener* listener;
+ grpc_custom_tcp_connect* connector;
+ int refs;
+} grpc_custom_socket;
+
+typedef void (*grpc_custom_connect_callback)(grpc_custom_socket* socket,
+ grpc_error* error);
+typedef void (*grpc_custom_write_callback)(grpc_custom_socket* socket,
+ grpc_error* error);
+typedef void (*grpc_custom_read_callback)(grpc_custom_socket* socket,
+ size_t nread, grpc_error* error);
+typedef void (*grpc_custom_accept_callback)(grpc_custom_socket* socket,
+ grpc_custom_socket* client,
+ grpc_error* error);
+typedef void (*grpc_custom_close_callback)(grpc_custom_socket* socket);
+
+typedef struct grpc_socket_vtable {
+ grpc_error* (*init)(grpc_custom_socket* socket, int domain);
+ void (*connect)(grpc_custom_socket* socket, const grpc_sockaddr* addr,
+ size_t len, grpc_custom_connect_callback cb);
+ void (*destroy)(grpc_custom_socket* socket);
+ void (*shutdown)(grpc_custom_socket* socket);
+ void (*close)(grpc_custom_socket* socket, grpc_custom_close_callback cb);
+ void (*write)(grpc_custom_socket* socket, grpc_slice_buffer* slices,
+ grpc_custom_write_callback cb);
+ void (*read)(grpc_custom_socket* socket, char* buffer, size_t length,
+ grpc_custom_read_callback cb);
+ grpc_error* (*getpeername)(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr, int* len);
+ grpc_error* (*getsockname)(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr, int* len);
+ grpc_error* (*setsockopt)(grpc_custom_socket* socket, int level, int optname,
+ const void* optval, uint32_t optlen);
+ grpc_error* (*bind)(grpc_custom_socket* socket, const grpc_sockaddr* addr,
+ size_t len, int flags);
+ grpc_error* (*listen)(grpc_custom_socket* socket);
+ void (*accept)(grpc_custom_socket* socket, grpc_custom_socket* client,
+ grpc_custom_accept_callback cb);
+} grpc_socket_vtable;
+
+/* Internal APIs */
+void grpc_custom_endpoint_init(grpc_socket_vtable* impl);
+
+void grpc_custom_close_server_callback(grpc_tcp_listener* listener);
+
+grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
+ grpc_resource_quota* resource_quota,
+ char* peer_string);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H */
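
grpc_socket_vtable is the entire surface a custom transport has to supply, and grpc_custom_endpoint_init() installs it along with the custom TCP client and server vtables. As a rough sketch of the connect member's expected contract (my_socket_connect is hypothetical and completes synchronously only for illustration), the implementation invokes the supplied callback exactly once on the thread that owns the custom iomgr:

static void my_socket_connect(grpc_custom_socket* socket,
                              const grpc_sockaddr* addr, size_t len,
                              grpc_custom_connect_callback cb) {
  (void)addr;
  (void)len;
  /* Start the platform-specific connection using socket->impl here, then
     report the result; tcp_client_custom.cc builds the endpoint on success. */
  cb(socket, GRPC_ERROR_NONE);
}

A complete port fills in every member of grpc_socket_vtable and passes the table to grpc_custom_endpoint_init() during iomgr startup.
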
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index ca0046b83b..205af22531 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -63,7 +63,7 @@ typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
typedef size_t msg_iovlen_type;
#endif
-grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
+extern grpc_core::TraceFlag grpc_tcp_trace;
namespace {
struct grpc_tcp {
diff --git a/src/core/lib/iomgr/tcp_server.cc b/src/core/lib/iomgr/tcp_server.cc
new file mode 100644
index 0000000000..ea745f266b
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server.cc
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/tcp_server.h"
+
+grpc_tcp_server_vtable* grpc_tcp_server_impl;
+
+grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
+ return grpc_tcp_server_impl->create(shutdown_complete, args, server);
+}
+
+void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
+ size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
+ grpc_tcp_server_impl->start(server, pollsets, pollset_count, on_accept_cb,
+ cb_arg);
+}
+
+grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* out_port) {
+ return grpc_tcp_server_impl->add_port(s, addr, out_port);
+}
+
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s,
+ unsigned port_index) {
+ return grpc_tcp_server_impl->port_fd_count(s, port_index);
+}
+
+int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
+ unsigned fd_index) {
+ return grpc_tcp_server_impl->port_fd(s, port_index, fd_index);
+}
+
+grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
+ return grpc_tcp_server_impl->ref(s);
+}
+
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
+ grpc_tcp_server_impl->shutdown_starting_add(s, shutdown_starting);
+}
+
+void grpc_tcp_server_unref(grpc_tcp_server* s) {
+ grpc_tcp_server_impl->unref(s);
+}
+
+void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {
+ grpc_tcp_server_impl->shutdown_listeners(s);
+}
+
+void grpc_set_tcp_server_impl(grpc_tcp_server_vtable* impl) {
+ grpc_tcp_server_impl = impl;
+}
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 965d97407f..8fcbb2f680 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -45,6 +45,24 @@ typedef void (*grpc_tcp_server_cb)(void* arg, grpc_endpoint* ep,
grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor);
+typedef struct grpc_tcp_server_vtable {
+ grpc_error* (*create)(grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server);
+ void (*start)(grpc_tcp_server* server, grpc_pollset** pollsets,
+ size_t pollset_count, grpc_tcp_server_cb on_accept_cb,
+ void* cb_arg);
+ grpc_error* (*add_port)(grpc_tcp_server* s, const grpc_resolved_address* addr,
+ int* out_port);
+ unsigned (*port_fd_count)(grpc_tcp_server* s, unsigned port_index);
+ int (*port_fd)(grpc_tcp_server* s, unsigned port_index, unsigned fd_index);
+ grpc_tcp_server* (*ref)(grpc_tcp_server* s);
+ void (*shutdown_starting_add)(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting);
+ void (*unref)(grpc_tcp_server* s);
+ void (*shutdown_listeners)(grpc_tcp_server* s);
+} grpc_tcp_server_vtable;
+
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
@@ -97,4 +115,8 @@ void grpc_tcp_server_unref(grpc_tcp_server* s);
/* Shutdown the fds of listeners. */
void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s);
+void grpc_tcp_server_global_init();
+
+void grpc_set_tcp_server_impl(grpc_tcp_server_vtable* impl);
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */
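
The server side mirrors the client: every grpc_tcp_server_* entry point in tcp_server.cc above forwards to the registered grpc_tcp_server_vtable. The rough shape of the wiring, using the vtables defined elsewhere in this change (the actual registration lives in the iomgr_* files and may differ in detail):

extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable;
extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable;

static void example_register_posix_tcp_vtables(void) {
  grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
  grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
}
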
diff --git a/src/core/lib/iomgr/tcp_server_custom.cc b/src/core/lib/iomgr/tcp_server_custom.cc
new file mode 100644
index 0000000000..be92e61b62
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_custom.cc
@@ -0,0 +1,479 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+
+extern grpc_core::TraceFlag grpc_tcp_trace;
+
+extern grpc_socket_vtable* grpc_custom_socket_vtable;
+
+/* one listening port */
+struct grpc_tcp_listener {
+ grpc_tcp_server* server;
+ unsigned port_index;
+ int port;
+
+ grpc_custom_socket* socket;
+
+ /* linked list */
+ struct grpc_tcp_listener* next;
+
+ bool closed;
+};
+
+struct grpc_tcp_server {
+ gpr_refcount refs;
+
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void* on_accept_cb_arg;
+
+ int open_ports;
+
+ /* linked list of server ports */
+ grpc_tcp_listener* head;
+ grpc_tcp_listener* tail;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
+
+ /* shutdown callback */
+ grpc_closure* shutdown_complete;
+
+ bool shutdown;
+
+ grpc_resource_quota* resource_quota;
+};
+
+static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
+ grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
+ s->resource_quota = grpc_resource_quota_create(nullptr);
+ for (size_t i = 0; i < (args == nullptr ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_unref_internal(s->resource_quota);
+ s->resource_quota = grpc_resource_quota_ref_internal(
+ (grpc_resource_quota*)args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_unref_internal(s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
+ }
+ }
+ }
+ gpr_ref_init(&s->refs, 1);
+ s->on_accept_cb = nullptr;
+ s->on_accept_cb_arg = nullptr;
+ s->open_ports = 0;
+ s->head = nullptr;
+ s->tail = nullptr;
+ s->shutdown_starting.head = nullptr;
+ s->shutdown_starting.tail = nullptr;
+ s->shutdown_complete = shutdown_complete;
+ s->shutdown = false;
+ *server = s;
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ gpr_ref(&s->refs);
+ return s;
+}
+
+static void tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
+ grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
+ GRPC_ERROR_NONE);
+}
+
+static void finish_shutdown(grpc_tcp_server* s) {
+ GPR_ASSERT(s->shutdown);
+ if (s->shutdown_complete != nullptr) {
+ GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
+ }
+
+ while (s->head) {
+ grpc_tcp_listener* sp = s->head;
+ s->head = sp->next;
+ sp->next = nullptr;
+ gpr_free(sp);
+ }
+ grpc_resource_quota_unref_internal(s->resource_quota);
+ gpr_free(s);
+}
+
+static void custom_close_callback(grpc_custom_socket* socket) {
+ grpc_tcp_listener* sp = socket->listener;
+ if (sp) {
+ grpc_core::ExecCtx exec_ctx;
+ sp->server->open_ports--;
+ if (sp->server->open_ports == 0 && sp->server->shutdown) {
+ finish_shutdown(sp->server);
+ }
+ }
+ socket->refs--;
+ if (socket->refs == 0) {
+ grpc_custom_socket_vtable->destroy(socket);
+ gpr_free(socket);
+ }
+}
+
+void grpc_custom_close_server_callback(grpc_tcp_listener* sp) {
+ if (sp) {
+ grpc_core::ExecCtx exec_ctx;
+ sp->server->open_ports--;
+ if (sp->server->open_ports == 0 && sp->server->shutdown) {
+ finish_shutdown(sp->server);
+ }
+ }
+}
+
+static void close_listener(grpc_tcp_listener* sp) {
+ grpc_custom_socket* socket = sp->socket;
+ if (!sp->closed) {
+ sp->closed = true;
+ grpc_custom_socket_vtable->close(socket, custom_close_callback);
+ }
+}
+
+static void tcp_server_destroy(grpc_tcp_server* s) {
+ int immediately_done = 0;
+ grpc_tcp_listener* sp;
+
+ GPR_ASSERT(!s->shutdown);
+ s->shutdown = true;
+
+ if (s->open_ports == 0) {
+ immediately_done = 1;
+ }
+ for (sp = s->head; sp; sp = sp->next) {
+ close_listener(sp);
+ }
+
+ if (immediately_done) {
+ finish_shutdown(s);
+ }
+}
+
+static void tcp_server_unref(grpc_tcp_server* s) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ if (gpr_unref(&s->refs)) {
+ /* Complete shutdown_starting work before destroying. */
+ grpc_core::ExecCtx exec_ctx;
+ GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
+ grpc_core::ExecCtx::Get()->Flush();
+ tcp_server_destroy(s);
+ }
+}
+
+static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
+ grpc_tcp_server_acceptor* acceptor =
+ (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
+ grpc_endpoint* ep = nullptr;
+ grpc_resolved_address peer_name;
+ char* peer_name_string;
+ grpc_error* err;
+
+ peer_name_string = nullptr;
+ memset(&peer_name, 0, sizeof(grpc_resolved_address));
+ peer_name.len = GRPC_MAX_SOCKADDR_SIZE;
+ err = grpc_custom_socket_vtable->getpeername(
+ socket, (grpc_sockaddr*)&peer_name.addr, (int*)&peer_name.len);
+ if (err == GRPC_ERROR_NONE) {
+ peer_name_string = grpc_sockaddr_to_uri(&peer_name);
+ } else {
+ GRPC_LOG_IF_ERROR("getpeername error", err);
+ GRPC_ERROR_UNREF(err);
+ }
+ if (grpc_tcp_trace.enabled()) {
+ if (peer_name_string) {
+ gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
+ sp->server, peer_name_string);
+ } else {
+ gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
+ }
+ }
+ ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
+ peer_name_string);
+ acceptor->from_server = sp->server;
+ acceptor->port_index = sp->port_index;
+ acceptor->fd_index = 0;
+ sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, nullptr, acceptor);
+ gpr_free(peer_name_string);
+}
+
+static void custom_accept_callback(grpc_custom_socket* socket,
+ grpc_custom_socket* client,
+ grpc_error* error);
+
+static void custom_accept_callback(grpc_custom_socket* socket,
+ grpc_custom_socket* client,
+ grpc_error* error) {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_tcp_listener* sp = socket->listener;
+ if (error != GRPC_ERROR_NONE) {
+ if (!sp->closed) {
+ gpr_log(GPR_ERROR, "Accept failed: %s", grpc_error_string(error));
+ }
+ gpr_free(client);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
+ finish_accept(sp, client);
+ if (!sp->closed) {
+ grpc_custom_socket* new_socket =
+ (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
+ new_socket->endpoint = nullptr;
+ new_socket->listener = nullptr;
+ new_socket->connector = nullptr;
+ new_socket->refs = 1;
+ grpc_custom_socket_vtable->accept(sp->socket, new_socket,
+ custom_accept_callback);
+ }
+}
+
+static grpc_error* add_socket_to_server(grpc_tcp_server* s,
+ grpc_custom_socket* socket,
+ const grpc_resolved_address* addr,
+ unsigned port_index,
+ grpc_tcp_listener** listener) {
+ grpc_tcp_listener* sp = nullptr;
+ int port = -1;
+ grpc_error* error;
+ grpc_resolved_address sockname_temp;
+
+  // The last argument to the socket vtable's bind is flags
+ error = grpc_custom_socket_vtable->bind(socket, (grpc_sockaddr*)addr->addr,
+ addr->len, 0);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
+ }
+
+ error = grpc_custom_socket_vtable->listen(socket);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
+ }
+
+ sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE;
+ error = grpc_custom_socket_vtable->getsockname(
+ socket, (grpc_sockaddr*)&sockname_temp.addr, (int*)&sockname_temp.len);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
+ }
+
+ port = grpc_sockaddr_get_port(&sockname_temp);
+
+ GPR_ASSERT(port >= 0);
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = (grpc_tcp_listener*)gpr_zalloc(sizeof(grpc_tcp_listener));
+ sp->next = nullptr;
+ if (s->head == nullptr) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
+ }
+ s->tail = sp;
+ sp->server = s;
+ sp->socket = socket;
+ sp->port = port;
+ sp->port_index = port_index;
+ sp->closed = false;
+ s->open_ports++;
+ *listener = sp;
+
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* port) {
+  // This function is mostly copied from tcp_server_windows.cc
+ grpc_tcp_listener* sp = nullptr;
+ grpc_custom_socket* socket;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wildcard;
+ grpc_resolved_address* allocated_addr = nullptr;
+ grpc_resolved_address sockname_temp;
+ unsigned port_index = 0;
+ grpc_error* error = GRPC_ERROR_NONE;
+ int family;
+
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+
+ if (s->tail != nullptr) {
+ port_index = s->tail->port_index + 1;
+ }
+
+ /* Check if this is a wildcard port, and if so, try to keep the port the same
+ as some previously created listener. */
+ if (grpc_sockaddr_get_port(addr) == 0) {
+ for (sp = s->head; sp; sp = sp->next) {
+ socket = sp->socket;
+ sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE;
+ if (nullptr == grpc_custom_socket_vtable->getsockname(
+ socket, (grpc_sockaddr*)&sockname_temp.addr,
+ (int*)&sockname_temp.len)) {
+ *port = grpc_sockaddr_get_port(&sockname_temp);
+ if (*port > 0) {
+ allocated_addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
+ grpc_sockaddr_set_port(allocated_addr, *port);
+ addr = allocated_addr;
+ break;
+ }
+ }
+ }
+ }
+
+ if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
+ addr = &addr6_v4mapped;
+ }
+
+ /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
+ if (grpc_sockaddr_is_wildcard(addr, port)) {
+ grpc_sockaddr_make_wildcard6(*port, &wildcard);
+
+ addr = &wildcard;
+ }
+
+ if (grpc_tcp_trace.enabled()) {
+ char* port_string;
+ grpc_sockaddr_to_string(&port_string, addr, 0);
+ const char* str = grpc_error_string(error);
+ if (port_string) {
+ gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
+ gpr_free(port_string);
+ } else {
+ gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
+ }
+ }
+
+ family = grpc_sockaddr_get_family(addr);
+ socket = (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
+ socket->refs = 1;
+ socket->endpoint = nullptr;
+ socket->listener = nullptr;
+ socket->connector = nullptr;
+ grpc_custom_socket_vtable->init(socket, family);
+
+ if (error == GRPC_ERROR_NONE) {
+#if defined(GPR_LINUX) && defined(SO_REUSEPORT)
+ if (family == AF_INET || family == AF_INET6) {
+ int enable = 1;
+ grpc_custom_socket_vtable->setsockopt(socket, SOL_SOCKET, SO_REUSEPORT,
+ &enable, sizeof(enable));
+ }
+#endif /* GPR_LINUX && SO_REUSEPORT */
+ error = add_socket_to_server(s, socket, addr, port_index, &sp);
+ }
+ gpr_free(allocated_addr);
+
+ if (error != GRPC_ERROR_NONE) {
+ grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to add port to server", &error, 1);
+ GRPC_ERROR_UNREF(error);
+ error = error_out;
+ *port = -1;
+ } else {
+ GPR_ASSERT(sp != nullptr);
+ *port = sp->port;
+ }
+ socket->listener = sp;
+ return error;
+}
+
+static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
+ size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
+ grpc_tcp_listener* sp;
+ (void)pollsets;
+ (void)pollset_count;
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "SERVER_START %p", server);
+ }
+ GPR_ASSERT(on_accept_cb);
+ GPR_ASSERT(!server->on_accept_cb);
+ server->on_accept_cb = on_accept_cb;
+ server->on_accept_cb_arg = cb_arg;
+ for (sp = server->head; sp; sp = sp->next) {
+ grpc_custom_socket* new_socket =
+ (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
+ new_socket->endpoint = nullptr;
+ new_socket->listener = nullptr;
+ new_socket->connector = nullptr;
+ new_socket->refs = 1;
+ grpc_custom_socket_vtable->accept(sp->socket, new_socket,
+ custom_accept_callback);
+ }
+}
+
+static unsigned tcp_server_port_fd_count(grpc_tcp_server* s,
+ unsigned port_index) {
+ return 0;
+}
+
+static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
+ unsigned fd_index) {
+ return -1;
+}
+
+static void tcp_server_shutdown_listeners(grpc_tcp_server* s) {
+ for (grpc_tcp_listener* sp = s->head; sp; sp = sp->next) {
+ if (!sp->closed) {
+ sp->closed = true;
+ grpc_custom_socket_vtable->close(sp->socket, custom_close_callback);
+ }
+ }
+}
+
+grpc_tcp_server_vtable custom_tcp_server_vtable = {
+ tcp_server_create,
+ tcp_server_start,
+ tcp_server_add_port,
+ tcp_server_port_fd_count,
+ tcp_server_port_fd,
+ tcp_server_ref,
+ tcp_server_shutdown_starting_add,
+ tcp_server_unref,
+ tcp_server_shutdown_listeners};
+
+#ifdef GRPC_UV_TEST
+grpc_tcp_server_vtable* default_tcp_server_vtable = &custom_tcp_server_vtable;
+#endif
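
tcp_server_custom.cc assumes a one-shot accept contract: grpc_custom_socket_vtable->accept() delivers a single connection into the pre-allocated client socket, and custom_accept_callback() immediately re-arms with a fresh socket. A minimal sketch of an accept member under that assumption (my_socket_accept is hypothetical, not part of this commit):

static void my_socket_accept(grpc_custom_socket* listener,
                             grpc_custom_socket* client,
                             grpc_custom_accept_callback cb) {
  /* Wait for (or be notified of) one incoming connection, initialize
     client->impl from it, then report it exactly once. */
  cb(listener, client, GRPC_ERROR_NONE);
}
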
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 806e2a2152..712529c9ac 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -69,9 +69,9 @@ static void init(void) {
#endif
}
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
- const grpc_channel_args* args,
- grpc_tcp_server** server) {
+static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
gpr_once_init(&check_init, init);
grpc_tcp_server* s =
@@ -392,9 +392,9 @@ static grpc_error* clone_port(grpc_tcp_listener* listener, unsigned count) {
return GRPC_ERROR_NONE;
}
-grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
- const grpc_resolved_address* addr,
- int* out_port) {
+static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* out_port) {
grpc_tcp_listener* sp;
grpc_resolved_address sockname_temp;
grpc_resolved_address addr6_v4mapped;
@@ -416,7 +416,7 @@ grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
static_cast<socklen_t>(sizeof(struct sockaddr_storage));
if (0 ==
getsockname(sp->fd,
- reinterpret_cast<struct sockaddr*>(&sockname_temp.addr),
+ reinterpret_cast<grpc_sockaddr*>(&sockname_temp.addr),
&sockname_temp.len)) {
int used_port = grpc_sockaddr_get_port(&sockname_temp);
if (used_port > 0) {
@@ -459,8 +459,7 @@ static grpc_tcp_listener* get_port_index(grpc_tcp_server* s,
return nullptr;
}
-unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s,
- unsigned port_index) {
+static unsigned tcp_server_port_fd_count(grpc_tcp_server* s,
+                                         unsigned port_index) {
unsigned num_fds = 0;
gpr_mu_lock(&s->mu);
grpc_tcp_listener* sp = get_port_index(s, port_index);
@@ -471,8 +470,8 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s,
return num_fds;
}
-int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
- unsigned fd_index) {
+static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
+ unsigned fd_index) {
gpr_mu_lock(&s->mu);
grpc_tcp_listener* sp = get_port_index(s, port_index);
for (; sp; sp = sp->sibling, --fd_index) {
@@ -485,10 +484,10 @@ int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
return -1;
}
-void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
- size_t pollset_count,
- grpc_tcp_server_cb on_accept_cb,
- void* on_accept_cb_arg) {
+static void tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
+ size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb,
+ void* on_accept_cb_arg) {
size_t i;
grpc_tcp_listener* sp;
GPR_ASSERT(on_accept_cb);
@@ -527,20 +526,20 @@ void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
gpr_mu_unlock(&s->mu);
}
-grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
+static grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) {
gpr_ref_non_zero(&s->refs);
return s;
}
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
- grpc_closure* shutdown_starting) {
+static void tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
gpr_mu_lock(&s->mu);
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
GRPC_ERROR_NONE);
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
+static void tcp_server_unref(grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(s);
gpr_mu_lock(&s->mu);
@@ -550,7 +549,7 @@ void grpc_tcp_server_unref(grpc_tcp_server* s) {
}
}
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {
+static void tcp_server_shutdown_listeners(grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
s->shutdown_listeners = true;
/* shutdown all fd's */
@@ -564,4 +563,14 @@ void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {
gpr_mu_unlock(&s->mu);
}
+grpc_tcp_server_vtable grpc_posix_tcp_server_vtable = {
+ tcp_server_create,
+ tcp_server_start,
+ tcp_server_add_port,
+ tcp_server_port_fd_count,
+ tcp_server_port_fd,
+ tcp_server_ref,
+ tcp_server_shutdown_starting_add,
+ tcp_server_unref,
+ tcp_server_shutdown_listeners};
#endif
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
index 0a9876d9d8..0734453364 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
@@ -171,8 +171,7 @@ grpc_error* grpc_tcp_server_prepare_socket(int fd,
if (err != GRPC_ERROR_NONE) goto error;
GPR_ASSERT(addr->len < ~(socklen_t)0);
- if (bind(fd,
- reinterpret_cast<struct sockaddr*>(const_cast<char*>(addr->addr)),
+ if (bind(fd, reinterpret_cast<grpc_sockaddr*>(const_cast<char*>(addr->addr)),
addr->len) < 0) {
err = GRPC_OS_ERROR(errno, "bind");
goto error;
@@ -185,7 +184,7 @@ grpc_error* grpc_tcp_server_prepare_socket(int fd,
sockname_temp.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
- if (getsockname(fd, reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
+ if (getsockname(fd, reinterpret_cast<grpc_sockaddr*>(sockname_temp.addr),
&sockname_temp.len) < 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
goto error;
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
index a60370f763..7fd86c57eb 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
@@ -68,14 +68,14 @@ static grpc_error* get_unused_port(int* port) {
if (dsmode == GRPC_DSMODE_IPV4) {
grpc_sockaddr_make_wildcard4(0, &wild);
}
- if (bind(fd, reinterpret_cast<const struct sockaddr*>(wild.addr), wild.len) !=
+ if (bind(fd, reinterpret_cast<const grpc_sockaddr*>(wild.addr), wild.len) !=
0) {
err = GRPC_OS_ERROR(errno, "bind");
close(fd);
return err;
}
- if (getsockname(fd, reinterpret_cast<struct sockaddr*>(wild.addr),
- &wild.len) != 0) {
+ if (getsockname(fd, reinterpret_cast<grpc_sockaddr*>(wild.addr), &wild.len) !=
+ 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
close(fd);
return err;
@@ -119,9 +119,9 @@ grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s,
if (ifa_it->ifa_addr == nullptr) {
continue;
} else if (ifa_it->ifa_addr->sa_family == AF_INET) {
- addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
+ addr.len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in));
} else if (ifa_it->ifa_addr->sa_family == AF_INET6) {
- addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_in6));
+ addr.len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
} else {
continue;
}
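
These casts now go through the grpc_sockaddr/grpc_sockaddr_in/grpc_sockaddr_in6 aliases that this commit introduces in the sockaddr headers (sockaddr_posix.h, sockaddr_windows.h and sockaddr_custom.h in the diffstat; those hunks are outside this section). A hedged sketch of the assumed POSIX shape of that alias layer:

// Assumed shape of the POSIX alias layer these hunks rely on (sketch only;
// the authoritative typedefs are in sockaddr_posix.h, not shown here).
#include <sys/socket.h>
#include <netinet/in.h>

typedef struct sockaddr grpc_sockaddr;
typedef struct sockaddr_in grpc_sockaddr_in;
typedef struct sockaddr_in6 grpc_sockaddr_in6;

// A custom (e.g. libuv) build would instead define layout-compatible structs,
// letting iomgr code spell every address cast the same way on all platforms.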
diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc
deleted file mode 100644
index aa423766c7..0000000000
--- a/src/core/lib/iomgr/tcp_server_uv.cc
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-
-#include <assert.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
-#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/iomgr/tcp_server.h"
-#include "src/core/lib/iomgr/tcp_uv.h"
-
-/* one listening port */
-typedef struct grpc_tcp_listener grpc_tcp_listener;
-struct grpc_tcp_listener {
- uv_tcp_t* handle;
- grpc_tcp_server* server;
- unsigned port_index;
- int port;
- /* linked list */
- struct grpc_tcp_listener* next;
-
- bool closed;
-
- bool has_pending_connection;
-};
-
-struct grpc_tcp_server {
- gpr_refcount refs;
-
- /* Called whenever accept() succeeds on a server port. */
- grpc_tcp_server_cb on_accept_cb;
- void* on_accept_cb_arg;
-
- int open_ports;
-
- /* linked list of server ports */
- grpc_tcp_listener* head;
- grpc_tcp_listener* tail;
-
- /* List of closures passed to shutdown_starting_add(). */
- grpc_closure_list shutdown_starting;
-
- /* shutdown callback */
- grpc_closure* shutdown_complete;
-
- bool shutdown;
-
- grpc_resource_quota* resource_quota;
-};
-
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
- const grpc_channel_args* args,
- grpc_tcp_server** server) {
- grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
- s->resource_quota = grpc_resource_quota_create(NULL);
- for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
- if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
- if (args->args[i].type == GRPC_ARG_POINTER) {
- grpc_resource_quota_unref_internal(s->resource_quota);
- s->resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota*)args->args[i].value.pointer.p);
- } else {
- grpc_resource_quota_unref_internal(s->resource_quota);
- gpr_free(s);
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
- }
- }
- }
- gpr_ref_init(&s->refs, 1);
- s->on_accept_cb = NULL;
- s->on_accept_cb_arg = NULL;
- s->open_ports = 0;
- s->head = NULL;
- s->tail = NULL;
- s->shutdown_starting.head = NULL;
- s->shutdown_starting.tail = NULL;
- s->shutdown_complete = shutdown_complete;
- s->shutdown = false;
- *server = s;
- return GRPC_ERROR_NONE;
-}
-
-grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
- GRPC_UV_ASSERT_SAME_THREAD();
- gpr_ref(&s->refs);
- return s;
-}
-
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
- grpc_closure* shutdown_starting) {
- grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
- GRPC_ERROR_NONE);
-}
-
-static void finish_shutdown(grpc_tcp_server* s) {
- GPR_ASSERT(s->shutdown);
- if (s->shutdown_complete != NULL) {
- GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
- }
-
- while (s->head) {
- grpc_tcp_listener* sp = s->head;
- s->head = sp->next;
- sp->next = NULL;
- gpr_free(sp->handle);
- gpr_free(sp);
- }
- grpc_resource_quota_unref_internal(s->resource_quota);
- gpr_free(s);
-}
-
-static void handle_close_callback(uv_handle_t* handle) {
- grpc_tcp_listener* sp = (grpc_tcp_listener*)handle->data;
- grpc_core::ExecCtx exec_ctx;
- sp->server->open_ports--;
- if (sp->server->open_ports == 0 && sp->server->shutdown) {
- finish_shutdown(sp->server);
- }
-}
-
-static void close_listener(grpc_tcp_listener* sp) {
- if (!sp->closed) {
- sp->closed = true;
- uv_close((uv_handle_t*)sp->handle, handle_close_callback);
- }
-}
-
-static void tcp_server_destroy(grpc_tcp_server* s) {
- int immediately_done = 0;
- grpc_tcp_listener* sp;
-
- GPR_ASSERT(!s->shutdown);
- s->shutdown = true;
-
- if (s->open_ports == 0) {
- immediately_done = 1;
- }
- for (sp = s->head; sp; sp = sp->next) {
- close_listener(sp);
- }
-
- if (immediately_done) {
- finish_shutdown(s);
- }
-}
-
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
- GRPC_UV_ASSERT_SAME_THREAD();
- if (gpr_unref(&s->refs)) {
- /* Complete shutdown_starting work before destroying. */
- grpc_core::ExecCtx exec_ctx;
- GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
- grpc_core::ExecCtx::Get()->Flush();
- tcp_server_destroy(s);
- }
-}
-
-static void finish_accept(grpc_tcp_listener* sp) {
- grpc_tcp_server_acceptor* acceptor =
- (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
- uv_tcp_t* client = NULL;
- grpc_endpoint* ep = NULL;
- grpc_resolved_address peer_name;
- char* peer_name_string;
- int err;
- uv_tcp_t* server = sp->handle;
-
- client = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
- uv_tcp_init(uv_default_loop(), client);
- // UV documentation says this is guaranteed to succeed
- uv_accept((uv_stream_t*)server, (uv_stream_t*)client);
- peer_name_string = NULL;
- memset(&peer_name, 0, sizeof(grpc_resolved_address));
- peer_name.len = sizeof(struct sockaddr_storage);
- err = uv_tcp_getpeername(client, (struct sockaddr*)&peer_name.addr,
- (int*)&peer_name.len);
- if (err == 0) {
- peer_name_string = grpc_sockaddr_to_uri(&peer_name);
- } else {
- gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err));
- }
- if (grpc_tcp_trace.enabled()) {
- if (peer_name_string) {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
- sp->server, peer_name_string);
- } else {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
- }
- }
- ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
- acceptor->from_server = sp->server;
- acceptor->port_index = sp->port_index;
- acceptor->fd_index = 0;
- sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor);
- gpr_free(peer_name_string);
-}
-
-static void on_connect(uv_stream_t* server, int status) {
- grpc_tcp_listener* sp = (grpc_tcp_listener*)server->data;
- grpc_core::ExecCtx exec_ctx;
-
- if (status < 0) {
- switch (status) {
- case UV_EINTR:
- case UV_EAGAIN:
- return;
- default:
- close_listener(sp);
- return;
- }
- }
-
- GPR_ASSERT(!sp->has_pending_connection);
-
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server);
- }
-
- // Create acceptor.
- if (sp->server->on_accept_cb) {
- finish_accept(sp);
- } else {
- sp->has_pending_connection = true;
- }
-}
-
-static grpc_error* add_addr_to_server(grpc_tcp_server* s,
- const grpc_resolved_address* addr,
- unsigned port_index,
- grpc_tcp_listener** listener) {
- grpc_tcp_listener* sp = NULL;
- int port = -1;
- int status;
- grpc_error* error;
- grpc_resolved_address sockname_temp;
- uv_tcp_t* handle = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
- int family = grpc_sockaddr_get_family(addr);
-
- status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family);
-#if defined(GPR_LINUX) && defined(SO_REUSEPORT)
- if (family == AF_INET || family == AF_INET6) {
- int fd;
- uv_fileno((uv_handle_t*)handle, &fd);
- int enable = 1;
- setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable));
- }
-#endif /* GPR_LINUX && SO_REUSEPORT */
-
- if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to initialize UV tcp handle");
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- return error;
- }
-
- // The last argument to uv_tcp_bind is flags
- status = uv_tcp_bind(handle, (struct sockaddr*)addr->addr, 0);
- if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to bind to port");
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- return error;
- }
-
- status = uv_listen((uv_stream_t*)handle, SOMAXCONN, on_connect);
- if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to listen to port");
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- return error;
- }
-
- sockname_temp.len = (int)sizeof(struct sockaddr_storage);
- status = uv_tcp_getsockname(handle, (struct sockaddr*)&sockname_temp.addr,
- (int*)&sockname_temp.len);
- if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getsockname failed");
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- return error;
- }
-
- port = grpc_sockaddr_get_port(&sockname_temp);
-
- GPR_ASSERT(port >= 0);
- GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = (grpc_tcp_listener*)gpr_zalloc(sizeof(grpc_tcp_listener));
- sp->next = NULL;
- if (s->head == NULL) {
- s->head = sp;
- } else {
- s->tail->next = sp;
- }
- s->tail = sp;
- sp->server = s;
- sp->handle = handle;
- sp->port = port;
- sp->port_index = port_index;
- sp->closed = false;
- handle->data = sp;
- s->open_ports++;
- GPR_ASSERT(sp->handle);
- *listener = sp;
-
- return GRPC_ERROR_NONE;
-}
-
-static grpc_error* add_wildcard_addrs_to_server(grpc_tcp_server* s,
- unsigned port_index,
- int requested_port,
- grpc_tcp_listener** listener) {
- grpc_resolved_address wild4;
- grpc_resolved_address wild6;
- grpc_tcp_listener* sp = nullptr;
- grpc_tcp_listener* sp2 = nullptr;
- grpc_error* v6_err = GRPC_ERROR_NONE;
- grpc_error* v4_err = GRPC_ERROR_NONE;
-
- grpc_sockaddr_make_wildcards(requested_port, &wild4, &wild6);
- /* Try listening on IPv6 first. */
- if ((v6_err = add_addr_to_server(s, &wild6, port_index, &sp)) ==
- GRPC_ERROR_NONE) {
- *listener = sp;
- return GRPC_ERROR_NONE;
- }
-
- if ((v4_err = add_addr_to_server(s, &wild4, port_index, &sp2)) ==
- GRPC_ERROR_NONE) {
- *listener = sp2;
- return GRPC_ERROR_NONE;
- }
-
- grpc_error* root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to add any wildcard listeners");
- root_err = grpc_error_add_child(root_err, v6_err);
- root_err = grpc_error_add_child(root_err, v4_err);
- return root_err;
-}
-
-grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
- const grpc_resolved_address* addr,
- int* port) {
- // This function is mostly copied from tcp_server_windows.c
- grpc_tcp_listener* sp = NULL;
- grpc_resolved_address addr6_v4mapped;
- grpc_resolved_address* allocated_addr = NULL;
- grpc_resolved_address sockname_temp;
- unsigned port_index = 0;
- grpc_error* error = GRPC_ERROR_NONE;
-
- GRPC_UV_ASSERT_SAME_THREAD();
-
- if (s->tail != NULL) {
- port_index = s->tail->port_index + 1;
- }
-
- /* Check if this is a wildcard port, and if so, try to keep the port the same
- as some previously created listener. */
- if (grpc_sockaddr_get_port(addr) == 0) {
- for (sp = s->head; sp; sp = sp->next) {
- sockname_temp.len = sizeof(struct sockaddr_storage);
- if (0 == uv_tcp_getsockname(sp->handle,
- (struct sockaddr*)&sockname_temp.addr,
- (int*)&sockname_temp.len)) {
- *port = grpc_sockaddr_get_port(&sockname_temp);
- if (*port > 0) {
- allocated_addr =
- (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
- memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
- grpc_sockaddr_set_port(allocated_addr, *port);
- addr = allocated_addr;
- break;
- }
- }
- }
- }
-
- /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
- if (grpc_sockaddr_is_wildcard(addr, port)) {
- error = add_wildcard_addrs_to_server(s, port_index, *port, &sp);
- } else {
- if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = &addr6_v4mapped;
- }
-
- error = add_addr_to_server(s, addr, port_index, &sp);
- }
-
- gpr_free(allocated_addr);
-
- if (grpc_tcp_trace.enabled()) {
- char* port_string;
- grpc_sockaddr_to_string(&port_string, addr, 0);
- const char* str = grpc_error_string(error);
- if (port_string) {
- gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
- gpr_free(port_string);
- } else {
- gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
- }
- }
-
- if (error != GRPC_ERROR_NONE) {
- grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to add port to server", &error, 1);
- GRPC_ERROR_UNREF(error);
- error = error_out;
- *port = -1;
- } else {
- GPR_ASSERT(sp != NULL);
- *port = sp->port;
- }
- return error;
-}
-
-void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
- size_t pollset_count,
- grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
- grpc_tcp_listener* sp;
- (void)pollsets;
- (void)pollset_count;
- GRPC_UV_ASSERT_SAME_THREAD();
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "SERVER_START %p", server);
- }
- GPR_ASSERT(on_accept_cb);
- GPR_ASSERT(!server->on_accept_cb);
- server->on_accept_cb = on_accept_cb;
- server->on_accept_cb_arg = cb_arg;
- for (sp = server->head; sp; sp = sp->next) {
- if (sp->has_pending_connection) {
- finish_accept(sp);
- sp->has_pending_connection = false;
- }
- }
-}
-
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
-
-#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_server_windows.cc b/src/core/lib/iomgr/tcp_server_windows.cc
index 6d19c1c4d7..77f3811dca 100644
--- a/src/core/lib/iomgr/tcp_server_windows.cc
+++ b/src/core/lib/iomgr/tcp_server_windows.cc
@@ -50,7 +50,7 @@ typedef struct grpc_tcp_listener grpc_tcp_listener;
struct grpc_tcp_listener {
/* This seemingly magic number comes from AcceptEx's documentation: each
 address buffer needs to have at least 16 extra bytes at its end. */
- uint8_t addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
+ uint8_t addresses[(sizeof(grpc_sockaddr_in6) + 16) * 2];
/* This will hold the socket for the next accept. */
SOCKET new_socket;
/* The listener winsocket. */
@@ -96,9 +96,9 @@ struct grpc_tcp_server {
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
- const grpc_channel_args* args,
- grpc_tcp_server** server) {
+static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
s->channel_args = grpc_channel_args_copy(args);
gpr_ref_init(&s->refs, 1);
@@ -142,13 +142,13 @@ static void finish_shutdown_locked(grpc_tcp_server* s) {
GRPC_ERROR_NONE);
}
-grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
+static grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) {
gpr_ref_non_zero(&s->refs);
return s;
}
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
- grpc_closure* shutdown_starting) {
+static void tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
gpr_mu_lock(&s->mu);
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
GRPC_ERROR_NONE);
@@ -172,7 +172,7 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
+static void tcp_server_unref(grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(s);
gpr_mu_lock(&s->mu);
@@ -195,7 +195,7 @@ static grpc_error* prepare_socket(SOCKET sock,
goto failure;
}
- if (bind(sock, (const struct sockaddr*)addr->addr, (int)addr->len) ==
+ if (bind(sock, (const grpc_sockaddr*)addr->addr, (int)addr->len) ==
SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -207,7 +207,7 @@ static grpc_error* prepare_socket(SOCKET sock,
}
sockname_temp_len = sizeof(struct sockaddr_storage);
- if (getsockname(sock, (struct sockaddr*)sockname_temp.addr,
+ if (getsockname(sock, (grpc_sockaddr*)sockname_temp.addr,
&sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
goto failure;
@@ -245,7 +245,7 @@ static void decrement_active_ports_and_notify_locked(grpc_tcp_listener* sp) {
static grpc_error* start_accept_locked(grpc_tcp_listener* port) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
- DWORD addrlen = sizeof(struct sockaddr_in6) + 16;
+ DWORD addrlen = sizeof(grpc_sockaddr_in6) + 16;
DWORD bytes_received = 0;
grpc_error* error = GRPC_ERROR_NONE;
@@ -343,7 +343,7 @@ static void on_accept(void* arg, grpc_error* error) {
gpr_free(utf8_message);
}
int peer_name_len = (int)peer_name.len;
- err = getpeername(sock, (struct sockaddr*)peer_name.addr, &peer_name_len);
+ err = getpeername(sock, (grpc_sockaddr*)peer_name.addr, &peer_name_len);
peer_name.len = (size_t)peer_name_len;
if (!err) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
@@ -442,9 +442,9 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s, SOCKET sock,
return GRPC_ERROR_NONE;
}
-grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
- const grpc_resolved_address* addr,
- int* port) {
+static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* port) {
grpc_tcp_listener* sp = NULL;
SOCKET sock;
grpc_resolved_address addr6_v4mapped;
@@ -464,7 +464,7 @@ grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
for (sp = s->head; sp; sp = sp->next) {
int sockname_temp_len = sizeof(struct sockaddr_storage);
if (0 == getsockname(sp->socket->socket,
- (struct sockaddr*)sockname_temp.addr,
+ (grpc_sockaddr*)sockname_temp.addr,
&sockname_temp_len)) {
sockname_temp.len = (size_t)sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
@@ -516,10 +516,10 @@ done:
return error;
}
-void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
- size_t pollset_count,
- grpc_tcp_server_cb on_accept_cb,
- void* on_accept_cb_arg) {
+static void tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
+ size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb,
+ void* on_accept_cb_arg) {
grpc_tcp_listener* sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
@@ -534,6 +534,26 @@ void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
+static unsigned tcp_server_port_fd_count(grpc_tcp_server* s,
+ unsigned port_index) {
+ return 0;
+}
+
+static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
+ unsigned fd_index) {
+ return -1;
+}
+static void tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
+
+grpc_tcp_server_vtable grpc_windows_tcp_server_vtable = {
+ tcp_server_create,
+ tcp_server_start,
+ tcp_server_add_port,
+ tcp_server_port_fd_count,
+ tcp_server_port_fd,
+ tcp_server_ref,
+ tcp_server_shutdown_starting_add,
+ tcp_server_unref,
+ tcp_server_shutdown_listeners};
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 6db3217d6e..5e3166926b 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -21,7 +21,6 @@
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_UV
-
#include <limits.h>
#include <string.h>
@@ -33,393 +32,393 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/resolve_address_custom.h"
#include "src/core/lib/iomgr/resource_quota.h"
-#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/iomgr/tcp_custom.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
+#include <uv.h>
-typedef struct {
- grpc_endpoint base;
- gpr_refcount refcount;
+#define IGNORE_CONST(addr) ((grpc_sockaddr*)(uintptr_t)(addr))
+typedef struct uv_socket_t {
+ uv_connect_t connect_req;
uv_write_t write_req;
uv_shutdown_t shutdown_req;
-
uv_tcp_t* handle;
-
- grpc_closure* read_cb;
- grpc_closure* write_cb;
-
- grpc_slice_buffer* read_slices;
- grpc_slice_buffer* write_slices;
uv_buf_t* write_buffers;
- grpc_resource_user* resource_user;
- grpc_resource_user_slice_allocator slice_allocator;
-
- bool shutting_down;
+ char* read_buf;
+ size_t read_len;
- char* peer_string;
- grpc_pollset* pollset;
-} grpc_tcp;
+ bool pending_connection;
+ grpc_custom_socket* accept_socket;
+ grpc_error* accept_error;
-static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
- return grpc_error_set_str(
- grpc_error_set_int(
- src_error,
- /* All tcp errors are marked with UNAVAILABLE so that application may
- * choose to retry. */
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
- GRPC_ERROR_STR_TARGET_ADDRESS,
- grpc_slice_from_copied_string(tcp->peer_string));
-}
+ grpc_custom_connect_callback connect_cb;
+ grpc_custom_write_callback write_cb;
+ grpc_custom_read_callback read_cb;
+ grpc_custom_accept_callback accept_cb;
+ grpc_custom_close_callback close_cb;
-static void tcp_free(grpc_tcp* tcp) {
- grpc_resource_user_unref(tcp->resource_user);
- gpr_free(tcp->handle);
- gpr_free(tcp->peer_string);
- gpr_free(tcp);
-}
-
-#ifndef NDEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
-#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
- int line) {
- if (grpc_tcp_trace.enabled()) {
- gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
- val - 1);
- }
- if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
- }
-}
+} uv_socket_t;
-static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
- int line) {
- if (grpc_tcp_trace.enabled()) {
- gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
- val + 1);
- }
- gpr_ref(&tcp->refcount);
-}
-#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
-#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp* tcp) {
- if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+static grpc_error* tcp_error_create(const char* desc, int status) {
+ if (status == 0) {
+ return GRPC_ERROR_NONE;
}
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(desc);
+ /* All tcp errors are marked with UNAVAILABLE so that application may
+ * choose to retry. */
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE);
+ return grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
}
-static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
-#endif
-
-static void uv_close_callback(uv_handle_t* handle) {
- grpc_core::ExecCtx exec_ctx;
- grpc_tcp* tcp = (grpc_tcp*)handle->data;
- TCP_UNREF(tcp, "destroy");
+static void uv_socket_destroy(grpc_custom_socket* socket) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ gpr_free(uv_socket->handle);
+ gpr_free(uv_socket);
}
static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size,
uv_buf_t* buf) {
- grpc_core::ExecCtx exec_ctx;
- grpc_tcp* tcp = (grpc_tcp*)handle->data;
+ uv_socket_t* uv_socket =
+ (uv_socket_t*)((grpc_custom_socket*)handle->data)->impl;
(void)suggested_size;
- /* Before calling uv_read_start, we allocate a buffer with exactly one slice
- * to tcp->read_slices and wait for the callback indicating that the
- * allocation was successful. So slices[0] should always exist here */
- buf->base = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
- buf->len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
-}
-
-static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
- grpc_closure* cb = tcp->read_cb;
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
- size_t i;
- const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "read: error=%s", str);
-
- for (i = 0; i < tcp->read_slices->count; i++) {
- char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
- gpr_free(dump);
- }
- }
- tcp->read_slices = NULL;
- tcp->read_cb = NULL;
- GRPC_CLOSURE_RUN(cb, error);
+ buf->base = uv_socket->read_buf;
+ buf->len = uv_socket->read_len;
}
-static void read_callback(uv_stream_t* stream, ssize_t nread,
- const uv_buf_t* buf) {
- grpc_error* error;
- grpc_core::ExecCtx exec_ctx;
- grpc_tcp* tcp = (grpc_tcp*)stream->data;
- grpc_slice_buffer garbage;
+static void uv_read_callback(uv_stream_t* stream, ssize_t nread,
+ const uv_buf_t* buf) {
+ grpc_error* error = GRPC_ERROR_NONE;
if (nread == 0) {
// Nothing happened. Wait for the next callback
return;
}
- TCP_UNREF(tcp, "read");
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
- error =
- tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
- } else if (nread > 0) {
- // Successful read
- error = GRPC_ERROR_NONE;
- if ((size_t)nread < tcp->read_slices->length) {
- /* TODO(murgatroid99): Instead of discarding the unused part of the read
- * buffer, reuse it as the next read buffer. */
- grpc_slice_buffer_init(&garbage);
- grpc_slice_buffer_trim_end(
- tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
- grpc_slice_buffer_reset_and_unref_internal(&garbage);
- }
- } else {
- // nread < 0: Error
- error = tcp_annotate_error(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
+ } else if (nread < 0) {
+ error = tcp_error_create("TCP Read failed", nread);
}
- call_read_cb(tcp, error);
+ grpc_custom_socket* socket = (grpc_custom_socket*)stream->data;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_socket->read_cb(socket, (size_t)nread, error);
}
-static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
- int status;
- grpc_tcp* tcp = (grpc_tcp*)tcpp;
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
- grpc_error_string(error));
- }
- if (error == GRPC_ERROR_NONE) {
- status =
- uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
- if (status != 0) {
- error = tcp_annotate_error(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"),
- tcp);
- error = grpc_error_set_str(
- error, GRPC_ERROR_STR_OS_ERROR,
- grpc_slice_from_static_string(uv_strerror(status)));
- }
- }
- if (error != GRPC_ERROR_NONE) {
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
- call_read_cb(tcp, GRPC_ERROR_REF(error));
- TCP_UNREF(tcp, "read");
+static void uv_close_callback(uv_handle_t* handle) {
+ grpc_custom_socket* socket = (grpc_custom_socket*)handle->data;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ if (uv_socket->accept_socket) {
+ uv_socket->accept_cb(socket, uv_socket->accept_socket,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("socket closed"));
}
- if (grpc_tcp_trace.enabled()) {
- const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
+ uv_socket->close_cb(socket);
+}
+
+static void uv_socket_read(grpc_custom_socket* socket, char* buffer,
+ size_t length, grpc_custom_read_callback read_cb) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ int status;
+ grpc_error* error;
+ uv_socket->read_cb = read_cb;
+ uv_socket->read_buf = buffer;
+ uv_socket->read_len = length;
+ // TODO(murgatroid99): figure out what the return value here means
+ status =
+ uv_read_start((uv_stream_t*)uv_socket->handle, (uv_alloc_cb)alloc_uv_buf,
+ (uv_read_cb)uv_read_callback);
+ if (status != 0) {
+ error = tcp_error_create("TCP Read failed at start", status);
+ uv_socket->read_cb(socket, 0, error);
}
}
-static void uv_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
- grpc_closure* cb) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
- GRPC_UV_ASSERT_SAME_THREAD();
- GPR_ASSERT(tcp->read_cb == NULL);
- tcp->read_cb = cb;
- tcp->read_slices = read_slices;
- grpc_slice_buffer_reset_and_unref_internal(read_slices);
- TCP_REF(tcp, "read");
- grpc_resource_user_alloc_slices(&tcp->slice_allocator,
- GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
- tcp->read_slices);
+static void uv_write_callback(uv_write_t* req, int status) {
+ grpc_custom_socket* socket = (grpc_custom_socket*)req->data;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ gpr_free(uv_socket->write_buffers);
+ uv_socket->write_cb(socket, tcp_error_create("TCP Write failed", status));
}
-static void write_callback(uv_write_t* req, int status) {
- grpc_tcp* tcp = (grpc_tcp*)req->data;
- grpc_error* error;
- grpc_core::ExecCtx exec_ctx;
- grpc_closure* cb = tcp->write_cb;
- tcp->write_cb = NULL;
- TCP_UNREF(tcp, "write");
- if (status == 0) {
- error = GRPC_ERROR_NONE;
- } else {
- error = tcp_annotate_error(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"), tcp);
- }
- if (grpc_tcp_trace.enabled()) {
- const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
+void uv_socket_write(grpc_custom_socket* socket,
+ grpc_slice_buffer* write_slices,
+ grpc_custom_write_callback write_cb) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_socket->write_cb = write_cb;
+ uv_buf_t* uv_buffers;
+ uv_write_t* write_req;
+
+ uv_buffers = (uv_buf_t*)gpr_malloc(sizeof(uv_buf_t) * write_slices->count);
+ for (size_t i = 0; i < write_slices->count; i++) {
+ uv_buffers[i].base = (char*)GRPC_SLICE_START_PTR(write_slices->slices[i]);
+ uv_buffers[i].len = GRPC_SLICE_LENGTH(write_slices->slices[i]);
}
- gpr_free(tcp->write_buffers);
- GRPC_CLOSURE_SCHED(cb, error);
+
+ uv_socket->write_buffers = uv_buffers;
+ write_req = &uv_socket->write_req;
+ write_req->data = socket;
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_write(write_req, (uv_stream_t*)uv_socket->handle, uv_buffers,
+ write_slices->count, uv_write_callback);
}
-static void uv_endpoint_write(grpc_endpoint* ep,
- grpc_slice_buffer* write_slices,
- grpc_closure* cb) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
- uv_buf_t* buffers;
- unsigned int buffer_count;
- unsigned int i;
- grpc_slice* slice;
- uv_write_t* write_req;
- GRPC_UV_ASSERT_SAME_THREAD();
+static void shutdown_callback(uv_shutdown_t* req, int status) {}
- if (grpc_tcp_trace.enabled()) {
- size_t j;
+static void uv_socket_shutdown(grpc_custom_socket* socket) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_shutdown_t* req = &uv_socket->shutdown_req;
+ uv_shutdown(req, (uv_stream_t*)uv_socket->handle, shutdown_callback);
+}
- for (j = 0; j < write_slices->count; j++) {
- char* data = grpc_dump_slice(write_slices->slices[j],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
- gpr_free(data);
- }
+static void uv_socket_close(grpc_custom_socket* socket,
+ grpc_custom_close_callback close_cb) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_socket->close_cb = close_cb;
+ uv_close((uv_handle_t*)uv_socket->handle, uv_close_callback);
+}
+
+static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) {
+ uv_tcp_t* tcp = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
+ uv_socket->handle = tcp;
+ int status = uv_tcp_init_ex(uv_default_loop(), tcp, (unsigned int)domain);
+ if (status != 0) {
+ return tcp_error_create("Failed to initialize UV tcp handle", status);
}
+ uv_socket->write_buffers = nullptr;
+ uv_socket->read_len = 0;
+ uv_tcp_nodelay(uv_socket->handle, 1);
+ uv_socket->pending_connection = false;
+ uv_socket->accept_socket = nullptr;
+ uv_socket->accept_error = GRPC_ERROR_NONE;
+ return GRPC_ERROR_NONE;
+}
- if (tcp->shutting_down) {
- GRPC_CLOSURE_SCHED(cb,
- tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "TCP socket is shutting down"),
- tcp));
- return;
+static grpc_error* uv_socket_init(grpc_custom_socket* socket, int domain) {
+ uv_socket_t* uv_socket = (uv_socket_t*)gpr_malloc(sizeof(uv_socket_t));
+ grpc_error* error = uv_socket_init_helper(uv_socket, domain);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
}
+ uv_socket->handle->data = socket;
+ socket->impl = uv_socket;
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* uv_socket_getpeername(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr,
+ int* addr_len) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ int err = uv_tcp_getpeername(uv_socket->handle,
+ (struct sockaddr*)IGNORE_CONST(addr), addr_len);
+ return tcp_error_create("getpeername failed", err);
+}
+
+static grpc_error* uv_socket_getsockname(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr,
+ int* addr_len) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ int err = uv_tcp_getsockname(uv_socket->handle,
+ (struct sockaddr*)IGNORE_CONST(addr), addr_len);
+ return tcp_error_create("getsockname failed", err);
+}
- GPR_ASSERT(tcp->write_cb == NULL);
- tcp->write_slices = write_slices;
- GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
- if (tcp->write_slices->count == 0) {
- // No slices means we don't have to do anything,
- // and libuv doesn't like empty writes
- GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
+static void accept_new_connection(grpc_custom_socket* socket) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ if (!uv_socket->pending_connection || !uv_socket->accept_socket) {
return;
}
+ grpc_custom_socket* new_socket = uv_socket->accept_socket;
+ grpc_error* error = uv_socket->accept_error;
+ uv_socket->accept_socket = nullptr;
+ uv_socket->accept_error = GRPC_ERROR_NONE;
+ uv_socket->pending_connection = false;
+ if (error != GRPC_ERROR_NONE) {
+ uv_stream_t dummy_handle;
+ uv_accept((uv_stream_t*)uv_socket->handle, &dummy_handle);
+ uv_socket->accept_cb(socket, new_socket, error);
+ } else {
+ uv_socket_t* uv_new_socket = (uv_socket_t*)gpr_malloc(sizeof(uv_socket_t));
+ uv_socket_init_helper(uv_new_socket, AF_UNSPEC);
+ // UV documentation says this is guaranteed to succeed
+ GPR_ASSERT(uv_accept((uv_stream_t*)uv_socket->handle,
+ (uv_stream_t*)uv_new_socket->handle) == 0);
+ new_socket->impl = uv_new_socket;
+ uv_new_socket->handle->data = new_socket;
+ uv_socket->accept_cb(socket, new_socket, error);
+ }
+}
- tcp->write_cb = cb;
- buffer_count = (unsigned int)tcp->write_slices->count;
- buffers = (uv_buf_t*)gpr_malloc(sizeof(uv_buf_t) * buffer_count);
- for (i = 0; i < buffer_count; i++) {
- slice = &tcp->write_slices->slices[i];
- buffers[i].base = (char*)GRPC_SLICE_START_PTR(*slice);
- buffers[i].len = GRPC_SLICE_LENGTH(*slice);
+static void uv_on_connect(uv_stream_t* server, int status) {
+ grpc_custom_socket* socket = (grpc_custom_socket*)server->data;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ GPR_ASSERT(!uv_socket->pending_connection);
+ uv_socket->pending_connection = true;
+ if (status < 0) {
+ switch (status) {
+ case UV_EINTR:
+ case UV_EAGAIN:
+ return;
+ default:
+ uv_socket->accept_error = tcp_error_create("accept failed", status);
+ }
}
- tcp->write_buffers = buffers;
- write_req = &tcp->write_req;
- write_req->data = tcp;
- TCP_REF(tcp, "write");
- // TODO(murgatroid99): figure out what the return value here means
- uv_write(write_req, (uv_stream_t*)tcp->handle, buffers, buffer_count,
- write_callback);
+ accept_new_connection(socket);
}
-static void uv_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
- // No-op. We're ignoring pollsets currently
- (void)ep;
- (void)pollset;
- grpc_tcp* tcp = (grpc_tcp*)ep;
- tcp->pollset = pollset;
+void uv_socket_accept(grpc_custom_socket* socket,
+ grpc_custom_socket* new_socket,
+ grpc_custom_accept_callback accept_cb) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_socket->accept_cb = accept_cb;
+ GPR_ASSERT(uv_socket->accept_socket == nullptr);
+ uv_socket->accept_socket = new_socket;
+ accept_new_connection(socket);
}
-static void uv_add_to_pollset_set(grpc_endpoint* ep,
- grpc_pollset_set* pollset) {
- // No-op. We're ignoring pollsets currently
- (void)ep;
- (void)pollset;
+static grpc_error* uv_socket_bind(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr, size_t len,
+ int flags) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ int status =
+ uv_tcp_bind((uv_tcp_t*)uv_socket->handle, (struct sockaddr*)addr, 0);
+ return tcp_error_create("Failed to bind to port", status);
}
-static void uv_delete_from_pollset_set(grpc_endpoint* ep,
- grpc_pollset_set* pollset) {
- // No-op. We're ignoring pollsets currently
- (void)ep;
- (void)pollset;
+static grpc_error* uv_socket_listen(grpc_custom_socket* socket) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ int status =
+ uv_listen((uv_stream_t*)uv_socket->handle, SOMAXCONN, uv_on_connect);
+ return tcp_error_create("Failed to listen to port", status);
}
-static void shutdown_callback(uv_shutdown_t* req, int status) {}
+static grpc_error* uv_socket_setsockopt(grpc_custom_socket* socket, int level,
+ int option_name, const void* optval,
+ socklen_t option_len) {
+ int fd;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_fileno((uv_handle_t*)uv_socket->handle, &fd);
+ // TODO: handle setsockopt errors here; also verify this works on Windows.
+ setsockopt(fd, level, option_name, optval, option_len);
+ return GRPC_ERROR_NONE;
+}
-static void uv_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
- if (!tcp->shutting_down) {
- if (grpc_tcp_trace.enabled()) {
- const char* str = grpc_error_string(why);
- gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str);
- }
- tcp->shutting_down = true;
- uv_shutdown_t* req = &tcp->shutdown_req;
- uv_shutdown(req, (uv_stream_t*)tcp->handle, shutdown_callback);
- grpc_resource_user_shutdown(tcp->resource_user);
+static void uv_tc_on_connect(uv_connect_t* req, int status) {
+ grpc_custom_socket* socket = (grpc_custom_socket*)req->data;
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ grpc_error* error;
+ if (status == UV_ECANCELED) {
+ // This should only happen if the handle is already closed
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timeout occurred");
+ } else {
+ error = tcp_error_create("Failed to connect to remote host", status);
}
- GRPC_ERROR_UNREF(why);
+ uv_socket->connect_cb(socket, error);
}
-static void uv_destroy(grpc_endpoint* ep) {
- grpc_network_status_unregister_endpoint(ep);
- grpc_tcp* tcp = (grpc_tcp*)ep;
- uv_close((uv_handle_t*)tcp->handle, uv_close_callback);
+static void uv_socket_connect(grpc_custom_socket* socket,
+ const grpc_sockaddr* addr, size_t len,
+ grpc_custom_connect_callback connect_cb) {
+ uv_socket_t* uv_socket = (uv_socket_t*)socket->impl;
+ uv_socket->connect_cb = connect_cb;
+ uv_socket->connect_req.data = socket;
+ int status = uv_tcp_connect(&uv_socket->connect_req, uv_socket->handle,
+ (struct sockaddr*)addr, uv_tc_on_connect);
+ if (status != 0) {
+ // The callback will not be called
+ uv_socket->connect_cb(socket, tcp_error_create("connect failed", status));
+ }
+}
+
+static grpc_resolved_addresses* handle_addrinfo_result(
+ struct addrinfo* result) {
+ struct addrinfo* resp;
+ struct addrinfo* prev;
+ size_t i;
+ grpc_resolved_addresses* addresses =
+ (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
+ addresses->naddrs = 0;
+ for (resp = result; resp != nullptr; resp = resp->ai_next) {
+ addresses->naddrs++;
+ }
+ addresses->addrs = (grpc_resolved_address*)gpr_malloc(
+ sizeof(grpc_resolved_address) * addresses->naddrs);
+ i = 0;
+ resp = result;
+ while (resp != nullptr) {
+ memcpy(&addresses->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
+ addresses->addrs[i].len = resp->ai_addrlen;
+ i++;
+ prev = resp;
+ resp = resp->ai_next;
+ gpr_free(prev);
+ }
+ return addresses;
}
-static char* uv_get_peer(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
- return gpr_strdup(tcp->peer_string);
+static void uv_resolve_callback(uv_getaddrinfo_t* req, int status,
+ struct addrinfo* res) {
+ grpc_custom_resolver* r = (grpc_custom_resolver*)req->data;
+ gpr_free(req);
+ grpc_resolved_addresses* result = nullptr;
+ if (status == 0) {
+ result = handle_addrinfo_result(res);
+ }
+ grpc_custom_resolve_callback(r, result,
+ tcp_error_create("getaddrinfo failed", status));
}
-static grpc_resource_user* uv_get_resource_user(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
- return tcp->resource_user;
+static grpc_error* uv_resolve(char* host, char* port,
+ grpc_resolved_addresses** result) {
+ int status;
+ uv_getaddrinfo_t req;
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(struct addrinfo));
+ hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
+ hints.ai_socktype = SOCK_STREAM; /* stream socket */
+ hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
+ status = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
+ if (status != 0) {
+ *result = nullptr;
+ } else {
+ *result = handle_addrinfo_result(req.addrinfo);
+ }
+ return tcp_error_create("getaddrinfo failed", status);
}
-static int uv_get_fd(grpc_endpoint* ep) { return -1; }
-
-static grpc_endpoint_vtable vtable = {uv_endpoint_read,
- uv_endpoint_write,
- uv_add_to_pollset,
- uv_add_to_pollset_set,
- uv_delete_from_pollset_set,
- uv_endpoint_shutdown,
- uv_destroy,
- uv_get_resource_user,
- uv_get_peer,
- uv_get_fd};
-
-grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
- grpc_resource_quota* resource_quota,
- char* peer_string) {
- grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
- grpc_core::ExecCtx exec_ctx;
-
- if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
+static void uv_resolve_async(grpc_custom_resolver* r, char* host, char* port) {
+ int status;
+ uv_getaddrinfo_t* req =
+ (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req->data = r;
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(struct addrinfo));
+ hints.ai_family = GRPC_AF_UNSPEC; /* ipv4 or ipv6 */
+ hints.ai_socktype = GRPC_SOCK_STREAM; /* stream socket */
+ hints.ai_flags = GRPC_AI_PASSIVE; /* for wildcard IP address */
+ status = uv_getaddrinfo(uv_default_loop(), req, uv_resolve_callback, host,
+ port, &hints);
+ if (status != 0) {
+ gpr_free(req);
+ grpc_error* error = tcp_error_create("getaddrinfo failed", status);
+ grpc_custom_resolve_callback(r, NULL, error);
}
+}
- /* Disable Nagle's Algorithm */
- uv_tcp_nodelay(handle, 1);
-
- memset(tcp, 0, sizeof(grpc_tcp));
- tcp->base.vtable = &vtable;
- tcp->handle = handle;
- handle->data = tcp;
- gpr_ref_init(&tcp->refcount, 1);
- tcp->peer_string = gpr_strdup(peer_string);
- tcp->shutting_down = false;
- tcp->read_slices = NULL;
- tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
- grpc_resource_user_slice_allocator_init(
- &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
- /* Tell network status tracking code about the new endpoint */
- grpc_network_status_register_endpoint(&tcp->base);
-
-#ifndef GRPC_UV_TCP_HOLD_LOOP
- uv_unref((uv_handle_t*)handle);
-#endif
+grpc_custom_resolver_vtable uv_resolver_vtable = {uv_resolve, uv_resolve_async};
- return &tcp->base;
-}
+grpc_socket_vtable grpc_uv_socket_vtable = {
+ uv_socket_init, uv_socket_connect, uv_socket_destroy,
+ uv_socket_shutdown, uv_socket_close, uv_socket_write,
+ uv_socket_read, uv_socket_getpeername, uv_socket_getsockname,
+ uv_socket_setsockopt, uv_socket_bind, uv_socket_listen,
+ uv_socket_accept};
-#endif /* GRPC_UV */
+#endif
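
In the rewritten endpoint, reads no longer pull slices from a resource-user allocator inside the socket layer: uv_socket_read() records the caller's buffer and length on the socket, alloc_uv_buf hands that same buffer back to libuv, and uv_read_callback stops the read and reports nread upward. A small standalone libuv sketch of that caller-provided-buffer read pattern (assumes an already-connected uv_stream_t; error handling trimmed):

// Sketch of the caller-provided-buffer read pattern used above (libuv).
// Assumes `stream` is already connected; error handling is trimmed.
#include <uv.h>
#include <cstdio>

struct pending_read {
  char* buf;
  size_t len;
};

static void alloc_cb(uv_handle_t* handle, size_t /*suggested*/, uv_buf_t* out) {
  // Hand libuv the buffer the caller registered instead of allocating here.
  pending_read* r = static_cast<pending_read*>(handle->data);
  *out = uv_buf_init(r->buf, static_cast<unsigned int>(r->len));
}

static void read_cb(uv_stream_t* stream, ssize_t nread, const uv_buf_t*) {
  if (nread == 0) return;  // nothing happened; keep waiting
  uv_read_stop(stream);    // one read per uv_read_start, as in tcp_uv.cc above
  std::printf("read finished: %zd\n", nread);  // <0 is an error, UV_EOF is EOF
}

void start_read(uv_stream_t* stream, char* buf, size_t len) {
  static pending_read r;   // sketch only; real code keeps this per socket
  r.buf = buf;
  r.len = len;
  stream->data = &r;
  uv_read_start(stream, alloc_cb, read_cb);
}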
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
deleted file mode 100644
index 6b1a6f77c2..0000000000
--- a/src/core/lib/iomgr/tcp_uv.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_TCP_UV_H
-#define GRPC_CORE_LIB_IOMGR_TCP_UV_H
-/*
- Low level TCP "bottom half" implementation, for use by transports built on
- top of a TCP connection.
-
- Note that this file does not (yet) include APIs for creating the socket in
- the first place.
-
- All calls passing slice transfer ownership of a slice refcount unless
- otherwise specified.
-*/
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/endpoint.h"
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-
-#include <uv.h>
-
-extern grpc_core::TraceFlag grpc_tcp_trace;
-
-#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
-
-grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
- grpc_resource_quota* resource_quota,
- char* peer_string);
-
-#endif /* GRPC_UV */
-
-#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index aab8edc888..04e6f11eee 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -51,7 +51,7 @@
#define GRPC_FIONBIO FIONBIO
#endif
-grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
+extern grpc_core::TraceFlag grpc_tcp_trace;
static grpc_error* set_non_block(SOCKET sock) {
int status;
diff --git a/src/core/lib/iomgr/timer.cc b/src/core/lib/iomgr/timer.cc
new file mode 100644
index 0000000000..e647cdefa0
--- /dev/null
+++ b/src/core/lib/iomgr/timer.cc
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/timer_manager.h"
+
+grpc_timer_vtable* grpc_timer_impl;
+
+void grpc_set_timer_impl(grpc_timer_vtable* vtable) {
+ grpc_timer_impl = vtable;
+}
+
+void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
+ grpc_closure* closure) {
+ grpc_timer_impl->init(timer, deadline, closure);
+}
+
+void grpc_timer_cancel(grpc_timer* timer) { grpc_timer_impl->cancel(timer); }
+
+grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
+ return grpc_timer_impl->check(next);
+}
+
+void grpc_timer_list_init() { grpc_timer_impl->list_init(); }
+
+void grpc_timer_list_shutdown() { grpc_timer_impl->list_shutdown(); }
+
+void grpc_timer_consume_kick() { grpc_timer_impl->consume_kick(); }
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index 67f1b1b3f9..5ff10d3aee 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -23,17 +23,41 @@
#include "src/core/lib/iomgr/port.h"
-#ifdef GRPC_UV
-#include "src/core/lib/iomgr/timer_uv.h"
-#else
-#include "src/core/lib/iomgr/timer_generic.h"
-#endif /* GRPC_UV */
-
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
-typedef struct grpc_timer grpc_timer;
+typedef struct grpc_timer {
+ gpr_atm deadline;
+ uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
+ bool pending;
+ struct grpc_timer* next;
+ struct grpc_timer* prev;
+ grpc_closure* closure;
+#ifndef NDEBUG
+ struct grpc_timer* hash_table_next;
+#endif
+
+ // Optional field used by custom timers
+ void* custom_timer;
+} grpc_timer;
+
+typedef enum {
+ GRPC_TIMERS_NOT_CHECKED,
+ GRPC_TIMERS_CHECKED_AND_EMPTY,
+ GRPC_TIMERS_FIRED,
+} grpc_timer_check_result;
+
+typedef struct grpc_timer_vtable {
+ void (*init)(grpc_timer* timer, grpc_millis, grpc_closure* closure);
+ void (*cancel)(grpc_timer* timer);
+
+ /* Internal API */
+ grpc_timer_check_result (*check)(grpc_millis* next);
+ void (*list_init)();
+ void (*list_shutdown)(void);
+ void (*consume_kick)(void);
+} grpc_timer_vtable;
/* Initialize *timer. When expired or canceled, closure will be called with
error set to indicate if it expired (GRPC_ERROR_NONE) or was canceled
@@ -78,12 +102,6 @@ void grpc_timer_cancel(grpc_timer* timer);
/* iomgr internal api for dealing with timers */
-typedef enum {
- GRPC_TIMERS_NOT_CHECKED,
- GRPC_TIMERS_CHECKED_AND_EMPTY,
- GRPC_TIMERS_FIRED,
-} grpc_timer_check_result;
-
/* Check for timers to be run, and run them.
Return true if timer callbacks were executed.
If next is non-null, TRY to update *next with the next running timer
@@ -99,7 +117,9 @@ void grpc_timer_list_shutdown();
void grpc_timer_consume_kick(void);
/* the following must be implemented by each iomgr implementation */
-
void grpc_kick_poller(void);
+/* Sets the timer implementation */
+void grpc_set_timer_impl(grpc_timer_vtable* vtable);
+
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_H */
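
timer.h now declares the timer implementation as a grpc_timer_vtable, and the new timer.cc above forwards every grpc_timer_* call through the pointer installed by grpc_set_timer_impl(). A short sketch of the install step, using only symbols visible in this diff (the surrounding init function is hypothetical):

// Sketch: installing a timer implementation through the new vtable hook.
// grpc_generic_timer_vtable is exported by timer_generic.cc later in this diff;
// the wrapping init function is hypothetical.
#include "src/core/lib/iomgr/timer.h"

extern grpc_timer_vtable grpc_generic_timer_vtable;  // from timer_generic.cc

void hypothetical_iomgr_platform_init() {
  grpc_set_timer_impl(&grpc_generic_timer_vtable);
  grpc_timer_list_init();  // now dispatches to timer_list_init() in timer_generic.cc
}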
diff --git a/src/core/lib/iomgr/timer_custom.cc b/src/core/lib/iomgr/timer_custom.cc
new file mode 100644
index 0000000000..71d825ff9f
--- /dev/null
+++ b/src/core/lib/iomgr/timer_custom.cc
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/timer_custom.h"
+
+static grpc_custom_timer_vtable* custom_timer_impl;
+
+void grpc_custom_timer_callback(grpc_custom_timer* t, grpc_error* error) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ grpc_core::ExecCtx exec_ctx;
+ grpc_timer* timer = t->original;
+ GPR_ASSERT(timer->pending);
+ timer->pending = 0;
+ GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
+ custom_timer_impl->stop(t);
+ gpr_free(t);
+}
+
+static void timer_init(grpc_timer* timer, grpc_millis deadline,
+ grpc_closure* closure) {
+ uint64_t timeout;
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ if (deadline <= now) {
+ GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ timer->pending = false;
+ return;
+ } else {
+ timeout = deadline - now;
+ }
+ timer->pending = true;
+ timer->closure = closure;
+ grpc_custom_timer* timer_wrapper =
+ (grpc_custom_timer*)gpr_malloc(sizeof(grpc_custom_timer));
+ timer_wrapper->timeout_ms = timeout;
+ timer->custom_timer = (void*)timer_wrapper;
+ timer_wrapper->original = timer;
+ custom_timer_impl->start(timer_wrapper);
+}
+
+static void timer_cancel(grpc_timer* timer) {
+ GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
+ grpc_custom_timer* tw = (grpc_custom_timer*)timer->custom_timer;
+ if (timer->pending) {
+ timer->pending = 0;
+ GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
+ custom_timer_impl->stop(tw);
+ gpr_free(tw);
+ }
+}
+
+static grpc_timer_check_result timer_check(grpc_millis* next) {
+ return GRPC_TIMERS_NOT_CHECKED;
+}
+
+static void timer_list_init() {}
+static void timer_list_shutdown() {}
+
+static void timer_consume_kick(void) {}
+
+static grpc_timer_vtable custom_timer_vtable = {
+ timer_init, timer_cancel, timer_check,
+ timer_list_init, timer_list_shutdown, timer_consume_kick};
+
+void grpc_custom_timer_init(grpc_custom_timer_vtable* impl) {
+ custom_timer_impl = impl;
+ grpc_set_timer_impl(&custom_timer_vtable);
+}
diff --git a/src/core/lib/iomgr/timer_custom.h b/src/core/lib/iomgr/timer_custom.h
new file mode 100644
index 0000000000..bfea8bafa6
--- /dev/null
+++ b/src/core/lib/iomgr/timer_custom.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H
+#define GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/timer.h"
+
+typedef struct grpc_custom_timer {
+ // Implementation defined
+ void* timer;
+ uint64_t timeout_ms;
+
+ grpc_timer* original;
+} grpc_custom_timer;
+
+typedef struct grpc_custom_timer_vtable {
+ void (*start)(grpc_custom_timer* t);
+ void (*stop)(grpc_custom_timer* t);
+} grpc_custom_timer_vtable;
+
+void grpc_custom_timer_init(grpc_custom_timer_vtable* impl);
+
+void grpc_custom_timer_callback(grpc_custom_timer* t, grpc_error* error);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H */
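
timer_custom.h asks an implementation for just start/stop; expiry is reported back through grpc_custom_timer_callback, exactly as the rewritten timer_uv.cc does below with a uv_timer_t. A compressed, libuv-flavoured sketch of that contract (assumes the custom iomgr's single-thread model; closing and freeing the uv handle are trimmed, and this is not a drop-in replacement for the real file):

// Sketch of a grpc_custom_timer_vtable backed by libuv (mirrors timer_uv.cc below).
#include <uv.h>
#include <grpc/support/alloc.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/timer_custom.h"

static void on_expired(uv_timer_t* handle) {
  grpc_custom_timer* t = static_cast<grpc_custom_timer*>(handle->data);
  grpc_custom_timer_callback(t, GRPC_ERROR_NONE);  // hands control back to iomgr
}

static void sketch_timer_start(grpc_custom_timer* t) {
  uv_timer_t* uv_timer =
      static_cast<uv_timer_t*>(gpr_malloc(sizeof(uv_timer_t)));
  uv_timer_init(uv_default_loop(), uv_timer);
  uv_timer->data = t;
  t->timer = uv_timer;
  uv_timer_start(uv_timer, on_expired, t->timeout_ms, 0 /* no repeat */);
}

static void sketch_timer_stop(grpc_custom_timer* t) {
  uv_timer_stop(static_cast<uv_timer_t*>(t->timer));  // real code also closes/frees
}

static grpc_custom_timer_vtable sketch_uv_timer_vtable = {sketch_timer_start,
                                                          sketch_timer_stop};
// Registered once at startup with grpc_custom_timer_init(&sketch_uv_timer_vtable).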
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 52a571f425..93e654b7fa 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -22,8 +22,6 @@
#include <inttypes.h>
-#ifdef GRPC_TIMER_USE_GENERIC
-
#include "src/core/lib/iomgr/timer.h"
#include <grpc/support/alloc.h>
@@ -238,7 +236,7 @@ static gpr_atm compute_min_deadline(timer_shard* shard) {
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init() {
+static void timer_list_init() {
uint32_t i;
g_num_shards = GPR_MIN(1, 2 * gpr_cpu_num_cores());
@@ -270,7 +268,7 @@ void grpc_timer_list_init() {
INIT_TIMER_HASH_TABLE();
}
-void grpc_timer_list_shutdown() {
+static void timer_list_shutdown() {
size_t i;
run_some_expired_timers(
GPR_ATM_MAX, nullptr,
@@ -326,8 +324,8 @@ static void note_deadline_change(timer_shard* shard) {
void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
-void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
- grpc_closure* closure) {
+static void timer_init(grpc_timer* timer, grpc_millis deadline,
+ grpc_closure* closure) {
int is_first_timer = 0;
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
timer->closure = closure;
@@ -412,12 +410,12 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
}
}
-void grpc_timer_consume_kick(void) {
+static void timer_consume_kick(void) {
 /* force re-evaluation of last seen min */
gpr_tls_set(&g_last_seen_min_timer, 0);
}
-void grpc_timer_cancel(grpc_timer* timer) {
+static void timer_cancel(grpc_timer* timer) {
if (!g_shared_mutables.initialized) {
/* must have already been cancelled, also the shard mutex is invalid */
return;
@@ -604,7 +602,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
return result;
}
-grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
+static grpc_timer_check_result timer_check(grpc_millis* next) {
// prelude
grpc_millis now = grpc_core::ExecCtx::Get()->Now();
@@ -660,4 +658,6 @@ grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
return r;
}
-#endif /* GRPC_TIMER_USE_GENERIC */
+grpc_timer_vtable grpc_generic_timer_vtable = {
+ timer_init, timer_cancel, timer_check,
+ timer_list_init, timer_list_shutdown, timer_consume_kick};
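With the GRPC_TIMER_USE_GENERIC guard removed, the generic implementation is always compiled and exported as grpc_generic_timer_vtable, so the choice of timer backend moves from the preprocessor to init time via grpc_set_timer_impl. The sketch below illustrates that runtime selection with two stub vtables; how the installed vtable is consumed is an assumption on my part, since timer.cc itself is not shown in this hunk.

// Standalone sketch (not gRPC code): with the compile-time switch gone, both
// timer implementations can be linked in and one is installed at init time.
// The vtable here is trimmed to two entries; the real grpc_timer_vtable has six.
#include <cstdio>

struct TimerVtable {
  const char* name;
  void (*list_init)();
  void (*list_shutdown)();
};

static void generic_init() { std::puts("generic timer list init"); }
static void generic_shutdown() { std::puts("generic timer list shutdown"); }
static const TimerVtable kGenericVtable = {"generic", generic_init, generic_shutdown};

static void custom_init() { std::puts("custom timer list init"); }
static void custom_shutdown() { std::puts("custom timer list shutdown"); }
static const TimerVtable kCustomVtable = {"custom", custom_init, custom_shutdown};

static const TimerVtable* g_timer_impl = nullptr;
void set_timer_impl(const TimerVtable* v) { g_timer_impl = v; }  // ~ grpc_set_timer_impl

int main(int argc, char**) {
  // A native build would install the generic vtable; an embedder driving its
  // own event loop would install a custom one instead.
  set_timer_impl(argc > 1 ? &kCustomVtable : &kGenericVtable);
  g_timer_impl->list_init();
  g_timer_impl->list_shutdown();
}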
diff --git a/src/core/lib/iomgr/timer_heap.cc b/src/core/lib/iomgr/timer_heap.cc
index e5b5abfc97..0c17d607eb 100644
--- a/src/core/lib/iomgr/timer_heap.cc
+++ b/src/core/lib/iomgr/timer_heap.cc
@@ -20,8 +20,6 @@
#include "src/core/lib/iomgr/port.h"
-#ifdef GRPC_TIMER_USE_GENERIC
-
#include "src/core/lib/iomgr/timer_heap.h"
#include <string.h>
@@ -135,5 +133,3 @@ grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap) {
void grpc_timer_heap_pop(grpc_timer_heap* heap) {
grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap));
}
-
-#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index 6f28f553c5..dadeb960b2 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -20,20 +20,18 @@
#include "src/core/lib/iomgr/port.h"
-#if GRPC_UV
+#ifdef GRPC_UV
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/iomgr_uv.h"
+#include "src/core/lib/iomgr/iomgr_custom.h"
#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/timer_custom.h"
#include <uv.h>
-grpc_core::TraceFlag grpc_timer_trace(false, "timer");
-grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check");
-
static void timer_close_callback(uv_handle_t* handle) { gpr_free(handle); }
static void stop_uv_timer(uv_timer_t* handle) {
@@ -43,57 +41,23 @@ static void stop_uv_timer(uv_timer_t* handle) {
}
void run_expired_timer(uv_timer_t* handle) {
- grpc_timer* timer = (grpc_timer*)handle->data;
- grpc_core::ExecCtx exec_ctx;
- GRPC_UV_ASSERT_SAME_THREAD();
- GPR_ASSERT(timer->pending);
- timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
- stop_uv_timer(handle);
+ grpc_custom_timer* timer_wrapper = (grpc_custom_timer*)handle->data;
+ grpc_custom_timer_callback(timer_wrapper, GRPC_ERROR_NONE);
}
-void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
- grpc_closure* closure) {
- uint64_t timeout;
+static void timer_start(grpc_custom_timer* t) {
uv_timer_t* uv_timer;
- GRPC_UV_ASSERT_SAME_THREAD();
- timer->closure = closure;
- if (deadline <= grpc_core::ExecCtx::Get()->Now()) {
- timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
- return;
- }
- timer->pending = 1;
- timeout = (uint64_t)(deadline - grpc_core::ExecCtx::Get()->Now());
uv_timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
- uv_timer->data = timer;
- timer->uv_timer = uv_timer;
- uv_timer_start(uv_timer, run_expired_timer, timeout, 0);
- /* We assume that gRPC timers are only used alongside other active gRPC
- objects, and that there will therefore always be something else keeping
- the uv loop alive whenever there is a timer */
- uv_unref((uv_handle_t*)uv_timer);
+ uv_timer->data = t;
+ t->timer = (void*)uv_timer;
+ uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0);
}
-void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = 0; }
-
-void grpc_timer_cancel(grpc_timer* timer) {
- GRPC_UV_ASSERT_SAME_THREAD();
- if (timer->pending) {
- timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
- stop_uv_timer((uv_timer_t*)timer->uv_timer);
- }
+static void timer_stop(grpc_custom_timer* t) {
+ stop_uv_timer((uv_timer_t*)t->timer);
}
-grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
- return GRPC_TIMERS_NOT_CHECKED;
-}
-
-void grpc_timer_list_init() {}
-void grpc_timer_list_shutdown() {}
-
-void grpc_timer_consume_kick(void) {}
+grpc_custom_timer_vtable uv_timer_vtable = {timer_start, timer_stop};
-#endif /* GRPC_UV */
+#endif
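After this change, timer_uv.cc only adapts libuv to the two-entry interface from timer_custom.h: timer_start arms a uv_timer_t whose data pointer carries the wrapper, and timer_stop goes through stop_uv_timer. The self-contained libuv sketch below (compile with -luv) shows that same arm/fire/stop cycle outside gRPC; the Payload struct is illustrative and is not grpc_custom_timer.

// Standalone libuv sketch: the minimal one-shot timer cycle that timer_start
// and timer_stop above drive through uv_timer_start/uv_timer_stop.
#include <uv.h>
#include <cstdio>

struct Payload { const char* name; };  // stands in for the wrapper carried in handle->data

static void on_fire(uv_timer_t* handle) {
  Payload* p = static_cast<Payload*>(handle->data);
  std::printf("%s fired\n", p->name);
  uv_timer_stop(handle);  // one-shot: repeat was 0 anyway
  uv_close(reinterpret_cast<uv_handle_t*>(handle), nullptr);
}

int main() {
  uv_timer_t timer;
  Payload p{"demo-timer"};
  uv_timer_init(uv_default_loop(), &timer);
  timer.data = &p;  // same handle->data trick as run_expired_timer above
  uv_timer_start(&timer, on_fire, /*timeout_ms=*/50, /*repeat=*/0);
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  return 0;
}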
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 6e5079b014..04716a254d 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -345,7 +345,7 @@ static int bind_socket(grpc_socket_factory* socket_factory, int sockfd,
return (socket_factory != nullptr)
? grpc_socket_factory_bind(socket_factory, sockfd, addr)
: bind(sockfd,
- reinterpret_cast<struct sockaddr*>(
+ reinterpret_cast<grpc_sockaddr*>(
const_cast<char*>(addr->addr)),
addr->len);
}
@@ -355,8 +355,8 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
const grpc_resolved_address* addr, int rcv_buf_size,
int snd_buf_size) {
grpc_resolved_address sockname_temp;
- struct sockaddr* addr_ptr =
- reinterpret_cast<struct sockaddr*>(const_cast<char*>(addr->addr));
+ grpc_sockaddr* addr_ptr =
+ reinterpret_cast<grpc_sockaddr*>(const_cast<char*>(addr->addr));
if (fd < 0) {
goto error;
@@ -392,7 +392,7 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
sockname_temp.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
- if (getsockname(fd, reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
+ if (getsockname(fd, reinterpret_cast<grpc_sockaddr*>(sockname_temp.addr),
&sockname_temp.len) < 0) {
goto error;
}
@@ -577,10 +577,9 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
for (size_t i = 0; i < s->listeners.size(); ++i) {
sockname_temp.len =
static_cast<socklen_t>(sizeof(struct sockaddr_storage));
- if (0 ==
- getsockname(s->listeners[i].fd(),
- reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
- &sockname_temp.len)) {
+ if (0 == getsockname(s->listeners[i].fd(),
+ reinterpret_cast<grpc_sockaddr*>(sockname_temp.addr),
+ &sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
allocated_addr = static_cast<grpc_resolved_address*>(
diff --git a/src/core/lib/iomgr/unix_sockets_posix.cc b/src/core/lib/iomgr/unix_sockets_posix.cc
index 1c464bf97c..22fcaf57fc 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.cc
+++ b/src/core/lib/iomgr/unix_sockets_posix.cc
@@ -67,15 +67,15 @@ grpc_error* grpc_resolve_unix_domain_address(const char* name,
}
int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
return addr->sa_family == AF_UNIX;
}
void grpc_unlink_if_unix_domain_socket(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
if (addr->sa_family != AF_UNIX) {
return;
}
@@ -90,8 +90,8 @@ void grpc_unlink_if_unix_domain_socket(
char* grpc_sockaddr_to_uri_unix_if_possible(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr =
- reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
+ const grpc_sockaddr* addr =
+ reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
if (addr->sa_family != AF_UNIX) {
return nullptr;
}
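The casts in udp_server.cc and unix_sockets_posix.cc above switch from struct sockaddr to the platform-neutral grpc_sockaddr alias, which lets custom (non-socket) iomgr builds supply their own layout via sockaddr_custom.h. The sketch below shows one plausible shape for that alias and how a resolved address's raw bytes are reinterpreted through it; the GRPC_CUSTOM_SOCKET branch is an assumption, not the actual header contents.

// Standalone sketch (not the gRPC headers): a platform-neutral sockaddr alias.
#include <cstring>
#include <sys/socket.h>  // POSIX-only for this sketch

#ifndef GRPC_CUSTOM_SOCKET
typedef struct sockaddr grpc_sockaddr;  // native platform type
#else
typedef struct grpc_sockaddr {          // engine-defined stand-in (assumed shape)
  unsigned short sa_family;
  char sa_data[14];
} grpc_sockaddr;
#endif

// Resolved addresses are carried as raw bytes plus a length, so call sites
// reinterpret_cast to grpc_sockaddr* exactly as in the hunks above.
struct ResolvedAddress {
  char addr[128];
  unsigned len;
};

int address_family(const ResolvedAddress* ra) {
  const grpc_sockaddr* sa = reinterpret_cast<const grpc_sockaddr*>(ra->addr);
  return sa->sa_family;
}

int main() {
  ResolvedAddress ra{};
  grpc_sockaddr sa{};
  sa.sa_family = AF_UNIX;
  std::memcpy(ra.addr, &sa, sizeof(sa));
  ra.len = sizeof(sa);
  return address_family(&ra) == AF_UNIX ? 0 : 1;
}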
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index adb6ee5a06..c683cc02de 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -380,7 +380,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
bool immediately_cancel = false;
if (args->parent != nullptr) {
- child_call* cc = call->child =
+ call->child =
static_cast<child_call*>(gpr_arena_alloc(arena, sizeof(child_call)));
call->child->parent = args->parent;
@@ -388,10 +388,6 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent->is_client);
- parent_call* pc = get_or_create_parent_call(args->parent);
-
- gpr_mu_lock(&pc->child_list_mu);
-
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = GPR_MIN(send_deadline, args->parent->send_deadline);
}
@@ -419,18 +415,6 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
immediately_cancel = true;
}
}
-
- if (pc->first_child == nullptr) {
- pc->first_child = call;
- cc->sibling_next = cc->sibling_prev = call;
- } else {
- cc->sibling_next = pc->first_child;
- cc->sibling_prev = pc->first_child->child->sibling_prev;
- cc->sibling_next->child->sibling_prev =
- cc->sibling_prev->child->sibling_next = call;
- }
-
- gpr_mu_unlock(&pc->child_list_mu);
}
call->send_deadline = send_deadline;
@@ -447,6 +431,22 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
&call->call_combiner};
add_init_error(&error, grpc_call_stack_init(channel_stack, 1, destroy_call,
call, &call_args));
+ // Publish this call to parent only after the call stack has been initialized.
+ if (args->parent != nullptr) {
+ child_call* cc = call->child;
+ parent_call* pc = get_or_create_parent_call(args->parent);
+ gpr_mu_lock(&pc->child_list_mu);
+ if (pc->first_child == nullptr) {
+ pc->first_child = call;
+ cc->sibling_next = cc->sibling_prev = call;
+ } else {
+ cc->sibling_next = pc->first_child;
+ cc->sibling_prev = pc->first_child->child->sibling_prev;
+ cc->sibling_next->child->sibling_prev =
+ cc->sibling_prev->child->sibling_next = call;
+ }
+ gpr_mu_unlock(&pc->child_list_mu);
+ }
if (error != GRPC_ERROR_NONE) {
cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error));
}
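The relocated block in call.cc publishes the child call into its parent's circular sibling list only after grpc_call_stack_init has run, so a failed stack init can no longer leave a half-linked child visible to the parent. The standalone sketch below reduces that ring insertion to plain structs to show the ordering; locking, arenas, and error handling are omitted, and the names only loosely mirror the real types.

// Standalone sketch (not gRPC code): the circular sibling list maintained by
// the relocated block above, with a two-call ring checked in main().
#include <cassert>

struct Call;
struct ChildCall { Call* parent; Call* sibling_next; Call* sibling_prev; };
struct ParentCall { Call* first_child = nullptr; };
struct Call { ChildCall* child = nullptr; ParentCall* parent_call = nullptr; };

void publish_child(Call* parent, Call* call) {
  ChildCall* cc = call->child;
  ParentCall* pc = parent->parent_call;
  if (pc->first_child == nullptr) {
    pc->first_child = call;
    cc->sibling_next = cc->sibling_prev = call;  // single-element ring
  } else {
    // Splice the new call in just before first_child, keeping the ring closed.
    cc->sibling_next = pc->first_child;
    cc->sibling_prev = pc->first_child->child->sibling_prev;
    cc->sibling_next->child->sibling_prev = call;
    cc->sibling_prev->child->sibling_next = call;
  }
}

int main() {
  ParentCall pc;
  Call parent; parent.parent_call = &pc;
  ChildCall c1{&parent, nullptr, nullptr}, c2{&parent, nullptr, nullptr};
  Call a, b; a.child = &c1; b.child = &c2;
  publish_child(&parent, &a);
  publish_child(&parent, &b);
  assert(pc.first_child == &a);
  assert(c1.sibling_next == &b && c2.sibling_next == &a);  // ring of two
  return 0;
}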
diff --git a/src/csharp/build_packages_dotnetcli.bat b/src/csharp/build_packages_dotnetcli.bat
index 4dd4947f00..76391738ff 100755
--- a/src/csharp/build_packages_dotnetcli.bat
+++ b/src/csharp/build_packages_dotnetcli.bat
@@ -19,17 +19,21 @@ set VERSION=1.11.0-dev
set NUGET=C:\nuget\nuget.exe
set DOTNET=dotnet
-set -ex
-
mkdir ..\..\artifacts
@rem Collect the artifacts built by the previous build step if running on Jenkins
mkdir nativelibs
+@rem Jenkins flow (deprecated)
powershell -Command "cp -r ..\..\platform=*\artifacts\csharp_ext_* nativelibs"
+@rem Kokoro flow
+powershell -Command "cp -r ..\..\input_artifacts\csharp_ext_* nativelibs"
@rem Collect protoc artifacts built by the previous build step
mkdir protoc_plugins
+@rem Jenkins flow (deprecated)
powershell -Command "cp -r ..\..\platform=*\artifacts\protoc_* protoc_plugins"
+@rem Kokoro flow
+powershell -Command "cp -r ..\..\input_artifacts\protoc_* protoc_plugins"
%DOTNET% restore Grpc.sln || goto :error
diff --git a/src/csharp/build_packages_dotnetcli.sh b/src/csharp/build_packages_dotnetcli.sh
index e3f8463ee8..1b73614b91 100755
--- a/src/csharp/build_packages_dotnetcli.sh
+++ b/src/csharp/build_packages_dotnetcli.sh
@@ -21,11 +21,17 @@ mkdir -p ../../artifacts/
# Collect the artifacts built by the previous build step
mkdir -p nativelibs
+# Jenkins flow (deprecated)
cp -r $EXTERNAL_GIT_ROOT/platform={windows,linux,macos}/artifacts/csharp_ext_* nativelibs || true
+# Kokoro flow
+cp -r $EXTERNAL_GIT_ROOT/input_artifacts/csharp_ext_* nativelibs || true
# Collect protoc artifacts built by the previous build step
mkdir -p protoc_plugins
+# Jenkins flow (deprecated)
cp -r $EXTERNAL_GIT_ROOT/platform={windows,linux,macos}/artifacts/protoc_* protoc_plugins || true
+# Kokoro flow
+cp -r $EXTERNAL_GIT_ROOT/input_artifacts/protoc_* protoc_plugins || true
dotnet restore Grpc.sln
diff --git a/src/php/ext/grpc/call_credentials.c b/src/php/ext/grpc/call_credentials.c
index 41c488a79c..d96dc7f3b7 100644
--- a/src/php/ext/grpc/call_credentials.c
+++ b/src/php/ext/grpc/call_credentials.c
@@ -35,6 +35,7 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
+#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
zend_class_entry *grpc_ce_call_credentials;
@@ -178,8 +179,10 @@ int plugin_get_metadata(
PHP_GRPC_DELREF(arg);
+ gpr_log(GPR_INFO, "GRPC_PHP: call credentials plugin function - begin");
/* call the user callback function */
zend_call_function(state->fci, state->fci_cache TSRMLS_CC);
+ gpr_log(GPR_INFO, "GRPC_PHP: call credentials plugin function - end");
*num_creds_md = 0;
*status = GRPC_STATUS_OK;
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 994443c651..d96cbec292 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -97,6 +97,8 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
+ 'src/core/lib/iomgr/iomgr_custom.cc',
+ 'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
@@ -105,12 +107,16 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/network_status_tracker.cc',
'src/core/lib/iomgr/polling_entity.cc',
- 'src/core/lib/iomgr/pollset_set_uv.cc',
+ 'src/core/lib/iomgr/pollset.cc',
+ 'src/core/lib/iomgr/pollset_custom.cc',
+ 'src/core/lib/iomgr/pollset_set.cc',
+ 'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_uv.cc',
'src/core/lib/iomgr/pollset_windows.cc',
+ 'src/core/lib/iomgr/resolve_address.cc',
+ 'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
- 'src/core/lib/iomgr/resolve_address_uv.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/sockaddr_utils.cc',
@@ -122,19 +128,24 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/socket_utils_uv.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
+ 'src/core/lib/iomgr/tcp_client.cc',
+ 'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
- 'src/core/lib/iomgr/tcp_client_uv.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
+ 'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
+ 'src/core/lib/iomgr/tcp_server.cc',
+ 'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
- 'src/core/lib/iomgr/tcp_server_uv.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_uv.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
+ 'src/core/lib/iomgr/timer.cc',
+ 'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index e033c1063f..bbbc3ea360 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -38,6 +38,168 @@
"unit._cython.cygrpc_test.InsecureServerInsecureClient",
"unit._cython.cygrpc_test.SecureServerSecureClient",
"unit._cython.cygrpc_test.TypeSmokeTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyEmptyRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManyLargeRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ManySmallRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoEmptyRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoLargeRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.TwoSmallRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroEmptyRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroLargeRequestsZeroReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsTwoReadZeroSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadManyEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadManyLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadManySmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadTwoEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadTwoLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadTwoSmallResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadZeroEmptyResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadZeroLargeResponsesEarlyOKTest",
+ "unit._early_ok_test.ZeroSmallRequestsZeroReadZeroSmallResponsesEarlyOKTest",
"unit._empty_message_test.EmptyMessageTest",
"unit._exit_test.ExitTest",
"unit._interceptor_test.InterceptorTest",
@@ -56,12 +218,6 @@
"unit.beta._beta_features_test.BetaFeaturesTest",
"unit.beta._beta_features_test.ContextManagementAndLifecycleTest",
"unit.beta._connectivity_channel_test.ConnectivityStatesTest",
- "unit.beta._face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest",
- "unit.beta._face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest",
- "unit.beta._face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest",
- "unit.beta._face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest",
- "unit.beta._face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest",
- "unit.beta._face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest",
"unit.beta._implementations_test.CallCredentialsTest",
"unit.beta._implementations_test.ChannelCredentialsTest",
"unit.beta._not_found_test.NotFoundTest",
diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py
index 7550cd39ba..da1996b1d1 100644
--- a/src/python/grpcio_tests/tests/unit/_compression_test.py
+++ b/src/python/grpcio_tests/tests/unit/_compression_test.py
@@ -32,12 +32,10 @@ def handle_unary(request, servicer_context):
def handle_stream(request_iterator, servicer_context):
- # TODO(issue:#6891) We should be able to remove this loop,
- # and replace with return; yield
servicer_context.send_initial_metadata([('grpc-internal-encoding-request',
'gzip')])
- for request in request_iterator:
- yield request
+ return
+ yield
class _MethodHandler(grpc.RpcMethodHandler):
diff --git a/src/python/grpcio_tests/tests/unit/_early_ok_test.py b/src/python/grpcio_tests/tests/unit/_early_ok_test.py
new file mode 100644
index 0000000000..041532c661
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_early_ok_test.py
@@ -0,0 +1,206 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests servicers sending OK status without having read all requests.
+
+This is a regression test of https://github.com/grpc/grpc/issues/6891.
+"""
+
+import enum
+import unittest
+
+import six
+
+import grpc
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_RPC_METHOD = '/serffice/Meffod'
+
+
+@enum.unique
+class _MessageCount(enum.Enum):
+
+ ZERO = (
+ 0,
+ 'Zero',
+ )
+ TWO = (
+ 1,
+ 2,
+ )
+ MANY = (
+ test_constants.STREAM_LENGTH,
+ 'Many',
+ )
+
+
+@enum.unique
+class _MessageSize(enum.Enum):
+ EMPTY = (
+ 0,
+ 'Empty',
+ )
+ SMALL = (
+ 32,
+ 'Small',
+ ) # Smaller than any flow control window.
+ LARGE = (
+ 3 * 1024 * 1024,
+ 'Large',
+ ) # Larger than any flow control window.
+
+
+_ZERO_MESSAGE = b''
+_SMALL_MESSAGE = b'\x07' * _MessageSize.SMALL.value[0]
+_LARGE_MESSAGE = b'abc' * (_MessageSize.LARGE.value[0] // 3)
+
+
+@enum.unique
+class _ReadRequests(enum.Enum):
+
+ ZERO = (
+ 0,
+ 'Zero',
+ )
+ TWO = (
+ 2,
+ 'Two',
+ )
+
+
+class _Case(object):
+
+ def __init__(self, request_count, request_size, request_reading,
+ response_count, response_size):
+ self.request_count = request_count
+ self.request_size = request_size
+ self.request_reading = request_reading
+ self.response_count = response_count
+ self.response_size = response_size
+
+ def create_test_case_name(self):
+ return '{}{}Requests{}Read{}{}ResponsesEarlyOKTest'.format(
+ self.request_count.value[1], self.request_size.value[1],
+ self.request_reading.value[1], self.response_count.value[1],
+ self.response_size.value[1])
+
+
+def _message(message_size):
+ if message_size is _MessageSize.EMPTY:
+ return _ZERO_MESSAGE
+ elif message_size is _MessageSize.SMALL:
+ return _SMALL_MESSAGE
+ elif message_size is _MessageSize.LARGE:
+ return _LARGE_MESSAGE
+
+
+def _messages_to_send(count, size):
+ for _ in range(count.value[0]):
+ yield _message(size)
+
+
+def _draw_requests(case, request_iterator):
+ for _ in range(
+ min(case.request_count.value[0], case.request_reading.value[0])):
+ next(request_iterator)
+
+
+def _draw_responses(case, response_iterator):
+ for _ in range(case.response_count.value[0]):
+ next(response_iterator)
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, case):
+ self.request_streaming = True
+ self.response_streaming = True
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self._case = case
+
+ def stream_stream(self, request_iterator, servicer_context):
+ _draw_requests(self._case, request_iterator)
+
+ for response in _messages_to_send(self._case.response_count,
+ self._case.response_size):
+ yield response
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, case):
+ self._case = case
+
+ def service(self, handler_call_details):
+ return _MethodHandler(self._case)
+
+
+class _EarlyOkTest(unittest.TestCase):
+
+ def setUp(self):
+ self._server = test_common.test_server()
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.add_generic_rpc_handlers((_GenericHandler(self.case),))
+ self._server.start()
+
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+ self._multi_callable = self._channel.stream_stream(_RPC_METHOD)
+
+ def tearDown(self):
+ self._server.stop(None)
+
+ def test_early_ok(self):
+ requests = _messages_to_send(self.case.request_count,
+ self.case.request_size)
+
+ response_iterator_call = self._multi_callable(requests)
+
+ _draw_responses(self.case, response_iterator_call)
+
+ self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
+
+
+def _cases():
+ for request_count in _MessageCount:
+ for request_size in _MessageSize:
+ for request_reading in _ReadRequests:
+ for response_count in _MessageCount:
+ for response_size in _MessageSize:
+ yield _Case(request_count, request_size,
+ request_reading, response_count,
+ response_size)
+
+
+def _test_case_classes():
+ for case in _cases():
+ yield type(case.create_test_case_name(), (_EarlyOkTest,), {
+ 'case': case,
+ '__module__': _EarlyOkTest.__module__,
+ })
+
+
+def load_tests(loader, tests, pattern):
+ return unittest.TestSuite(
+ tests=tuple(
+ loader.loadTestsFromTestCase(test_case_class)
+ for test_case_class in _test_case_classes()))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index ca10bd4dab..a6cdf32e5b 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -107,9 +107,6 @@ class _Servicer(object):
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
- # request iterator.
- list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
@@ -127,9 +124,6 @@ class _Servicer(object):
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
- # request iterator.
- list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index 5908421011..2309eeb733 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -117,9 +117,6 @@ def handle_stream_unary(test, request_iterator, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_TRAILING_METADATA)
- # TODO(issue:#6891) We should be able to remove this loop
- for request in request_iterator:
- pass
return _RESPONSE
@@ -127,10 +124,8 @@ def handle_stream_stream(test, request_iterator, servicer_context):
validate_client_metadata(test, servicer_context)
servicer_context.send_initial_metadata(_INITIAL_METADATA)
servicer_context.set_trailing_metadata(_TRAILING_METADATA)
- # TODO(issue:#6891) We should be able to remove this loop,
- # and replace with return; yield
- for request in request_iterator:
- yield _RESPONSE
+ return
+ yield
class _MethodHandler(grpc.RpcMethodHandler):
diff --git a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
index df4b129018..e35f8f10d4 100644
--- a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
+++ b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
@@ -77,18 +77,13 @@ def handle_unary_stream(trigger, request, servicer_context):
def handle_stream_unary(trigger, request_iterator, servicer_context):
trigger.await_trigger()
- # TODO(issue:#6891) We should be able to remove this loop
- for request in request_iterator:
- pass
return _RESPONSE
def handle_stream_stream(trigger, request_iterator, servicer_context):
trigger.await_trigger()
- # TODO(issue:#6891) We should be able to remove this loop,
- # and replace with return; yield
- for request in request_iterator:
- yield _RESPONSE
+ return
+ yield
class _MethodHandler(grpc.RpcMethodHandler):
diff --git a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
deleted file mode 100644
index c99738e085..0000000000
--- a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tests Face interface compliance of the gRPC Python Beta API."""
-
-import collections
-import unittest
-
-import six
-
-from grpc.beta import implementations
-from grpc.beta import interfaces
-from tests.unit import resources
-from tests.unit import test_common as grpc_test_common
-from tests.unit.beta import test_utilities
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.face import test_cases
-from tests.unit.framework.interfaces.face import test_interfaces
-
-_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
-
-
-class _SerializationBehaviors(
- collections.namedtuple('_SerializationBehaviors', (
- 'request_serializers',
- 'request_deserializers',
- 'response_serializers',
- 'response_deserializers',
- ))):
- pass
-
-
-def _serialization_behaviors_from_test_methods(test_methods):
- request_serializers = {}
- request_deserializers = {}
- response_serializers = {}
- response_deserializers = {}
- for (group, method), test_method in six.iteritems(test_methods):
- request_serializers[group, method] = test_method.serialize_request
- request_deserializers[group, method] = test_method.deserialize_request
- response_serializers[group, method] = test_method.serialize_response
- response_deserializers[group, method] = test_method.deserialize_response
- return _SerializationBehaviors(request_serializers, request_deserializers,
- response_serializers, response_deserializers)
-
-
-class _Implementation(test_interfaces.Implementation):
-
- def instantiate(self, methods, method_implementations,
- multi_method_implementation):
- serialization_behaviors = _serialization_behaviors_from_test_methods(
- methods)
- # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
- service = next(iter(methods))[0]
- # TODO(nathaniel): Add a "cardinalities_by_group" attribute to
- # _digest.TestServiceDigest.
- cardinalities = {
- method: method_object.cardinality()
- for (group, method), method_object in six.iteritems(methods)
- }
-
- server_options = implementations.server_options(
- request_deserializers=serialization_behaviors.request_deserializers,
- response_serializers=serialization_behaviors.response_serializers,
- thread_pool_size=test_constants.POOL_SIZE)
- server = implementations.server(
- method_implementations, options=server_options)
- server_credentials = implementations.ssl_server_credentials([
- (
- resources.private_key(),
- resources.certificate_chain(),
- ),
- ])
- port = server.add_secure_port('[::]:0', server_credentials)
- server.start()
- channel_credentials = implementations.ssl_channel_credentials(
- resources.test_root_certificates())
- channel = test_utilities.not_really_secure_channel(
- 'localhost', port, channel_credentials, _SERVER_HOST_OVERRIDE)
- stub_options = implementations.stub_options(
- request_serializers=serialization_behaviors.request_serializers,
- response_deserializers=serialization_behaviors.
- response_deserializers,
- thread_pool_size=test_constants.POOL_SIZE)
- generic_stub = implementations.generic_stub(
- channel, options=stub_options)
- dynamic_stub = implementations.dynamic_stub(
- channel, service, cardinalities, options=stub_options)
- return generic_stub, {service: dynamic_stub}, server
-
- def destantiate(self, memo):
- memo.stop(test_constants.SHORT_TIMEOUT).wait()
-
- def invocation_metadata(self):
- return grpc_test_common.INVOCATION_INITIAL_METADATA
-
- def initial_metadata(self):
- return grpc_test_common.SERVICE_INITIAL_METADATA
-
- def terminal_metadata(self):
- return grpc_test_common.SERVICE_TERMINAL_METADATA
-
- def code(self):
- return interfaces.StatusCode.OK
-
- def details(self):
- return grpc_test_common.DETAILS
-
- def metadata_transmitted(self, original_metadata, transmitted_metadata):
- return original_metadata is None or grpc_test_common.metadata_transmitted(
- original_metadata, transmitted_metadata)
-
-
-def load_tests(loader, tests, pattern):
- return unittest.TestSuite(
- tests=tuple(
- loader.loadTestsFromTestCase(test_case_class)
- for test_case_class in test_cases.test_cases(_Implementation())))
-
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
deleted file mode 100644
index 5fb4f3c3cf..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
deleted file mode 100644
index 6eb7ba33f6..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A test constant working around issue 3069."""
-
-# test_constants is referenced from specification in this module.
-from tests.unit.framework.common import test_constants # pylint: disable=unused-import
-
-# TODO(issue 3069): Replace uses of this constant with
-# test_constants.SHORT_TIMEOUT.
-REALLY_SHORT_TIMEOUT = 0.1
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
deleted file mode 100644
index 5fb4f3c3cf..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
deleted file mode 100644
index 5d8679aa62..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Test code for the Face layer of RPC Framework."""
-
-from __future__ import division
-
-import abc
-import itertools
-import unittest
-from concurrent import futures
-
-import six
-
-# test_interfaces is referenced from specification in this module.
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.face import face
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.common import test_control
-from tests.unit.framework.common import test_coverage
-from tests.unit.framework.interfaces.face import _3069_test_constant
-from tests.unit.framework.interfaces.face import _digest
-from tests.unit.framework.interfaces.face import _stock_service
-from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
-
-
-class TestCase(
- six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
- unittest.TestCase)):
- """A test of the Face layer of RPC Framework.
-
- Concrete subclasses must have an "implementation" attribute of type
- test_interfaces.Implementation and an "invoker_constructor" attribute of type
- _invocation.InvokerConstructor.
- """
-
- NAME = 'BlockingInvocationInlineServiceTest'
-
- def setUp(self):
- """See unittest.TestCase.setUp for full specification.
-
- Overriding implementations must call this implementation.
- """
- self._control = test_control.PauseFailControl()
- self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
- self._control, None)
-
- generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
- self._digest.methods, self._digest.inline_method_implementations,
- None)
- self._invoker = self.invoker_constructor.construct_invoker(
- generic_stub, dynamic_stubs, self._digest.methods)
-
- def tearDown(self):
- """See unittest.TestCase.tearDown for full specification.
-
- Overriding implementations must call this implementation.
- """
- self._invoker = None
- self.implementation.destantiate(self._memo)
-
- def testSuccessfulUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response, call = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT, with_call=True)
-
- test_messages.verify(request, response, self)
-
- def testSuccessfulUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response_iterator = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(request, responses, self)
-
- def testSuccessfulStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- response, call = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT, with_call=True)
-
- test_messages.verify(requests, response, self)
-
- def testSuccessfulStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- response_iterator = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(requests, responses, self)
-
- def testSequentialInvocations(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response = self._invoker.blocking(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
-
- test_messages.verify(first_request, first_response, self)
-
- second_response = self._invoker.blocking(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
-
- test_messages.verify(second_request, second_response, self)
-
- def testParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures = []
- for _ in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = pool.submit(
- self._invoker.blocking(group, method), request,
- test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures.append(response_future)
-
- responses = [
- response_future.result()
- for response_future in response_futures
- ]
-
- for request, response in zip(requests, responses):
- test_messages.verify(request, response, self)
- pool.shutdown(wait=True)
-
- def testWaitingForSomeButNotAllParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures_to_indices = {}
- for index in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = pool.submit(
- self._invoker.blocking(group, method), request,
- test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures_to_indices[response_future] = index
-
- some_completed_response_futures_iterator = itertools.islice(
- futures.as_completed(response_futures_to_indices),
- test_constants.THREAD_CONCURRENCY // 2)
- for response_future in some_completed_response_futures_iterator:
- index = response_futures_to_indices[response_future]
- test_messages.verify(requests[index],
- response_future.result(), self)
- pool.shutdown(wait=True)
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledUnaryRequestStreamResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledStreamRequestUnaryResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledStreamRequestStreamResponse(self):
- raise NotImplementedError()
-
- def testExpiredUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- self._invoker.blocking(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
-
- def testExpiredUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.blocking(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testExpiredStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- self._invoker.blocking(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
-
- def testExpiredStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.blocking(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testFailedUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
-
- def testFailedUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- response_iterator = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
- list(response_iterator)
-
- def testFailedStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
-
- def testFailedStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- response_iterator = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- list(response_iterator)
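Every expiration and failure test above reduces to the same three-part shape: hold the service control in a pause or fail state, invoke the blocking behavior with a short deadline, and assert the expected face error. A minimal runnable reduction of that shape, using a no-op context manager and the built-in TimeoutError as stand-ins for the deleted PauseFailControl and face.ExpirationError:

import contextlib
import unittest


@contextlib.contextmanager
def _pause():
    # Stand-in for PauseFailControl.pause(), which blocks the servicer while held.
    yield


class ExpirationShapeTest(unittest.TestCase):

    def test_expired_call_shape(self):
        # Stand-in for invoking the blocking stub with REALLY_SHORT_TIMEOUT.
        with _pause(), self.assertRaises(TimeoutError):
            raise TimeoutError('deadline exceeded')


if __name__ == '__main__':
    unittest.main()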
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
deleted file mode 100644
index b1c33da43a..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
+++ /dev/null
@@ -1,432 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Code for making a service.TestService more amenable to use in tests."""
-
-import collections
-import threading
-
-import six
-
-# test_control, _service, and test_interfaces are referenced from specification
-# in this module.
-from grpc.framework.common import cardinality
-from grpc.framework.common import style
-from grpc.framework.foundation import stream
-from grpc.framework.foundation import stream_util
-from grpc.framework.interfaces.face import face
-from tests.unit.framework.common import test_control # pylint: disable=unused-import
-from tests.unit.framework.interfaces.face import _service # pylint: disable=unused-import
-from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
-
-_IDENTITY = lambda x: x
-
-
-class TestServiceDigest(
- collections.namedtuple('TestServiceDigest', (
- 'methods',
- 'inline_method_implementations',
- 'event_method_implementations',
- 'multi_method_implementation',
- 'unary_unary_messages_sequences',
- 'unary_stream_messages_sequences',
- 'stream_unary_messages_sequences',
- 'stream_stream_messages_sequences',
- ))):
- """A transformation of a service.TestService.
-
- Attributes:
- methods: A dict from method group-name pair to test_interfaces.Method object
- describing the RPC methods that may be called during the test.
- inline_method_implementations: A dict from method group-name pair to
- face.MethodImplementation object to be used in tests of in-line calls to
- behaviors under test.
- event_method_implementations: A dict from method group-name pair to
- face.MethodImplementation object to be used in tests of event-driven calls
- to behaviors under test.
- multi_method_implementation: A face.MultiMethodImplementation to be used in
- tests of generic calls to behaviors under test.
- unary_unary_messages_sequences: A dict from method group-name pair to
- sequence of service.UnaryUnaryTestMessages objects to be used to test the
- identified method.
- unary_stream_messages_sequences: A dict from method group-name pair to
- sequence of service.UnaryStreamTestMessages objects to be used to test the
- identified method.
- stream_unary_messages_sequences: A dict from method group-name pair to
- sequence of service.StreamUnaryTestMessages objects to be used to test the
- identified method.
- stream_stream_messages_sequences: A dict from method group-name pair to
- sequence of service.StreamStreamTestMessages objects to be used to test
- the identified method.
- """
-
-
-class _BufferingConsumer(stream.Consumer):
- """A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
-
- def __init__(self):
- self.consumed = []
- self.terminated = False
-
- def consume(self, value):
- self.consumed.append(value)
-
- def terminate(self):
- self.terminated = True
-
- def consume_and_terminate(self, value):
- self.consumed.append(value)
- self.terminated = True
-
-
-class _InlineUnaryUnaryMethod(face.MethodImplementation):
-
- def __init__(self, unary_unary_test_method, control):
- self._test_method = unary_unary_test_method
- self._control = control
-
- self.cardinality = cardinality.Cardinality.UNARY_UNARY
- self.style = style.Service.INLINE
-
- def unary_unary_inline(self, request, context):
- response_list = []
- self._test_method.service(request, response_list.append, context,
- self._control)
- return response_list.pop(0)
-
-
-class _EventUnaryUnaryMethod(face.MethodImplementation):
-
- def __init__(self, unary_unary_test_method, control, pool):
- self._test_method = unary_unary_test_method
- self._control = control
- self._pool = pool
-
- self.cardinality = cardinality.Cardinality.UNARY_UNARY
- self.style = style.Service.EVENT
-
- def unary_unary_event(self, request, response_callback, context):
- if self._pool is None:
- self._test_method.service(request, response_callback, context,
- self._control)
- else:
- self._pool.submit(self._test_method.service, request,
- response_callback, context, self._control)
-
-
-class _InlineUnaryStreamMethod(face.MethodImplementation):
-
- def __init__(self, unary_stream_test_method, control):
- self._test_method = unary_stream_test_method
- self._control = control
-
- self.cardinality = cardinality.Cardinality.UNARY_STREAM
- self.style = style.Service.INLINE
-
- def unary_stream_inline(self, request, context):
- response_consumer = _BufferingConsumer()
- self._test_method.service(request, response_consumer, context,
- self._control)
- for response in response_consumer.consumed:
- yield response
-
-
-class _EventUnaryStreamMethod(face.MethodImplementation):
-
- def __init__(self, unary_stream_test_method, control, pool):
- self._test_method = unary_stream_test_method
- self._control = control
- self._pool = pool
-
- self.cardinality = cardinality.Cardinality.UNARY_STREAM
- self.style = style.Service.EVENT
-
- def unary_stream_event(self, request, response_consumer, context):
- if self._pool is None:
- self._test_method.service(request, response_consumer, context,
- self._control)
- else:
- self._pool.submit(self._test_method.service, request,
- response_consumer, context, self._control)
-
-
-class _InlineStreamUnaryMethod(face.MethodImplementation):
-
- def __init__(self, stream_unary_test_method, control):
- self._test_method = stream_unary_test_method
- self._control = control
-
- self.cardinality = cardinality.Cardinality.STREAM_UNARY
- self.style = style.Service.INLINE
-
- def stream_unary_inline(self, request_iterator, context):
- response_list = []
- request_consumer = self._test_method.service(response_list.append,
- context, self._control)
- for request in request_iterator:
- request_consumer.consume(request)
- request_consumer.terminate()
- return response_list.pop(0)
-
-
-class _EventStreamUnaryMethod(face.MethodImplementation):
-
- def __init__(self, stream_unary_test_method, control, pool):
- self._test_method = stream_unary_test_method
- self._control = control
- self._pool = pool
-
- self.cardinality = cardinality.Cardinality.STREAM_UNARY
- self.style = style.Service.EVENT
-
- def stream_unary_event(self, response_callback, context):
- request_consumer = self._test_method.service(response_callback, context,
- self._control)
- if self._pool is None:
- return request_consumer
- else:
- return stream_util.ThreadSwitchingConsumer(request_consumer,
- self._pool)
-
-
-class _InlineStreamStreamMethod(face.MethodImplementation):
-
- def __init__(self, stream_stream_test_method, control):
- self._test_method = stream_stream_test_method
- self._control = control
-
- self.cardinality = cardinality.Cardinality.STREAM_STREAM
- self.style = style.Service.INLINE
-
- def stream_stream_inline(self, request_iterator, context):
- response_consumer = _BufferingConsumer()
- request_consumer = self._test_method.service(response_consumer, context,
- self._control)
-
- for request in request_iterator:
- request_consumer.consume(request)
- while response_consumer.consumed:
- yield response_consumer.consumed.pop(0)
- response_consumer.terminate()
-
-
-class _EventStreamStreamMethod(face.MethodImplementation):
-
- def __init__(self, stream_stream_test_method, control, pool):
- self._test_method = stream_stream_test_method
- self._control = control
- self._pool = pool
-
- self.cardinality = cardinality.Cardinality.STREAM_STREAM
- self.style = style.Service.EVENT
-
- def stream_stream_event(self, response_consumer, context):
- request_consumer = self._test_method.service(response_consumer, context,
- self._control)
- if self._pool is None:
- return request_consumer
- else:
- return stream_util.ThreadSwitchingConsumer(request_consumer,
- self._pool)
-
-
-class _UnaryConsumer(stream.Consumer):
- """A Consumer that only allows consumption of exactly one value."""
-
- def __init__(self, action):
- self._lock = threading.Lock()
- self._action = action
- self._consumed = False
- self._terminated = False
-
- def consume(self, value):
- with self._lock:
- if self._consumed:
- raise ValueError('Unary consumer already consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._consumed = True
-
- self._action(value)
-
- def terminate(self):
- with self._lock:
- if not self._consumed:
- raise ValueError('Unary consumer hasn\'t yet consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._terminated = True
-
- def consume_and_terminate(self, value):
- with self._lock:
- if self._consumed:
- raise ValueError('Unary consumer already consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._consumed = True
- self._terminated = True
-
- self._action(value)
-
-
-class _UnaryUnaryAdaptation(object):
-
- def __init__(self, unary_unary_test_method):
- self._method = unary_unary_test_method
-
- def service(self, response_consumer, context, control):
-
- def action(request):
- self._method.service(request,
- response_consumer.consume_and_terminate,
- context, control)
-
- return _UnaryConsumer(action)
-
-
-class _UnaryStreamAdaptation(object):
-
- def __init__(self, unary_stream_test_method):
- self._method = unary_stream_test_method
-
- def service(self, response_consumer, context, control):
-
- def action(request):
- self._method.service(request, response_consumer, context, control)
-
- return _UnaryConsumer(action)
-
-
-class _StreamUnaryAdaptation(object):
-
- def __init__(self, stream_unary_test_method):
- self._method = stream_unary_test_method
-
- def service(self, response_consumer, context, control):
- return self._method.service(response_consumer.consume_and_terminate,
- context, control)
-
-
-class _MultiMethodImplementation(face.MultiMethodImplementation):
-
- def __init__(self, methods, control, pool):
- self._methods = methods
- self._control = control
- self._pool = pool
-
- def service(self, group, name, response_consumer, context):
-        method = self._methods.get((group, name), None)
- if method is None:
- raise face.NoSuchMethodError(group, name)
- elif self._pool is None:
- return method(response_consumer, context, self._control)
- else:
- request_consumer = method(response_consumer, context, self._control)
- return stream_util.ThreadSwitchingConsumer(request_consumer,
- self._pool)
-
-
-class _Assembly(
- collections.namedtuple(
- '_Assembly',
- ['methods', 'inlines', 'events', 'adaptations', 'messages'])):
- """An intermediate structure created when creating a TestServiceDigest."""
-
-
-def _assemble(scenarios, identifiers, inline_method_constructor,
- event_method_constructor, adapter, control, pool):
- """Creates an _Assembly from the given scenarios."""
- methods = {}
- inlines = {}
- events = {}
- adaptations = {}
- messages = {}
- for identifier, scenario in six.iteritems(scenarios):
- if identifier in identifiers:
- raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
-
- test_method = scenario[0]
- inline_method = inline_method_constructor(test_method, control)
- event_method = event_method_constructor(test_method, control, pool)
- adaptation = adapter(test_method)
-
- methods[identifier] = test_method
- inlines[identifier] = inline_method
- events[identifier] = event_method
- adaptations[identifier] = adaptation
- messages[identifier] = scenario[1]
-
- return _Assembly(methods, inlines, events, adaptations, messages)
-
-
-def digest(service, control, pool):
- """Creates a TestServiceDigest from a TestService.
-
- Args:
- service: A _service.TestService.
- control: A test_control.Control.
- pool: If RPC methods should be serviced in a separate thread, a thread pool.
- None if RPC methods should be serviced in the thread belonging to the
- run-time that calls for their service.
-
- Returns:
- A TestServiceDigest synthesized from the given service.TestService.
- """
- identifiers = set()
-
- unary_unary = _assemble(service.unary_unary_scenarios(), identifiers,
- _InlineUnaryUnaryMethod, _EventUnaryUnaryMethod,
- _UnaryUnaryAdaptation, control, pool)
- identifiers.update(unary_unary.inlines)
-
- unary_stream = _assemble(service.unary_stream_scenarios(), identifiers,
- _InlineUnaryStreamMethod, _EventUnaryStreamMethod,
- _UnaryStreamAdaptation, control, pool)
- identifiers.update(unary_stream.inlines)
-
- stream_unary = _assemble(service.stream_unary_scenarios(), identifiers,
- _InlineStreamUnaryMethod, _EventStreamUnaryMethod,
- _StreamUnaryAdaptation, control, pool)
- identifiers.update(stream_unary.inlines)
-
- stream_stream = _assemble(service.stream_stream_scenarios(), identifiers,
- _InlineStreamStreamMethod,
- _EventStreamStreamMethod, _IDENTITY, control,
- pool)
- identifiers.update(stream_stream.inlines)
-
- methods = dict(unary_unary.methods)
- methods.update(unary_stream.methods)
- methods.update(stream_unary.methods)
- methods.update(stream_stream.methods)
- adaptations = dict(unary_unary.adaptations)
- adaptations.update(unary_stream.adaptations)
- adaptations.update(stream_unary.adaptations)
- adaptations.update(stream_stream.adaptations)
- inlines = dict(unary_unary.inlines)
- inlines.update(unary_stream.inlines)
- inlines.update(stream_unary.inlines)
- inlines.update(stream_stream.inlines)
- events = dict(unary_unary.events)
- events.update(unary_stream.events)
- events.update(stream_unary.events)
- events.update(stream_stream.events)
-
- return TestServiceDigest(methods, inlines, events,
- _MultiMethodImplementation(adaptations, control,
- pool),
- unary_unary.messages, unary_stream.messages,
- stream_unary.messages, stream_stream.messages)
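The digest() function above is the entry point the Face-layer test cases rely on; the outline below mirrors the setUp code in the next deleted file and is a usage sketch only (test_control, logging_pool, test_constants, _stock_service and _digest all belong to the deleted test framework, so it is not runnable on its own):

# Usage outline only; every name here comes from the deleted test framework.
control = test_control.PauseFailControl()
pool = logging_pool.pool(test_constants.POOL_SIZE)
service_digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE, control, pool)

# The resulting TestServiceDigest then drives stub construction and the
# per-cardinality message checks, e.g. iterating
# service_digest.unary_unary_messages_sequences keyed by (group, method).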
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
deleted file mode 100644
index 3d9b2816aa..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
+++ /dev/null
@@ -1,508 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Test code for the Face layer of RPC Framework."""
-
-from __future__ import division
-
-import abc
-import contextlib
-import itertools
-import threading
-import unittest
-from concurrent import futures
-
-import six
-
-# test_interfaces is referenced from specification in this module.
-from grpc.framework.foundation import future
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.face import face
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.common import test_control
-from tests.unit.framework.common import test_coverage
-from tests.unit.framework.interfaces.face import _3069_test_constant
-from tests.unit.framework.interfaces.face import _digest
-from tests.unit.framework.interfaces.face import _stock_service
-from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
-
-
-class _PauseableIterator(object):
-
- def __init__(self, upstream):
- self._upstream = upstream
- self._condition = threading.Condition()
- self._paused = False
-
- @contextlib.contextmanager
- def pause(self):
- with self._condition:
- self._paused = True
- yield
- with self._condition:
- self._paused = False
- self._condition.notify_all()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self.next()
-
- def next(self):
- with self._condition:
- while self._paused:
- self._condition.wait()
- return next(self._upstream)
-
-
-class _Callback(object):
-
- def __init__(self):
- self._condition = threading.Condition()
- self._called = False
- self._passed_future = None
- self._passed_other_stuff = None
-
- def __call__(self, *args, **kwargs):
- with self._condition:
- self._called = True
- if args:
- self._passed_future = args[0]
- if 1 < len(args) or kwargs:
- self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
- self._condition.notify_all()
-
- def future(self):
- with self._condition:
- while True:
- if self._passed_other_stuff is not None:
-                    raise ValueError(
-                        'Test callback passed unexpected values: %s' %
-                        (self._passed_other_stuff,))
- elif self._called:
- return self._passed_future
- else:
- self._condition.wait()
-
-
-class TestCase(
- six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
- unittest.TestCase)):
- """A test of the Face layer of RPC Framework.
-
- Concrete subclasses must have an "implementation" attribute of type
- test_interfaces.Implementation and an "invoker_constructor" attribute of type
- _invocation.InvokerConstructor.
- """
-
- NAME = 'FutureInvocationAsynchronousEventServiceTest'
-
- def setUp(self):
- """See unittest.TestCase.setUp for full specification.
-
- Overriding implementations must call this implementation.
- """
- self._control = test_control.PauseFailControl()
- self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
- self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
- self._control, self._digest_pool)
-
- generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
- self._digest.methods, self._digest.event_method_implementations,
- None)
- self._invoker = self.invoker_constructor.construct_invoker(
- generic_stub, dynamic_stubs, self._digest.methods)
-
- def tearDown(self):
- """See unittest.TestCase.tearDown for full specification.
-
- Overriding implementations must call this implementation.
- """
- self._invoker = None
- self.implementation.destantiate(self._memo)
- self._digest_pool.shutdown(wait=True)
-
- def testSuccessfulUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- response = response_future.result()
-
- test_messages.verify(request, response, self)
- self.assertIs(callback.future(), response_future)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response_iterator = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(request, responses, self)
-
- def testSuccessfulStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- request_iterator = _PauseableIterator(iter(requests))
- callback = _Callback()
-
- # Use of a paused iterator of requests allows us to test that control is
- # returned to calling code before the iterator yields any requests.
- with request_iterator.pause():
- response_future = self._invoker.future(group, method)(
- request_iterator, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- future_passed_to_callback = callback.future()
- response = future_passed_to_callback.result()
-
- test_messages.verify(requests, response, self)
- self.assertIs(future_passed_to_callback, response_future)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- request_iterator = _PauseableIterator(iter(requests))
-
- # Use of a paused iterator of requests allows us to test that control is
- # returned to calling code before the iterator yields any requests.
- with request_iterator.pause():
- response_iterator = self._invoker.future(group, method)(
- request_iterator, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(requests, responses, self)
-
- def testSequentialInvocations(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response_future = self._invoker.future(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
- first_response = first_response_future.result()
-
- test_messages.verify(first_request, first_response, self)
-
- second_response_future = self._invoker.future(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
- second_response = second_response_future.result()
-
- test_messages.verify(second_request, second_response, self)
-
- def testParallelInvocations(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response_future = self._invoker.future(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
- second_response_future = self._invoker.future(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
- first_response = first_response_future.result()
- second_response = second_response_future.result()
-
- test_messages.verify(first_request, first_response, self)
- test_messages.verify(second_request, second_response, self)
-
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures = []
- for _ in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures.append(response_future)
-
- responses = [
- response_future.result()
- for response_future in response_futures
- ]
-
- for request, response in zip(requests, responses):
- test_messages.verify(request, response, self)
-
- def testWaitingForSomeButNotAllParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures_to_indices = {}
- for index in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- inner_response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- outer_response_future = pool.submit(
- inner_response_future.result)
- requests.append(request)
- response_futures_to_indices[outer_response_future] = index
-
- some_completed_response_futures_iterator = itertools.islice(
- futures.as_completed(response_futures_to_indices),
- test_constants.THREAD_CONCURRENCY // 2)
- for response_future in some_completed_response_futures_iterator:
- index = response_futures_to_indices[response_future]
- test_messages.verify(requests[index],
- response_future.result(), self)
- pool.shutdown(wait=True)
-
- def testCancelledUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- cancel_method_return_value = response_future.cancel()
-
- self.assertIs(callback.future(), response_future)
- self.assertFalse(cancel_method_return_value)
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(future.CancelledError):
- response_future.result()
- with self.assertRaises(future.CancelledError):
- response_future.exception()
- with self.assertRaises(future.CancelledError):
- response_future.traceback()
-
- def testCancelledUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_iterator.cancel()
-
- with self.assertRaises(face.CancellationError):
- next(response_iterator)
-
- def testCancelledStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- cancel_method_return_value = response_future.cancel()
-
- self.assertIs(callback.future(), response_future)
- self.assertFalse(cancel_method_return_value)
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(future.CancelledError):
- response_future.result()
- with self.assertRaises(future.CancelledError):
- response_future.exception()
- with self.assertRaises(future.CancelledError):
- response_future.traceback()
-
- def testCancelledStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- response_iterator.cancel()
-
- with self.assertRaises(face.CancellationError):
- next(response_iterator)
-
- def testExpiredUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- self.assertIs(callback.future(), response_future)
- self.assertIsInstance(response_future.exception(),
- face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsInstance(response_future.exception(),
- face.AbortionError)
- self.assertIsNotNone(response_future.traceback())
-
- def testExpiredUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(response_iterator)
-
- def testExpiredStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- self.assertIs(callback.future(), response_future)
- self.assertIsInstance(response_future.exception(),
- face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsInstance(response_future.exception(),
- face.AbortionError)
- self.assertIsNotNone(response_future.traceback())
-
- def testExpiredStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause():
- response_iterator = self._invoker.future(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(response_iterator)
-
- def testFailedUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
- abortion_callback = _Callback()
-
- with self._control.fail():
- response_future = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- response_future.add_abortion_callback(abortion_callback)
-
- self.assertIs(callback.future(), response_future)
- # Because the servicer fails outside of the thread from which the
-                    # servicer-side runtime called into it, its failure is
- # indistinguishable from simply not having called its
- # response_callback before the expiration of the RPC.
- self.assertIsInstance(response_future.exception(),
- face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsNotNone(response_future.traceback())
- self.assertIsNotNone(abortion_callback.future())
-
- def testFailedUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- # Because the servicer fails outside of the thread from which the
-                # servicer-side runtime called into it, its failure is indistinguishable
- # from simply not having called its response_consumer before the
- # expiration of the RPC.
- with self._control.fail(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testFailedStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
- abortion_callback = _Callback()
-
- with self._control.fail():
- response_future = self._invoker.future(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- response_future.add_abortion_callback(abortion_callback)
-
- self.assertIs(callback.future(), response_future)
- # Because the servicer fails outside of the thread from which the
-                    # servicer-side runtime called into it, its failure is
- # indistinguishable from simply not having called its
- # response_callback before the expiration of the RPC.
- self.assertIsInstance(response_future.exception(),
- face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsNotNone(response_future.traceback())
- self.assertIsNotNone(abortion_callback.future())
-
- def testFailedStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (six.iteritems(
- self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- # Because the servicer fails outside of the thread from which the
-                # servicer-side runtime called into it, its failure is indistinguishable
- # from simply not having called its response_consumer before the
- # expiration of the RPC.
- with self._control.fail(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.future(
- group, method)(iter(requests),
- _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
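The future-control-flow tests above hinge on Future.add_done_callback plus the condition-guarded _Callback helper; the self-contained reduction below illustrates the same pattern with the standard library's concurrent.futures in place of the deleted grpc.framework future type:

import threading
from concurrent import futures


def _respond():
    return 'response'


done = threading.Event()
results = []


def _on_done(completed_future):
    # Mirrors _Callback: record what the framework handed back, then wake the
    # thread waiting on the result.
    results.append(completed_future.result())
    done.set()


with futures.ThreadPoolExecutor(max_workers=1) as pool:
    response_future = pool.submit(_respond)
    response_future.add_done_callback(_on_done)
    done.wait()

assert results == ['response']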
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
deleted file mode 100644
index efc93d56b0..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Coverage across the Face layer's generic-to-dynamic range for invocation."""
-
-import abc
-
-import six
-
-from grpc.framework.common import cardinality
-
-_CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR = {
- cardinality.Cardinality.UNARY_UNARY: 'blocking_unary_unary',
- cardinality.Cardinality.UNARY_STREAM: 'inline_unary_stream',
- cardinality.Cardinality.STREAM_UNARY: 'blocking_stream_unary',
- cardinality.Cardinality.STREAM_STREAM: 'inline_stream_stream',
-}
-
-_CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR = {
- cardinality.Cardinality.UNARY_UNARY: 'future_unary_unary',
- cardinality.Cardinality.UNARY_STREAM: 'inline_unary_stream',
- cardinality.Cardinality.STREAM_UNARY: 'future_stream_unary',
- cardinality.Cardinality.STREAM_STREAM: 'inline_stream_stream',
-}
-
-_CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR = {
- cardinality.Cardinality.UNARY_UNARY: 'event_unary_unary',
- cardinality.Cardinality.UNARY_STREAM: 'event_unary_stream',
- cardinality.Cardinality.STREAM_UNARY: 'event_stream_unary',
- cardinality.Cardinality.STREAM_STREAM: 'event_stream_stream',
-}
-
-_CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE = {
- cardinality.Cardinality.UNARY_UNARY: 'unary_unary',
- cardinality.Cardinality.UNARY_STREAM: 'unary_stream',
- cardinality.Cardinality.STREAM_UNARY: 'stream_unary',
- cardinality.Cardinality.STREAM_STREAM: 'stream_stream',
-}
-
-
-class Invoker(six.with_metaclass(abc.ABCMeta)):
- """A type used to invoke test RPCs."""
-
- @abc.abstractmethod
- def blocking(self, group, name):
- """Invokes an RPC with blocking control flow."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def future(self, group, name):
- """Invokes an RPC with future control flow."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event(self, group, name):
- """Invokes an RPC with event control flow."""
- raise NotImplementedError()
-
-
-class InvokerConstructor(six.with_metaclass(abc.ABCMeta)):
- """A type used to create Invokers."""
-
- @abc.abstractmethod
- def name(self):
- """Specifies the name of the Invoker constructed by this object."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def construct_invoker(self, generic_stub, dynamic_stubs, methods):
- """Constructs an Invoker for the given stubs and methods."""
- raise NotImplementedError()
-
-
-class _GenericInvoker(Invoker):
-
- def __init__(self, generic_stub, methods):
- self._stub = generic_stub
- self._methods = methods
-
- def _behavior(self, group, name, cardinality_to_generic_method):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(self._stub,
- cardinality_to_generic_method[method_cardinality])
- return lambda *args, **kwargs: behavior(group, name, *args, **kwargs)
-
- def blocking(self, group, name):
- return self._behavior(group, name,
- _CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR)
-
- def future(self, group, name):
- return self._behavior(group, name,
- _CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR)
-
- def event(self, group, name):
- return self._behavior(group, name,
- _CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR)
-
-
-class _GenericInvokerConstructor(InvokerConstructor):
-
- def name(self):
- return 'GenericInvoker'
-
- def construct_invoker(self, generic_stub, dynamic_stub, methods):
- return _GenericInvoker(generic_stub, methods)
-
-
-class _MultiCallableInvoker(Invoker):
-
- def __init__(self, generic_stub, methods):
- self._stub = generic_stub
- self._methods = methods
-
- def _multi_callable(self, group, name):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(
- self._stub,
- _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
- return behavior(group, name)
-
- def blocking(self, group, name):
- return self._multi_callable(group, name)
-
- def future(self, group, name):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(
- self._stub,
- _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
- if method_cardinality in (cardinality.Cardinality.UNARY_UNARY,
- cardinality.Cardinality.STREAM_UNARY):
- return behavior(group, name).future
- else:
- return behavior(group, name)
-
- def event(self, group, name):
- return self._multi_callable(group, name).event
-
-
-class _MultiCallableInvokerConstructor(InvokerConstructor):
-
- def name(self):
- return 'MultiCallableInvoker'
-
- def construct_invoker(self, generic_stub, dynamic_stub, methods):
- return _MultiCallableInvoker(generic_stub, methods)
-
-
-class _DynamicInvoker(Invoker):
-
- def __init__(self, dynamic_stubs, methods):
- self._stubs = dynamic_stubs
- self._methods = methods
-
- def blocking(self, group, name):
- return getattr(self._stubs[group], name)
-
- def future(self, group, name):
- if self._methods[group, name].cardinality() in (
- cardinality.Cardinality.UNARY_UNARY,
- cardinality.Cardinality.STREAM_UNARY):
- return getattr(self._stubs[group], name).future
- else:
- return getattr(self._stubs[group], name)
-
- def event(self, group, name):
- return getattr(self._stubs[group], name).event
-
-
-class _DynamicInvokerConstructor(InvokerConstructor):
-
- def name(self):
- return 'DynamicInvoker'
-
- def construct_invoker(self, generic_stub, dynamic_stubs, methods):
- return _DynamicInvoker(dynamic_stubs, methods)
-
-
-def invoker_constructors():
- """Creates a sequence of InvokerConstructors to use in tests of RPCs.
-
- Returns:
- A sequence of InvokerConstructors.
- """
- return (
- _GenericInvokerConstructor(),
- _MultiCallableInvokerConstructor(),
- _DynamicInvokerConstructor(),
- )
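The invoker flavors above differ only in how they select a behavior from a stub for a given cardinality; the self-contained toy below illustrates the getattr dispatch used by _GenericInvoker (the fake stub, the map contents and the string cardinality are illustrative stand-ins, not the deleted framework API):

class _FakeGenericStub(object):
    """Stand-in stub exposing one cardinality-specific behavior."""

    def blocking_unary_unary(self, group, name, request, timeout):
        return (group, name, request, timeout)


_CARDINALITY_TO_BEHAVIOR = {'UNARY_UNARY': 'blocking_unary_unary'}


def _behavior(stub, group, name, method_cardinality):
    # Look up the behavior by name, then bind the (group, name) pair so the
    # caller only supplies the request and timeout.
    behavior = getattr(stub, _CARDINALITY_TO_BEHAVIOR[method_cardinality])
    return lambda *args, **kwargs: behavior(group, name, *args, **kwargs)


call = _behavior(_FakeGenericStub(), 'Stock', 'GetLastTradePrice', 'UNARY_UNARY')
assert call('a request', 1.0) == ('Stock', 'GetLastTradePrice', 'a request', 1.0)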
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
deleted file mode 100644
index f1c96b6dc5..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Private interfaces implemented by data sets used in Face-layer tests."""
-
-import abc
-
-import six
-
-# face is referenced from specification in this module.
-from grpc.framework.interfaces.face import face # pylint: disable=unused-import
-from tests.unit.framework.interfaces.face import test_interfaces
-
-
-class UnaryUnaryTestMethodImplementation(
- six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a unary-unary method."""
-
- @abc.abstractmethod
- def service(self, request, response_callback, context, control):
- """Services an RPC that accepts one message and produces one message.
-
- Args:
- request: The single request message for the RPC.
- response_callback: A callback to be called to accept the response message
- of the RPC.
-      context: A face.ServicerContext object.
- control: A test_control.Control to control execution of this method.
-
- Raises:
- abandonment.Abandoned: May or may not be raised when the RPC has been
- aborted.
- """
- raise NotImplementedError()
-
-
-class UnaryUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for unary-request-unary-response message pairings."""
-
- @abc.abstractmethod
- def request(self):
- """Affords a request message.
-
- Implementations of this method should return a different message with each
- call so that multiple test executions of the test method may be made with
- different inputs.
-
- Returns:
- A request message.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify(self, request, response, test_case):
- """Verifies that the computed response matches the given request.
-
- Args:
- request: A request message.
- response: A response message.
- test_case: A unittest.TestCase object affording useful assertion methods.
-
- Raises:
- AssertionError: If the request and response do not match, indicating that
- there was some problem executing the RPC under test.
- """
- raise NotImplementedError()
-
-
-class UnaryStreamTestMethodImplementation(
- six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a unary-stream method."""
-
- @abc.abstractmethod
- def service(self, request, response_consumer, context, control):
- """Services an RPC that takes one message and produces a stream of messages.
-
- Args:
- request: The single request message for the RPC.
- response_consumer: A stream.Consumer to be called to accept the response
- messages of the RPC.
- context: A face.ServicerContext object.
- control: A test_control.Control to control execution of this method.
-
- Raises:
- abandonment.Abandoned: May or may not be raised when the RPC has been
- aborted.
- """
- raise NotImplementedError()
-
-
-class UnaryStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for unary-request-stream-response message pairings."""
-
- @abc.abstractmethod
- def request(self):
- """Affords a request message.
-
- Implementations of this method should return a different message with each
- call so that multiple test executions of the test method may be made with
- different inputs.
-
- Returns:
- A request message.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify(self, request, responses, test_case):
- """Verifies that the computed responses match the given request.
-
- Args:
- request: A request message.
- responses: A sequence of response messages.
- test_case: A unittest.TestCase object affording useful assertion methods.
-
- Raises:
- AssertionError: If the request and responses do not match, indicating that
- there was some problem executing the RPC under test.
- """
- raise NotImplementedError()
-
-
-class StreamUnaryTestMethodImplementation(
- six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a stream-unary method."""
-
- @abc.abstractmethod
- def service(self, response_callback, context, control):
- """Services an RPC that takes a stream of messages and produces one message.
-
- Args:
- response_callback: A callback to be called to accept the response message
- of the RPC.
- context: A face.ServicerContext object.
- control: A test_control.Control to control execution of this method.
-
- Returns:
- A stream.Consumer with which to accept the request messages of the RPC.
- The consumer returned from this method may or may not be invoked to
- completion: in the case of RPC abortion, RPC Framework will simply stop
- passing messages to this object. Implementations must not assume that
- this object will be called to completion of the request stream or even
- called at all.
-
- Raises:
- abandonment.Abandoned: May or may not be raised when the RPC has been
- aborted.
- """
- raise NotImplementedError()
-
-
-class StreamUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for stream-request-unary-response message pairings."""
-
- @abc.abstractmethod
- def requests(self):
- """Affords a sequence of request messages.
-
-    Implementations of this method should return a different sequence with each
- call so that multiple test executions of the test method may be made with
- different inputs.
-
- Returns:
- A sequence of request messages.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify(self, requests, response, test_case):
- """Verifies that the computed response matches the given requests.
-
- Args:
- requests: A sequence of request messages.
- response: A response message.
- test_case: A unittest.TestCase object affording useful assertion methods.
-
- Raises:
- AssertionError: If the requests and response do not match, indicating that
- there was some problem executing the RPC under test.
- """
- raise NotImplementedError()
-
-
-class StreamStreamTestMethodImplementation(
- six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a stream-stream method."""
-
- @abc.abstractmethod
- def service(self, response_consumer, context, control):
- """Services an RPC that accepts and produces streams of messages.
-
- Args:
- response_consumer: A stream.Consumer to be called to accept the response
- messages of the RPC.
- context: A face.ServicerContext object.
- control: A test_control.Control to control execution of this method.
-
- Returns:
- A stream.Consumer with which to accept the request messages of the RPC.
- The consumer returned from this method may or may not be invoked to
- completion: in the case of RPC abortion, RPC Framework will simply stop
- passing messages to this object. Implementations must not assume that
- this object will be called to completion of the request stream or even
- called at all.
-
- Raises:
- abandonment.Abandoned: May or may not be raised when the RPC has been
- aborted.
- """
- raise NotImplementedError()
-
-
-class StreamStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for stream-request-stream-response message pairings."""
-
- @abc.abstractmethod
- def requests(self):
- """Affords a sequence of request messages.
-
-    Implementations of this method should return a different sequence with each
- call so that multiple test executions of the test method may be made with
- different inputs.
-
- Returns:
- A sequence of request messages.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
-    def verify(self, requests, responses, test_case):
-        """Verifies that the computed responses match the given requests.
-
- Args:
- requests: A sequence of request messages.
- responses: A sequence of response messages.
- test_case: A unittest.TestCase object affording useful assertion methods.
-
- Raises:
- AssertionError: If the requests and responses do not match, indicating
- that there was some problem executing the RPC under test.
- """
- raise NotImplementedError()
-
-
-class TestService(six.with_metaclass(abc.ABCMeta)):
- """A specification of implemented methods to use in tests."""
-
- @abc.abstractmethod
- def unary_unary_scenarios(self):
- """Affords unary-request-unary-response test methods and their messages.
-
- Returns:
- A dict from method group-name pair to implementation/messages pair. The
- first element of the pair is a UnaryUnaryTestMethodImplementation object
-      and the second element is a sequence of UnaryUnaryTestMessages
- objects.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def unary_stream_scenarios(self):
- """Affords unary-request-stream-response test methods and their messages.
-
- Returns:
- A dict from method group-name pair to implementation/messages pair. The
- first element of the pair is a UnaryStreamTestMethodImplementation
- object and the second element is a sequence of
-      UnaryStreamTestMessages objects.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def stream_unary_scenarios(self):
- """Affords stream-request-unary-response test methods and their messages.
-
- Returns:
- A dict from method group-name pair to implementation/messages pair. The
- first element of the pair is a StreamUnaryTestMethodImplementation
- object and the second element is a sequence of
-      StreamUnaryTestMessages objects.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def stream_stream_scenarios(self):
- """Affords stream-request-stream-response test methods and their messages.
-
- Returns:
- A dict from method group-name pair to implementation/messages pair. The
- first element of the pair is a StreamStreamTestMethodImplementation
- object and the second element is a sequence of
-      StreamStreamTestMessages objects.
- """
- raise NotImplementedError()
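All four scenario methods specified above return the same shape of dict; the self-contained illustration below shows that shape with toy placeholders standing in for the stock-service implementations in the next deleted file:

class _ToyUnaryUnaryMethod(object):
    """Placeholder for a UnaryUnaryTestMethodImplementation."""


class _ToyUnaryUnaryMessages(object):
    """Placeholder for a UnaryUnaryTestMessages pairing."""


def unary_unary_scenarios():
    # (group, name) pair -> (implementation, message-sequence) pair, as
    # described in the TestService docstrings above.
    return {
        ('Stock', 'GetLastTradePrice'): (_ToyUnaryUnaryMethod(),
                                         [_ToyUnaryUnaryMessages()])
    }


implementation, messages = unary_unary_scenarios()[('Stock', 'GetLastTradePrice')]
assert isinstance(implementation, _ToyUnaryUnaryMethod)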
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
deleted file mode 100644
index a84e02a79a..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
+++ /dev/null
@@ -1,390 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Examples of Python implementations of the stock.proto Stock service."""
-
-from grpc.framework.common import cardinality
-from grpc.framework.foundation import abandonment
-from grpc.framework.foundation import stream
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.face import _service
-from tests.unit._junkdrawer import stock_pb2
-
-_STOCK_GROUP_NAME = 'Stock'
-_SYMBOL_FORMAT = 'test symbol:%03d'
-
-# A test-appropriate security-pricing function. :-P
-_price = lambda symbol_name: float(hash(symbol_name) % 4096)
-
-
-def _get_last_trade_price(stock_request, stock_reply_callback, control, active):
- """A unary-request, unary-response test method."""
- control.control()
- if active():
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol,
- price=_price(stock_request.symbol)))
- else:
- raise abandonment.Abandoned()
-
-
-def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
- """A stream-request, stream-response test method."""
-
- def stock_reply_for_stock_request(stock_request):
- control.control()
- if active():
- return stock_pb2.StockReply(
- symbol=stock_request.symbol, price=_price(stock_request.symbol))
- else:
- raise abandonment.Abandoned()
-
- class StockRequestConsumer(stream.Consumer):
-
- def consume(self, stock_request):
- stock_reply_consumer.consume(
- stock_reply_for_stock_request(stock_request))
-
- def terminate(self):
- control.control()
- stock_reply_consumer.terminate()
-
- def consume_and_terminate(self, stock_request):
- stock_reply_consumer.consume_and_terminate(
- stock_reply_for_stock_request(stock_request))
-
- return StockRequestConsumer()
-
-
-def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
- """A unary-request, stream-response test method."""
- base_price = _price(stock_request.symbol)
- for index in range(stock_request.num_trades_to_watch):
- control.control()
- if active():
- stock_reply_consumer.consume(
- stock_pb2.StockReply(
- symbol=stock_request.symbol, price=base_price + index))
- else:
- raise abandonment.Abandoned()
- stock_reply_consumer.terminate()
-
-
-def _get_highest_trade_price(stock_reply_callback, control, active):
- """A stream-request, unary-response test method."""
-
- class StockRequestConsumer(stream.Consumer):
- """Keeps an ongoing record of the most valuable symbol yet consumed."""
-
- def __init__(self):
- self._symbol = None
- self._price = None
-
- def consume(self, stock_request):
- control.control()
- if active():
- if self._price is None:
- self._symbol = stock_request.symbol
- self._price = _price(stock_request.symbol)
- else:
- candidate_price = _price(stock_request.symbol)
- if self._price < candidate_price:
- self._symbol = stock_request.symbol
- self._price = candidate_price
-
- def terminate(self):
- control.control()
- if active():
- if self._symbol is None:
- raise ValueError()
- else:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=self._symbol, price=self._price))
- self._symbol = None
- self._price = None
-
- def consume_and_terminate(self, stock_request):
- control.control()
- if active():
- if self._price is None:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol,
- price=_price(stock_request.symbol)))
- else:
- candidate_price = _price(stock_request.symbol)
- if self._price < candidate_price:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol,
- price=candidate_price))
- else:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=self._symbol, price=self._price))
-
- self._symbol = None
- self._price = None
-
- return StockRequestConsumer()
-
-
-class GetLastTradePrice(_service.UnaryUnaryTestMethodImplementation):
- """GetLastTradePrice for use in tests."""
-
- def group(self):
- return _STOCK_GROUP_NAME
-
- def name(self):
- return 'GetLastTradePrice'
-
- def cardinality(self):
- return cardinality.Cardinality.UNARY_UNARY
-
- def request_class(self):
- return stock_pb2.StockRequest
-
- def response_class(self):
- return stock_pb2.StockReply
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
-
- def service(self, request, response_callback, context, control):
- _get_last_trade_price(request, response_callback, control,
- context.is_active)
-
-
-class GetLastTradePriceMessages(_service.UnaryUnaryTestMessages):
-
- def __init__(self):
- self._index = 0
-
- def request(self):
- symbol = _SYMBOL_FORMAT % self._index
- self._index += 1
- return stock_pb2.StockRequest(symbol=symbol)
-
- def verify(self, request, response, test_case):
- test_case.assertEqual(request.symbol, response.symbol)
- test_case.assertEqual(_price(request.symbol), response.price)
-
-
-class GetLastTradePriceMultiple(_service.StreamStreamTestMethodImplementation):
- """GetLastTradePriceMultiple for use in tests."""
-
- def group(self):
- return _STOCK_GROUP_NAME
-
- def name(self):
- return 'GetLastTradePriceMultiple'
-
- def cardinality(self):
- return cardinality.Cardinality.STREAM_STREAM
-
- def request_class(self):
- return stock_pb2.StockRequest
-
- def response_class(self):
- return stock_pb2.StockReply
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
-
- def service(self, response_consumer, context, control):
- return _get_last_trade_price_multiple(response_consumer, control,
- context.is_active)
-
-
-class GetLastTradePriceMultipleMessages(_service.StreamStreamTestMessages):
- """Pairs of message streams for use with GetLastTradePriceMultiple."""
-
- def __init__(self):
- self._index = 0
-
- def requests(self):
- base_index = self._index
- self._index += 1
- return [
- stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % (base_index + index))
- for index in range(test_constants.STREAM_LENGTH)
- ]
-
- def verify(self, requests, responses, test_case):
- test_case.assertEqual(len(requests), len(responses))
- for stock_request, stock_reply in zip(requests, responses):
- test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
- test_case.assertEqual(
- _price(stock_request.symbol), stock_reply.price)
-
-
-class WatchFutureTrades(_service.UnaryStreamTestMethodImplementation):
- """WatchFutureTrades for use in tests."""
-
- def group(self):
- return _STOCK_GROUP_NAME
-
- def name(self):
- return 'WatchFutureTrades'
-
- def cardinality(self):
- return cardinality.Cardinality.UNARY_STREAM
-
- def request_class(self):
- return stock_pb2.StockRequest
-
- def response_class(self):
- return stock_pb2.StockReply
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
-
- def service(self, request, response_consumer, context, control):
- _watch_future_trades(request, response_consumer, control,
- context.is_active)
-
-
-class WatchFutureTradesMessages(_service.UnaryStreamTestMessages):
- """Pairs of a single request message and a sequence of response messages."""
-
- def __init__(self):
- self._index = 0
-
- def request(self):
- symbol = _SYMBOL_FORMAT % self._index
- self._index += 1
- return stock_pb2.StockRequest(
- symbol=symbol, num_trades_to_watch=test_constants.STREAM_LENGTH)
-
- def verify(self, request, responses, test_case):
- test_case.assertEqual(test_constants.STREAM_LENGTH, len(responses))
- base_price = _price(request.symbol)
- for index, response in enumerate(responses):
- test_case.assertEqual(base_price + index, response.price)
-
-
-class GetHighestTradePrice(_service.StreamUnaryTestMethodImplementation):
- """GetHighestTradePrice for use in tests."""
-
- def group(self):
- return _STOCK_GROUP_NAME
-
- def name(self):
- return 'GetHighestTradePrice'
-
- def cardinality(self):
- return cardinality.Cardinality.STREAM_UNARY
-
- def request_class(self):
- return stock_pb2.StockRequest
-
- def response_class(self):
- return stock_pb2.StockReply
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
-
- def service(self, response_callback, context, control):
- return _get_highest_trade_price(response_callback, control,
- context.is_active)
-
-
-class GetHighestTradePriceMessages(_service.StreamUnaryTestMessages):
-
- def requests(self):
- return [
- stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % index)
- for index in range(test_constants.STREAM_LENGTH)
- ]
-
- def verify(self, requests, response, test_case):
- price = None
- symbol = None
- for stock_request in requests:
- current_symbol = stock_request.symbol
- current_price = _price(current_symbol)
- if price is None or price < current_price:
- price = current_price
- symbol = current_symbol
- test_case.assertEqual(price, response.price)
- test_case.assertEqual(symbol, response.symbol)
-
-
-class StockTestService(_service.TestService):
- """A corpus of test data with one method of each RPC cardinality."""
-
- def unary_unary_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetLastTradePrice'):
- (GetLastTradePrice(), [GetLastTradePriceMessages()]),
- }
-
- def unary_stream_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'WatchFutureTrades'):
- (WatchFutureTrades(), [WatchFutureTradesMessages()]),
- }
-
- def stream_unary_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetHighestTradePrice'):
- (GetHighestTradePrice(), [GetHighestTradePriceMessages()])
- }
-
- def stream_stream_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetLastTradePriceMultiple'):
- (GetLastTradePriceMultiple(),
- [GetLastTradePriceMultipleMessages()]),
- }
-
-
-STOCK_TEST_SERVICE = StockTestService()
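A minimal sketch, for context, of how a driver can walk the scenario tables defined above: each entry pairs a test method implementation with its TestMessages factories, whose request()/verify() calls generate and check a single exchange. The names `response` and `test_case` are placeholders here, assumed to come from actually invoking the method under test and from the running unittest.TestCase respectively.

    # Sketch only: `response` is assumed to be produced by invoking the
    # (group, name) method under test, and `test_case` is assumed to be the
    # running unittest.TestCase instance.
    for (group, name), (method, messages_sequence) in (
            STOCK_TEST_SERVICE.unary_unary_scenarios().items()):
        for messages in messages_sequence:
            request = messages.request()
            # ... invoke (group, name) with `request` to obtain `response` ...
            messages.verify(request, response, test_case)
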
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
deleted file mode 100644
index cff4b7cdea..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tools for creating tests of implementations of the Face layer."""
-
-# unittest is referenced from specification in this module.
-import unittest # pylint: disable=unused-import
-
-# test_interfaces is referenced from specification in this module.
-from tests.unit.framework.interfaces.face import _blocking_invocation_inline_service
-from tests.unit.framework.interfaces.face import _future_invocation_asynchronous_event_service
-from tests.unit.framework.interfaces.face import _invocation
-from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
-
-_TEST_CASE_SUPERCLASSES = (
- _blocking_invocation_inline_service.TestCase,
- _future_invocation_asynchronous_event_service.TestCase,
-)
-
-
-def test_cases(implementation):
- """Creates unittest.TestCase classes for a given Face layer implementation.
-
- Args:
- implementation: A test_interfaces.Implementation specifying creation and
- destruction of a given Face layer implementation.
-
- Returns:
- A sequence of subclasses of unittest.TestCase defining tests of the
- specified Face layer implementation.
- """
- test_case_classes = []
- for invoker_constructor in _invocation.invoker_constructors():
- for super_class in _TEST_CASE_SUPERCLASSES:
- test_case_classes.append(
- type(
- invoker_constructor.name() + super_class.NAME,
- (super_class,), {
- 'implementation': implementation,
- 'invoker_constructor': invoker_constructor,
- '__module__': implementation.__module__,
- }))
- return test_case_classes
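A minimal sketch, for context, of how the test_cases() helper above was typically consumed from an implementation's test module, assuming a hypothetical `_Implementation` class that satisfies test_interfaces.Implementation:

    import unittest

    from tests.unit.framework.interfaces.face import test_cases

    # _Implementation is a hypothetical stand-in for a concrete
    # test_interfaces.Implementation defined in the consuming test module.
    def load_tests(loader, tests, pattern):
        """unittest's load_tests protocol: one entry per generated TestCase."""
        return unittest.TestSuite(
            tests=tuple(
                loader.loadTestsFromTestCase(test_case_class)
                for test_case_class in test_cases.test_cases(_Implementation())))

Each generated class combines one invoker constructor with one of the TestCase superclasses, so a single implementation is exercised under every invocation style.
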
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
deleted file mode 100644
index d0de8e1c54..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Interfaces used in tests of implementations of the Face layer."""
-
-import abc
-
-import six
-
-from grpc.framework.common import cardinality # pylint: disable=unused-import
-from grpc.framework.interfaces.face import face # pylint: disable=unused-import
-
-
-class Method(six.with_metaclass(abc.ABCMeta)):
- """Specifies a method to be used in tests."""
-
- @abc.abstractmethod
- def group(self):
- """Identify the group of the method.
-
- Returns:
- The group of the method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def name(self):
- """Identify the name of the method.
-
- Returns:
- The name of the method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def cardinality(self):
- """Identify the cardinality of the method.
-
- Returns:
- A cardinality.Cardinality value describing the streaming semantics of the
- method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def request_class(self):
- """Identify the class used for the method's request objects.
-
- Returns:
- The class object of the class to which the method's request objects
- belong.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def response_class(self):
- """Identify the class used for the method's response objects.
-
- Returns:
- The class object of the class to which the method's response objects
- belong.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_request(self, request):
- """Serialize the given request object.
-
- Args:
- request: A request object appropriate for this method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_request(self, serialized_request):
- """Synthesize a request object from a given bytestring.
-
- Args:
- serialized_request: A bytestring deserializable into a request object
- appropriate for this method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_response(self, response):
- """Serialize the given response object.
-
- Args:
- response: A response object appropriate for this method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_response(self, serialized_response):
- """Synthesize a response object from a given bytestring.
-
- Args:
- serialized_response: A bytestring deserializable into a response object
- appropriate for this method.
- """
- raise NotImplementedError()
-
-
-class Implementation(six.with_metaclass(abc.ABCMeta)):
- """Specifies an implementation of the Face layer."""
-
- @abc.abstractmethod
- def instantiate(self, methods, method_implementations,
- multi_method_implementation):
- """Instantiates the Face layer implementation to be used in a test.
-
- Args:
- methods: A sequence of Method objects describing the methods available to
- be called during the test.
- method_implementations: A dictionary from group-name pair to
- face.MethodImplementation object specifying implementation of a method.
- multi_method_implementation: A face.MultiMethodImplementation or None.
-
- Returns:
- A sequence of length three the first element of which is a
- face.GenericStub, the second element of which is dictionary from groups
- to face.DynamicStubs affording invocation of the group's methods, and
- the third element of which is an arbitrary memo object to be kept and
- passed to destantiate at the conclusion of the test. The returned stubs
- must be backed by the provided implementations.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def destantiate(self, memo):
- """Destroys the Face layer implementation under test.
-
- Args:
- memo: The object from the third position of the return value of a call to
- instantiate.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def invocation_metadata(self):
- """Provides the metadata to be used when invoking a test RPC.
-
- Returns:
- An object to use as the supplied-at-invocation-time metadata in a test
- RPC.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def initial_metadata(self):
- """Provides the metadata for use as a test RPC's first servicer metadata.
-
- Returns:
- An object to use as the from-the-servicer-before-responses metadata in a
- test RPC.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def terminal_metadata(self):
- """Provides the metadata for use as a test RPC's second servicer metadata.
-
- Returns:
- An object to use as the from-the-servicer-after-all-responses metadata in
- a test RPC.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def code(self):
- """Provides the value for use as a test RPC's code.
-
- Returns:
- An object to use as the from-the-servicer code in a test RPC.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def details(self):
- """Provides the value for use as a test RPC's details.
-
- Returns:
- An object to use as the from-the-servicer details in a test RPC.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def metadata_transmitted(self, original_metadata, transmitted_metadata):
- """Identifies whether or not metadata was properly transmitted.
-
- Args:
- original_metadata: A metadata value passed to the Face interface
- implementation under test.
- transmitted_metadata: The same metadata value after having been
- transmitted via an RPC performed by the Face interface implementation
- under test.
-
- Returns:
- Whether or not the metadata was properly transmitted by the Face interface
- implementation under test.
- """
- raise NotImplementedError()
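A minimal sketch, for context, of the setup/teardown lifecycle these docstrings describe, with `implementation`, `methods`, and `method_implementations` assumed to be supplied by the concrete test:

    # Sketch only: the names below are assumed to be provided by the concrete
    # test; passing None means no multi-method implementation is registered.
    generic_stub, dynamic_stubs, memo = implementation.instantiate(
        methods, method_implementations, None)
    try:
        pass  # invoke RPCs through generic_stub or dynamic_stubs[group] here
    finally:
        implementation.destantiate(memo)
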
diff --git a/src/ruby/lib/grpc/core/time_consts.rb b/src/ruby/lib/grpc/core/time_consts.rb
index 92cd323fa8..896b720780 100644
--- a/src/ruby/lib/grpc/core/time_consts.rb
+++ b/src/ruby/lib/grpc/core/time_consts.rb
@@ -32,7 +32,7 @@ module GRPC
# * timish == 0 => TimeConsts.ZERO
#
# @param timeish [Number|TimeSpec]
- # @return timeish [Number|TimeSpec]
+ # @return [Number|TimeSpec]
def from_relative_time(timeish)
if timeish.is_a? TimeSpec
timeish
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 3bdcc0062e..086455db0b 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -64,7 +64,7 @@ module GRPC
# @param requests the Enumerable of requests to send
# @param set_input_stream_done [Proc] called back when we're done
# reading the input stream
- # @param set_input_stream_done [Proc] called back when we're done
+ # @param set_output_stream_done [Proc] called back when we're done
# sending data on the output stream
# @return an Enumerator of requests to yield
def run_on_client(requests,
diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb
index 9a50f8a99d..b193f5c4e1 100644
--- a/src/ruby/lib/grpc/generic/client_stub.rb
+++ b/src/ruby/lib/grpc/generic/client_stub.rb
@@ -58,8 +58,8 @@ module GRPC
# Minimally, a stub is created with the just the host of the gRPC service
# it wishes to access, e.g.,
#
- # my_stub = ClientStub.new(example.host.com:50505,
- # :this_channel_is_insecure)
+ # my_stub = ClientStub.new(example.host.com:50505,
+ # :this_channel_is_insecure)
#
# If a channel_override argument is passed, it will be used as the
# underlying channel. Otherwise, the channel_args argument will be used
@@ -376,7 +376,7 @@ module GRPC
# This is a blocking call.
#
# * the call completes when the next call to provided block returns
- # * [False]
+ # false
#
# * the execution block parameters are two objects for sending and
# receiving responses, each of which blocks waiting for flow control.
@@ -398,13 +398,9 @@ module GRPC
# responses by throwing StopIteration, but can only happen either
# if bidi_call#writes_done is called.
#
- # To terminate the RPC correctly the block:
- #
- # * must call bidi#writes_done and then
- #
- # * either return false as soon as there is no need for other responses
- #
- # * loop on responses#next until no further responses are available
+ # To properly terminate the RPC, the responses should be completely iterated
+ # through; one way to do this is to loop on responses#next until no further
+ # responses are available.
#
# == Errors ==
# An RuntimeError is raised if
diff --git a/src/ruby/lib/grpc/generic/interceptors.rb b/src/ruby/lib/grpc/generic/interceptors.rb
index 24482f3451..56d3cecaad 100644
--- a/src/ruby/lib/grpc/generic/interceptors.rb
+++ b/src/ruby/lib/grpc/generic/interceptors.rb
@@ -153,7 +153,7 @@ module GRPC
#
class InterceptionContext
##
- # @param [Array<GRPC::Interceptor>]
+ # @param interceptors [Array<GRPC::Interceptor>]
#
def initialize(interceptors = [])
@interceptors = interceptors.dup
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index d96e677f20..31ab6a302b 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -204,7 +204,7 @@ module GRPC
# * connect_md_proc:
# when non-nil is a proc for determining metadata to to send back the client
# on receiving an invocation req. The proc signature is:
- # {key: val, ..} func(method_name, {key: val, ...})
+ # {key: val, ..} func(method_name, {key: val, ...})
#
# * server_args:
# A server arguments hash to be passed down to the underlying core server
@@ -283,7 +283,7 @@ module GRPC
# If run has not been called, this returns immediately.
#
# @param timeout [Numeric] number of seconds to wait
- # @result [true, false] true if the server is running, false otherwise
+ # @return [true, false] true if the server is running, false otherwise
def wait_till_running(timeout = nil)
@run_mutex.synchronize do
@run_cond.wait(@run_mutex, timeout) if @running_state == :not_started