Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.cc                                     |  13
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc         |   4
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h        |  14
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc | 142
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc         | 155
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h          |   4
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc|  11
-rw-r--r--  src/core/lib/channel/handshaker.cc                                                    |  29
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc                                                 |   4
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc                                                |  77
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc                                               |  23
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc                                                   |   4
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc                                                        |   9
-rw-r--r--  src/core/lib/iomgr/ev_posix.h                                                         |   4
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc                                                |   6
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc                                                       |   2
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc                                                |   2
-rw-r--r--  src/core/lib/iomgr/udp_server.cc                                                      |   3
-rw-r--r--  src/core/tsi/ssl_transport_security.cc                                                |   5
19 files changed, 279 insertions(+), 232 deletions(-)
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index 29a6c0e367..9baccd8628 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -83,11 +83,24 @@ done:
return proxy_name;
}
+/**
+ * Checks the value of GRPC_ARG_ENABLE_HTTP_PROXY to determine if http_proxy
+ * should be used.
+ */
+bool http_proxy_enabled(const grpc_channel_args* args) {
+ const grpc_arg* arg =
+ grpc_channel_args_find(args, GRPC_ARG_ENABLE_HTTP_PROXY);
+ return grpc_channel_arg_get_bool(arg, true);
+}
+
static bool proxy_mapper_map_name(grpc_proxy_mapper* mapper,
const char* server_uri,
const grpc_channel_args* args,
char** name_to_resolve,
grpc_channel_args** new_args) {
+ if (!http_proxy_enabled(args)) {
+ return false;
+ }
char* user_cred = nullptr;
*name_to_resolve = get_http_proxy_server(&user_cred);
if (*name_to_resolve == nullptr) return false;
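The new http_proxy_enabled() check reads GRPC_ARG_ENABLE_HTTP_PROXY with grpc_channel_arg_get_bool() and defaults to true, so existing clients keep honoring the http_proxy environment variable unless they opt out. A hedged sketch of opting out via the public C API (GRPC_ARG_ENABLE_HTTP_PROXY is assumed to be exported by the public headers as part of this change; the helper name is hypothetical):

#include <grpc/grpc.h>

// Create an insecure channel with the HTTP proxy mapper disabled via the
// new channel arg; proxy_mapper_map_name() will then return false early.
grpc_channel* create_channel_without_proxy(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = const_cast<char*>(GRPC_ARG_ENABLE_HTTP_PROXY);
  arg.value.integer = 0;  // 0 disables use of the http_proxy variable
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, nullptr);
}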
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 3c40ae14b8..f4f6444c5f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -414,10 +414,10 @@ void AresDnsResolver::StartResolvingLocked() {
resolving_ = true;
lb_addresses_ = nullptr;
service_config_json_ = nullptr;
- pending_request_ = grpc_dns_lookup_ares(
+ pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &lb_addresses_, true /* check_grpclb */,
- request_service_config_ ? &service_config_json_ : nullptr);
+ request_service_config_ ? &service_config_json_ : nullptr, combiner());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
}
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 6239549534..27d1511d94 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -29,25 +29,27 @@ typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
bound to its ares_channel when necessary. */
-void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver);
+void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver);
/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
\a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
query. */
-ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver);
+ares_channel* grpc_ares_ev_driver_get_channel_locked(
+ grpc_ares_ev_driver* ev_driver);
/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
created successfully. */
-grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
- grpc_pollset_set* pollset_set);
+grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner);
/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
will be cancelled and their on_done callbacks will be invoked with a status
of ARES_ECANCELLED. */
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver);
+void grpc_ares_ev_driver_destroy_locked(grpc_ares_ev_driver* ev_driver);
/* Shutdown all the grpc_fds used by \a ev_driver */
-void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver);
+void grpc_ares_ev_driver_shutdown_locked(grpc_ares_ev_driver* ev_driver);
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
*/
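The *_locked suffix adopted throughout this header signals that callers must already be running on the grpc_combiner now passed to grpc_ares_ev_driver_create_locked(). A minimal sketch of that convention, assuming only the combiner/closure APIs this patch itself uses (the function names here are illustrative, not part of the patch): closures initialized with grpc_combiner_scheduler() run serialized on the combiner, which is what lets the driver drop its per-object mutexes.

#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void do_step_locked(void* arg, grpc_error* error) {
  // Runs exclusively w.r.t. every other closure scheduled on this combiner,
  // so shared state may be touched without additional locking.
}

static void run_under_combiner() {
  grpc_core::ExecCtx exec_ctx;  // combiner closures drain via the ExecCtx
  grpc_combiner* combiner = grpc_combiner_create();
  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(do_step_locked, nullptr,
                                         grpc_combiner_scheduler(combiner)),
                     GRPC_ERROR_NONE);
  GRPC_COMBINER_UNREF(combiner, "run_under_combiner");
}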
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index 00a84302ed..b73e979e9f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -21,6 +21,7 @@
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER)
#include <ares.h>
+#include <string.h>
#include <sys/ioctl.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@@ -38,25 +39,23 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver* ev_driver;
- /** a closure wrapping on_readable_cb, which should be invoked when the
- grpc_fd in this node becomes readable. */
+ /** a closure wrapping on_readable_locked, which should be
+ invoked when the grpc_fd in this node becomes readable. */
grpc_closure read_closure;
- /** a closure wrapping on_writable_cb, which should be invoked when the
- grpc_fd in this node becomes writable. */
+ /** a closure wrapping on_writable_locked, which should be
+ invoked when the grpc_fd in this node becomes writable. */
grpc_closure write_closure;
/** next fd node in the list */
struct fd_node* next;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** the grpc_fd owned by this fd node */
grpc_fd* fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
bool writable_registered;
- /** if the fd is being shut down */
- bool shutting_down;
+ /** if the fd has been shut down from the grpc iomgr perspective */
+ bool already_shutdown;
} fd_node;
struct grpc_ares_ev_driver {
@@ -67,8 +66,8 @@ struct grpc_ares_ev_driver {
/** refcount of the event driver */
gpr_refcount refs;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
+ /** combiner to synchronize c-ares and I/O callbacks on */
+ grpc_combiner* combiner;
/** a list of grpc_fd that this event driver is currently using. */
fd_node* fds;
/** is this event driver currently working? */
@@ -91,44 +90,42 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
if (gpr_unref(&ev_driver->refs)) {
gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
GPR_ASSERT(ev_driver->fds == nullptr);
- gpr_mu_destroy(&ev_driver->mu);
+ GRPC_COMBINER_UNREF(ev_driver->combiner, "free ares event driver");
ares_destroy(ev_driver->channel);
gpr_free(ev_driver);
}
}
-static void fd_node_destroy(fd_node* fdn) {
+static void fd_node_destroy_locked(fd_node* fdn) {
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
- gpr_mu_destroy(&fdn->mu);
- /* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
+ GPR_ASSERT(fdn->already_shutdown);
+ /* c-ares library will close the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
- grpc_fd_orphan(fdn->fd, nullptr, nullptr, true /* already_closed */,
- "c-ares query finished");
+ int dummy_release_fd;
+ grpc_fd_orphan(fdn->fd, nullptr, &dummy_release_fd, "c-ares query finished");
gpr_free(fdn);
}
-static void fd_node_shutdown(fd_node* fdn) {
- gpr_mu_lock(&fdn->mu);
- fdn->shutting_down = true;
- if (!fdn->readable_registered && !fdn->writable_registered) {
- gpr_mu_unlock(&fdn->mu);
- fd_node_destroy(fdn);
- } else {
- grpc_fd_shutdown(
- fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
- gpr_mu_unlock(&fdn->mu);
+static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) {
+ if (!fdn->already_shutdown) {
+ fdn->already_shutdown = true;
+ grpc_fd_shutdown(fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(reason));
}
}
-grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
- grpc_pollset_set* pollset_set) {
+grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner) {
*ev_driver = static_cast<grpc_ares_ev_driver*>(
gpr_malloc(sizeof(grpc_ares_ev_driver)));
- int status = ares_init(&(*ev_driver)->channel);
- gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
+ ares_options opts;
+ memset(&opts, 0, sizeof(opts));
+ opts.flags |= ARES_FLAG_STAYOPEN;
+ int status = ares_init_options(&(*ev_driver)->channel, &opts, ARES_OPT_FLAGS);
+ gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create_locked");
if (status != ARES_SUCCESS) {
char* err_msg;
gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
@@ -138,7 +135,7 @@ grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
gpr_free(*ev_driver);
return err;
}
- gpr_mu_init(&(*ev_driver)->mu);
+ (*ev_driver)->combiner = GRPC_COMBINER_REF(combiner, "ares event driver");
gpr_ref_init(&(*ev_driver)->refs, 1);
(*ev_driver)->pollset_set = pollset_set;
(*ev_driver)->fds = nullptr;
@@ -147,33 +144,26 @@ grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
return GRPC_ERROR_NONE;
}
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver) {
- // It's not safe to shut down remaining fds here directly, becauses
- // ares_host_callback does not provide an exec_ctx. We mark the event driver
- // as being shut down. If the event driver is working,
- // grpc_ares_notify_on_event_locked will shut down the fds; if it's not
- // working, there are no fds to shut down.
- gpr_mu_lock(&ev_driver->mu);
+void grpc_ares_ev_driver_destroy_locked(grpc_ares_ev_driver* ev_driver) {
+ // We mark the event driver as being shut down. If the event driver
+ // is working, grpc_ares_notify_on_event_locked will shut down the
+ // fds; if it's not working, there are no fds to shut down.
ev_driver->shutting_down = true;
- gpr_mu_unlock(&ev_driver->mu);
grpc_ares_ev_driver_unref(ev_driver);
}
-void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver) {
- gpr_mu_lock(&ev_driver->mu);
+void grpc_ares_ev_driver_shutdown_locked(grpc_ares_ev_driver* ev_driver) {
ev_driver->shutting_down = true;
fd_node* fn = ev_driver->fds;
while (fn != nullptr) {
- grpc_fd_shutdown(fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "grpc_ares_ev_driver_shutdown"));
+ fd_node_shutdown_locked(fn, "grpc_ares_ev_driver_shutdown");
fn = fn->next;
}
- gpr_mu_unlock(&ev_driver->mu);
}
// Search fd in the fd_node list head. This is an O(n) search, the max possible
// value of n is ARES_GETSOCK_MAXNUM (16). n is typically 1 - 2 in our tests.
-static fd_node* pop_fd_node(fd_node** head, int fd) {
+static fd_node* pop_fd_node_locked(fd_node** head, int fd) {
fd_node dummy_head;
dummy_head.next = *head;
fd_node* node = &dummy_head;
@@ -190,31 +180,22 @@ static fd_node* pop_fd_node(fd_node** head, int fd) {
}
/* Check if \a fd is still readable */
-static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
- int fd) {
+static bool grpc_ares_is_fd_still_readable_locked(
+ grpc_ares_ev_driver* ev_driver, int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
-static void on_readable_cb(void* arg, grpc_error* error) {
+static void on_readable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
- gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
- if (fdn->shutting_down && !fdn->writable_registered) {
- gpr_mu_unlock(&fdn->mu);
- fd_node_destroy(fdn);
- grpc_ares_ev_driver_unref(ev_driver);
- return;
- }
- gpr_mu_unlock(&fdn->mu);
-
gpr_log(GPR_DEBUG, "readable on %d", fd);
if (error == GRPC_ERROR_NONE) {
do {
ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD);
- } while (grpc_ares_is_fd_still_readable(ev_driver, fd));
+ } while (grpc_ares_is_fd_still_readable_locked(ev_driver, fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@@ -224,26 +205,15 @@ static void on_readable_cb(void* arg, grpc_error* error) {
// grpc_ares_notify_on_event_locked().
ares_cancel(ev_driver->channel);
}
- gpr_mu_lock(&ev_driver->mu);
grpc_ares_notify_on_event_locked(ev_driver);
- gpr_mu_unlock(&ev_driver->mu);
grpc_ares_ev_driver_unref(ev_driver);
}
-static void on_writable_cb(void* arg, grpc_error* error) {
+static void on_writable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
- gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
- if (fdn->shutting_down && !fdn->readable_registered) {
- gpr_mu_unlock(&fdn->mu);
- fd_node_destroy(fdn);
- grpc_ares_ev_driver_unref(ev_driver);
- return;
- }
- gpr_mu_unlock(&fdn->mu);
-
gpr_log(GPR_DEBUG, "writable on %d", fd);
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd);
@@ -256,13 +226,12 @@ static void on_writable_cb(void* arg, grpc_error* error) {
// grpc_ares_notify_on_event_locked().
ares_cancel(ev_driver->channel);
}
- gpr_mu_lock(&ev_driver->mu);
grpc_ares_notify_on_event_locked(ev_driver);
- gpr_mu_unlock(&ev_driver->mu);
grpc_ares_ev_driver_unref(ev_driver);
}
-ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver) {
+ares_channel* grpc_ares_ev_driver_get_channel_locked(
+ grpc_ares_ev_driver* ev_driver) {
return &ev_driver->channel;
}
@@ -277,7 +246,7 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
- fd_node* fdn = pop_fd_node(&ev_driver->fds, socks[i]);
+ fd_node* fdn = pop_fd_node_locked(&ev_driver->fds, socks[i]);
// Create a new fd_node if sock[i] is not in the fd_node list.
if (fdn == nullptr) {
char* fd_name;
@@ -288,18 +257,16 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
- fdn->shutting_down = false;
- gpr_mu_init(&fdn->mu);
- GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
- grpc_schedule_on_exec_ctx);
+ fdn->already_shutdown = false;
+ GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_locked, fdn,
+ grpc_combiner_scheduler(ev_driver->combiner));
+ GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_locked, fdn,
+ grpc_combiner_scheduler(ev_driver->combiner));
grpc_pollset_set_add_fd(ev_driver->pollset_set, fdn->fd);
gpr_free(fd_name);
}
fdn->next = new_list;
new_list = fdn;
- gpr_mu_lock(&fdn->mu);
// Register read_closure if the socket is readable and read_closure has
// not been registered with this socket.
if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
@@ -319,7 +286,6 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
grpc_fd_notify_on_write(fdn->fd, &fdn->write_closure);
fdn->writable_registered = true;
}
- gpr_mu_unlock(&fdn->mu);
}
}
}
@@ -329,7 +295,13 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
while (ev_driver->fds != nullptr) {
fd_node* cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
- fd_node_shutdown(cur);
+ fd_node_shutdown_locked(cur, "c-ares fd shutdown");
+ if (!cur->readable_registered && !cur->writable_registered) {
+ fd_node_destroy_locked(cur);
+ } else {
+ cur->next = new_list;
+ new_list = cur;
+ }
}
ev_driver->fds = new_list;
// If the ev driver has no working fd, all the tasks are done.
@@ -339,13 +311,11 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
}
}
-void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver) {
- gpr_mu_lock(&ev_driver->mu);
+void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) {
if (!ev_driver->working) {
ev_driver->working = true;
grpc_ares_notify_on_event_locked(ev_driver);
}
- gpr_mu_unlock(&ev_driver->mu);
}
#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */
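Besides the mutex-to-combiner migration, grpc_ares_ev_driver_create_locked() now initializes c-ares with ARES_FLAG_STAYOPEN so sockets are kept open across queries. A standalone sketch of just that call, using only the plain c-ares public API (the helper name is illustrative):

#include <ares.h>
#include <string.h>

// Returns ARES_SUCCESS on success, mirroring the initialization above.
int init_stayopen_channel(ares_channel* channel) {
  ares_options opts;
  memset(&opts, 0, sizeof(opts));
  opts.flags |= ARES_FLAG_STAYOPEN;  // keep sockets open between queries
  return ares_init_options(channel, &opts, ARES_OPT_FLAGS);
}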
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 18d0a7b9f6..471de58e8c 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -65,8 +65,6 @@ struct grpc_ares_request {
/** number of ongoing queries */
gpr_refcount pending_queries;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** is there at least one successful query, set in on_done_cb */
bool success;
/** the errors explaining the request failure, set in on_done_cb */
@@ -74,7 +72,8 @@ struct grpc_ares_request {
};
typedef struct grpc_ares_hostbyname_request {
- /** following members are set in create_hostbyname_request */
+ /** following members are set in create_hostbyname_request_locked
+ */
/** the top-level request instance */
grpc_ares_request* parent_request;
/** host to resolve, parsed from the name to resolve */
@@ -96,10 +95,6 @@ static uint16_t strhtons(const char* port) {
return htons(static_cast<unsigned short>(atoi(port)));
}
-static void grpc_ares_request_ref(grpc_ares_request* r) {
- gpr_ref(&r->pending_queries);
-}
-
static void log_address_sorting_list(grpc_lb_addresses* lb_addrs,
const char* input_output_str) {
for (size_t i = 0; i < lb_addrs->num_addresses; i++) {
@@ -149,7 +144,11 @@ void grpc_cares_wrapper_test_only_address_sorting_sort(
grpc_cares_wrapper_address_sorting_sort(lb_addrs);
}
-static void grpc_ares_request_unref(grpc_ares_request* r) {
+static void grpc_ares_request_ref_locked(grpc_ares_request* r) {
+ gpr_ref(&r->pending_queries);
+}
+
+static void grpc_ares_request_unref_locked(grpc_ares_request* r) {
/* If there are no pending queries, invoke on_done callback and destroy the
request */
if (gpr_unref(&r->pending_queries)) {
@@ -158,13 +157,12 @@ static void grpc_ares_request_unref(grpc_ares_request* r) {
grpc_cares_wrapper_address_sorting_sort(lb_addrs);
}
GRPC_CLOSURE_SCHED(r->on_done, r->error);
- gpr_mu_destroy(&r->mu);
- grpc_ares_ev_driver_destroy(r->ev_driver);
+ grpc_ares_ev_driver_destroy_locked(r->ev_driver);
gpr_free(r);
}
}
-static grpc_ares_hostbyname_request* create_hostbyname_request(
+static grpc_ares_hostbyname_request* create_hostbyname_request_locked(
grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
grpc_ares_hostbyname_request* hr = static_cast<grpc_ares_hostbyname_request*>(
@@ -173,22 +171,22 @@ static grpc_ares_hostbyname_request* create_hostbyname_request(
hr->host = gpr_strdup(host);
hr->port = port;
hr->is_balancer = is_balancer;
- grpc_ares_request_ref(parent_request);
+ grpc_ares_request_ref_locked(parent_request);
return hr;
}
-static void destroy_hostbyname_request(grpc_ares_hostbyname_request* hr) {
- grpc_ares_request_unref(hr->parent_request);
+static void destroy_hostbyname_request_locked(
+ grpc_ares_hostbyname_request* hr) {
+ grpc_ares_request_unref_locked(hr->parent_request);
gpr_free(hr->host);
gpr_free(hr);
}
-static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
- struct hostent* hostent) {
+static void on_hostbyname_done_locked(void* arg, int status, int timeouts,
+ struct hostent* hostent) {
grpc_ares_hostbyname_request* hr =
static_cast<grpc_ares_hostbyname_request*>(arg);
grpc_ares_request* r = hr->parent_request;
- gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
@@ -263,33 +261,33 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
r->error = grpc_error_add_child(error, r->error);
}
}
- gpr_mu_unlock(&r->mu);
- destroy_hostbyname_request(hr);
+ destroy_hostbyname_request_locked(hr);
}
-static void on_srv_query_done_cb(void* arg, int status, int timeouts,
- unsigned char* abuf, int alen) {
+static void on_srv_query_done_locked(void* arg, int status, int timeouts,
+ unsigned char* abuf, int alen) {
grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
- gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
+ gpr_log(GPR_DEBUG, "on_srv_query_done_locked");
if (status == ARES_SUCCESS) {
- gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
+ gpr_log(GPR_DEBUG, "on_srv_query_done_locked ARES_SUCCESS");
struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
if (parse_status == ARES_SUCCESS) {
- ares_channel* channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ ares_channel* channel =
+ grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr;
srv_it = srv_it->next) {
if (grpc_ipv6_loopback_available()) {
- grpc_ares_hostbyname_request* hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request_locked(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6,
- on_hostbyname_done_cb, hr);
+ on_hostbyname_done_locked, hr);
}
- grpc_ares_hostbyname_request* hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request_locked(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
- ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
- hr);
- grpc_ares_ev_driver_start(r->ev_driver);
+ ares_gethostbyname(*channel, hr->host, AF_INET,
+ on_hostbyname_done_locked, hr);
+ grpc_ares_ev_driver_start_locked(r->ev_driver);
}
}
if (reply != nullptr) {
@@ -307,21 +305,20 @@ static void on_srv_query_done_cb(void* arg, int status, int timeouts,
r->error = grpc_error_add_child(error, r->error);
}
}
- grpc_ares_request_unref(r);
+ grpc_ares_request_unref_locked(r);
}
static const char g_service_config_attribute_prefix[] = "grpc_config=";
-static void on_txt_done_cb(void* arg, int status, int timeouts,
- unsigned char* buf, int len) {
- gpr_log(GPR_DEBUG, "on_txt_done_cb");
+static void on_txt_done_locked(void* arg, int status, int timeouts,
+ unsigned char* buf, int len) {
+ gpr_log(GPR_DEBUG, "on_txt_done_locked");
char* error_msg;
grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
grpc_error* error = GRPC_ERROR_NONE;
- gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
@@ -366,14 +363,14 @@ fail:
r->error = grpc_error_add_child(error, r->error);
}
done:
- gpr_mu_unlock(&r->mu);
- grpc_ares_request_unref(r);
+ grpc_ares_request_unref_locked(r);
}
-static grpc_ares_request* grpc_dns_lookup_ares_impl(
+static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) {
+ grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ grpc_combiner* combiner) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
grpc_ares_request* r = nullptr;
@@ -402,20 +399,19 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
}
port = gpr_strdup(default_port);
}
-
grpc_ares_ev_driver* ev_driver;
- error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
+ error = grpc_ares_ev_driver_create_locked(&ev_driver, interested_parties,
+ combiner);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
r = static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
- gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
r->lb_addrs_out = addrs;
r->service_config_json_out = service_config_json;
r->success = false;
r->error = GRPC_ERROR_NONE;
- channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ channel = grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
// If dns_server is specified, use it.
if (dns_server != nullptr) {
@@ -457,32 +453,34 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
}
gpr_ref_init(&r->pending_queries, 1);
if (grpc_ipv6_loopback_available()) {
- hr = create_hostbyname_request(r, host, strhtons(port),
- false /* is_balancer */);
- ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
+ hr = create_hostbyname_request_locked(r, host, strhtons(port),
+ false /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_locked,
+ hr);
}
- hr = create_hostbyname_request(r, host, strhtons(port),
- false /* is_balancer */);
- ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
+ hr = create_hostbyname_request_locked(r, host, strhtons(port),
+ false /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_locked,
+ hr);
if (check_grpclb) {
/* Query the SRV record */
- grpc_ares_request_ref(r);
+ grpc_ares_request_ref_locked(r);
char* service_name;
gpr_asprintf(&service_name, "_grpclb._tcp.%s", host);
- ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb,
- r);
+ ares_query(*channel, service_name, ns_c_in, ns_t_srv,
+ on_srv_query_done_locked, r);
gpr_free(service_name);
}
if (service_config_json != nullptr) {
- grpc_ares_request_ref(r);
+ grpc_ares_request_ref_locked(r);
char* config_name;
gpr_asprintf(&config_name, "_grpc_config.%s", host);
- ares_search(*channel, config_name, ns_c_in, ns_t_txt, on_txt_done_cb, r);
+ ares_search(*channel, config_name, ns_c_in, ns_t_txt, on_txt_done_locked,
+ r);
gpr_free(config_name);
}
- /* TODO(zyc): Handle CNAME records here. */
- grpc_ares_ev_driver_start(r->ev_driver);
- grpc_ares_request_unref(r);
+ grpc_ares_ev_driver_start_locked(r->ev_driver);
+ grpc_ares_request_unref_locked(r);
gpr_free(host);
gpr_free(port);
return r;
@@ -494,15 +492,15 @@ error_cleanup:
return nullptr;
}
-grpc_ares_request* (*grpc_dns_lookup_ares)(
+grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_lb_addresses** addrs, bool check_grpclb,
- char** service_config_json) = grpc_dns_lookup_ares_impl;
+ grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
void grpc_cancel_ares_request(grpc_ares_request* r) {
- if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
- grpc_ares_ev_driver_shutdown(r->ev_driver);
+ if (grpc_dns_lookup_ares_locked == grpc_dns_lookup_ares_locked_impl) {
+ grpc_ares_ev_driver_shutdown_locked(r->ev_driver);
}
}
@@ -534,6 +532,8 @@ void grpc_ares_cleanup(void) {
*/
typedef struct grpc_resolve_address_ares_request {
+ /* combiner that queries and related callbacks run under */
+ grpc_combiner* combiner;
/** the pointer to receive the resolved addresses */
grpc_resolved_addresses** addrs_out;
/** currently resolving lb addresses */
@@ -541,8 +541,14 @@ typedef struct grpc_resolve_address_ares_request {
/** closure to call when the resolve_address_ares request completes */
grpc_closure* on_resolve_address_done;
/** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the
- grpc_dns_lookup_ares operation is done. */
+ grpc_dns_lookup_ares_locked operation is done. */
grpc_closure on_dns_lookup_done;
+ /* target name */
+ const char* name;
+ /* default port to use if none is specified */
+ const char* default_port;
+ /* pollset_set to be driven by */
+ grpc_pollset_set* interested_parties;
} grpc_resolve_address_ares_request;
static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
@@ -566,9 +572,20 @@ static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
}
GRPC_CLOSURE_SCHED(r->on_resolve_address_done, GRPC_ERROR_REF(error));
if (r->lb_addrs != nullptr) grpc_lb_addresses_destroy(r->lb_addrs);
+ GRPC_COMBINER_UNREF(r->combiner, "on_dns_lookup_done_cb");
gpr_free(r);
}
+static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
+ void* arg, grpc_error* unused_error) {
+ grpc_resolve_address_ares_request* r =
+ static_cast<grpc_resolve_address_ares_request*>(arg);
+ grpc_dns_lookup_ares_locked(
+ nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
+ &r->on_dns_lookup_done, &r->lb_addrs, false /* check_grpclb */,
+ nullptr /* service_config_json */, r->combiner);
+}
+
static void grpc_resolve_address_ares_impl(const char* name,
const char* default_port,
grpc_pollset_set* interested_parties,
@@ -577,14 +594,18 @@ static void grpc_resolve_address_ares_impl(const char* name,
grpc_resolve_address_ares_request* r =
static_cast<grpc_resolve_address_ares_request*>(
gpr_zalloc(sizeof(grpc_resolve_address_ares_request)));
+ r->combiner = grpc_combiner_create();
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
grpc_schedule_on_exec_ctx);
- grpc_dns_lookup_ares(nullptr /* dns_server */, name, default_port,
- interested_parties, &r->on_dns_lookup_done, &r->lb_addrs,
- false /* check_grpclb */,
- nullptr /* service_config_json */);
+ r->name = name;
+ r->default_port = default_port;
+ r->interested_parties = interested_parties;
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_CREATE(grpc_resolve_address_invoke_dns_lookup_ares_locked, r,
+ grpc_combiner_scheduler(r->combiner)),
+ GRPC_ERROR_NONE);
}
void (*grpc_resolve_address_ares)(
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 2d84a038d6..9e93d0cf94 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -48,11 +48,11 @@ extern void (*grpc_resolve_address_ares)(const char* name,
function. \a on_done may be called directly in this function without being
scheduled with \a exec_ctx, so it must not try to acquire locks that are
being held by the caller. */
-extern grpc_ares_request* (*grpc_dns_lookup_ares)(
+extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addresses, bool check_grpclb,
- char** service_config_json);
+ char** service_config_json, grpc_combiner* combiner);
/* Cancel the pending grpc_ares_request \a request */
void grpc_cancel_ares_request(grpc_ares_request* request);
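Because grpc_dns_lookup_ares_locked is an extern function pointer rather than a plain function, tests can inject a replacement; grpc_cancel_ares_request() guards on the pointer still being the real implementation before touching the ev_driver. A hypothetical stub illustrating the override (not part of this patch):

static grpc_ares_request* stub_dns_lookup_ares_locked(
    const char* dns_server, const char* name, const char* default_port,
    grpc_pollset_set* interested_parties, grpc_closure* on_done,
    grpc_lb_addresses** addresses, bool check_grpclb,
    char** service_config_json, grpc_combiner* combiner) {
  // Complete immediately without performing any DNS I/O.
  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
  return nullptr;
}

// Installed once at test setup:
// grpc_dns_lookup_ares_locked = stub_dns_lookup_ares_locked;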
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index 5096e480bc..d6a76fc8b6 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -26,18 +26,19 @@ struct grpc_ares_request {
char val;
};
-static grpc_ares_request* grpc_dns_lookup_ares_impl(
+static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) {
+ grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ grpc_combiner* combiner) {
return NULL;
}
-grpc_ares_request* (*grpc_dns_lookup_ares)(
+grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
- grpc_lb_addresses** addrs, bool check_grpclb,
- char** service_config_json) = grpc_dns_lookup_ares_impl;
+ grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
void grpc_cancel_ares_request(grpc_ares_request* r) {}
diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc
index 86f8699e04..ad3250b7e9 100644
--- a/src/core/lib/channel/handshaker.cc
+++ b/src/core/lib/channel/handshaker.cc
@@ -223,18 +223,23 @@ static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
mgr->index == mgr->count) {
if (error == GRPC_ERROR_NONE && mgr->shutdown) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("handshaker shutdown");
- // TODO(roth): It is currently necessary to shutdown endpoints
- // before destroying then, even when we know that there are no
- // pending read/write callbacks. This should be fixed, at which
- // point this can be removed.
- grpc_endpoint_shutdown(mgr->args.endpoint, GRPC_ERROR_REF(error));
- grpc_endpoint_destroy(mgr->args.endpoint);
- mgr->args.endpoint = nullptr;
- grpc_channel_args_destroy(mgr->args.args);
- mgr->args.args = nullptr;
- grpc_slice_buffer_destroy_internal(mgr->args.read_buffer);
- gpr_free(mgr->args.read_buffer);
- mgr->args.read_buffer = nullptr;
+ // It is possible that the endpoint has already been destroyed by
+ // a shutdown call while this callback was sitting on the ExecCtx
+ // with no error.
+ if (mgr->args.endpoint != nullptr) {
+ // TODO(roth): It is currently necessary to shut down endpoints
+ // before destroying them, even when we know that there are no
+ // pending read/write callbacks. This should be fixed, at which
+ // point this can be removed.
+ grpc_endpoint_shutdown(mgr->args.endpoint, GRPC_ERROR_REF(error));
+ grpc_endpoint_destroy(mgr->args.endpoint);
+ mgr->args.endpoint = nullptr;
+ grpc_channel_args_destroy(mgr->args.args);
+ mgr->args.args = nullptr;
+ grpc_slice_buffer_destroy_internal(mgr->args.read_buffer);
+ gpr_free(mgr->args.read_buffer);
+ mgr->args.read_buffer = nullptr;
+ }
}
if (grpc_handshaker_trace.enabled()) {
gpr_log(GPR_INFO,
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 98ab974057..86a0243d2e 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -346,7 +346,7 @@ static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
}
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
+ const char* reason) {
grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != nullptr);
@@ -359,7 +359,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
descriptor fd->fd (but we still own the grpc_fd structure). */
if (is_release_fd) {
*release_fd = fd->fd;
- } else if (!already_closed) {
+ } else {
close(fd->fd);
}
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 1bb9b81e5a..a9b2adf75d 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -64,7 +64,7 @@
// a keepalive ping timeout issue. We may want to revert https://github
// .com/grpc/grpc/pull/14943 once we figure out the root cause.
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 16
-#define MAX_PROBE_EPOLL_FDS 32
+#define MAX_FDS_IN_CACHE 32
grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
"pollable_refcount");
@@ -78,8 +78,14 @@ typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;
typedef struct pollable pollable;
typedef struct cached_fd {
+ // Set to the grpc_fd's salt value. See the 'salt' variable in grpc_fd for
+ // more details.
intptr_t salt;
+
+ // The underlying fd
int fd;
+
+ // A recency time counter that helps to determine the LRU fd in the cache
uint64_t last_used;
} cached_fd;
@@ -112,10 +118,32 @@ struct pollable {
int event_count;
struct epoll_event events[MAX_EPOLL_EVENTS];
- // Maintain a LRU-eviction cache of fds in this pollable
- cached_fd fd_cache[MAX_PROBE_EPOLL_FDS];
+ // We may be calling pollable_add_fd() on the same (pollable, fd) multiple
+ // times. To prevent pollable_add_fd() from making multiple sys calls to
+ // epoll_ctl() to add the fd, we maintain a cache of what fds are already
+ // present in the underlying epoll-set.
+ //
+ // Since this is not a correctness issue, we do not need to maintain all the
+ // fds in the cache. Hence we just use an LRU cache of size 'MAX_FDS_IN_CACHE'
+ //
+ // NOTE: An ideal implementation of this should do the following:
+ // 1) Add fds to the cache in the pollable_add_fd() function (i.e. whenever
+ // the fd is added to the pollable's epoll set)
+ // 2) Remove the fd from the cache whenever the fd is removed from the
+ // underlying epoll set (i.e. whenever fd_orphan() is called).
+ //
+ // Implementing (2) above (i.e. removing fds from the cache on fd_orphan)
+ // adds a lot of complexity, since an fd can be present in multiple
+ // pollables. So our implementation ONLY DOES (1) and NOT (2).
+ //
+ // The cached_fd.salt variable helps here to maintain correctness (it serves
+ // as an epoch that differentiates one grpc_fd from another even though both
+ // may have the same fd number).
+ //
+ // The following implements an LRU-eviction cache of fds in this pollable.
+ cached_fd fd_cache[MAX_FDS_IN_CACHE];
int fd_cache_size;
- uint64_t fd_cache_counter;
+ uint64_t fd_cache_counter; // Recency timer tick counter
};
static const char* pollable_type_string(pollable_type t) {
@@ -158,15 +186,24 @@ static void pollable_unref(pollable* p, int line, const char* reason);
* Fd Declarations
*/
+// Monotonically increasing epoch counter that is assigned to each grpc_fd.
+// See the description of the 'salt' variable in 'grpc_fd' for more details.
+// TODO(sreek/kpayson): gpr_atm is intptr_t, which may not be wide enough on
+// 32-bit systems. Change this to int64_t - at least on 32-bit systems.
static gpr_atm g_fd_salt;
struct grpc_fd {
int fd;
+
+ // Since fd numbers can be reused (after old fds are closed), this serves as
+ // an epoch that uniquely identifies this fd (i.e. the pair (salt, fd) is
+ // unique until the salt counter (i.e. g_fd_salt) overflows).
intptr_t salt;
- /* refst format:
- bit 0 : 1=Active / 0=Orphaned
- bits 1-n : refcount
- Ref/Unref by two to avoid altering the orphaned bit */
+
+ // refst format:
+ // bit 0 : 1=Active / 0=Orphaned
+ // bits 1-n : refcount
+ // Ref/Unref by two to avoid altering the orphaned bit
gpr_atm refst;
gpr_mu orphan_mu;
@@ -181,13 +218,13 @@ struct grpc_fd {
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
- /* The pollset that last noticed that the fd is readable. The actual type
- * stored in this is (grpc_pollset *) */
+ // The pollset that last noticed that the fd is readable. The actual type
+ // stored in this is (grpc_pollset *)
gpr_atm read_notifier_pollset;
grpc_iomgr_object iomgr_object;
- /* Do we need to track EPOLLERR events separately? */
+ // Do we need to track EPOLLERR events separately?
bool track_err;
};
@@ -404,8 +441,8 @@ static int fd_wrapped_fd(grpc_fd* fd) {
}
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
- bool is_fd_closed = already_closed;
+ const char* reason) {
+ bool is_fd_closed = false;
gpr_mu_lock(&fd->orphan_mu);
@@ -415,7 +452,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
descriptor fd->fd (but we still own the grpc_fd structure). */
if (release_fd != nullptr) {
*release_fd = fd->fd;
- } else if (!is_fd_closed) {
+ } else {
close(fd->fd);
is_fd_closed = true;
}
@@ -447,7 +484,12 @@ static bool fd_is_shutdown(grpc_fd* fd) {
/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
+ if (shutdown(fd->fd, SHUT_RDWR)) {
+ if (errno != ENOTCONN) {
+ gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
+ grpc_fd_wrapped_fd(fd), errno);
+ }
+ }
fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
}
@@ -558,6 +600,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
const int epfd = p->epfd;
gpr_mu_lock(&p->mu);
p->fd_cache_counter++;
+
// Handle the case of overflow for our cache counter by
// reseting the recency-counter on all cache objects
if (p->fd_cache_counter == 0) {
@@ -577,8 +620,9 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
lru_idx = i;
}
}
+
// Add to cache
- if (p->fd_cache_size < MAX_PROBE_EPOLL_FDS) {
+ if (p->fd_cache_size < MAX_FDS_IN_CACHE) {
lru_idx = p->fd_cache_size;
p->fd_cache_size++;
}
@@ -586,6 +630,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
p->fd_cache[lru_idx].salt = fd->salt;
p->fd_cache[lru_idx].last_used = p->fd_cache_counter;
gpr_mu_unlock(&p->mu);
+
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
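The (salt, fd) pair described in the new comments is what makes the fd cache safe against fd-number reuse. An illustrative fragment of the probe pollable_add_fd() performs (field names match the diff; the helper itself is hypothetical and, like the real code, assumes the caller holds p->mu):

// Returns true if this exact grpc_fd (same fd number AND same salt epoch)
// is already in the pollable's epoll set, refreshing its recency counter.
static bool fd_already_in_cache(pollable* p, grpc_fd* fd) {
  for (int i = 0; i < p->fd_cache_size; i++) {
    if (p->fd_cache[i].fd == fd->fd && p->fd_cache[i].salt == fd->salt) {
      p->fd_cache[i].last_used = p->fd_cache_counter;
      return true;
    }
  }
  return false;  // caller falls through to epoll_ctl() and a cache insert
}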
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 20512e31ac..2189801c18 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -442,7 +442,6 @@ static void polling_island_remove_all_fds_locked(polling_island* pi,
/* The caller is expected to hold pi->mu lock before calling this function */
static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,
- bool is_fd_closed,
grpc_error** error) {
int err;
size_t i;
@@ -451,16 +450,14 @@ static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,
/* If fd is already closed, then it would have been automatically been removed
from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, nullptr);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
+ err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, nullptr);
+ if (err < 0 && errno != ENOENT) {
+ gpr_asprintf(
+ &err_msg,
+ "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
+ pi->epoll_fd, fd->fd, errno, strerror(errno));
+ append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+ gpr_free(err_msg);
}
for (i = 0; i < pi->fd_cnt; i++) {
@@ -874,7 +871,7 @@ static int fd_wrapped_fd(grpc_fd* fd) {
}
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
+ const char* reason) {
grpc_error* error = GRPC_ERROR_NONE;
polling_island* unref_pi = nullptr;
@@ -895,7 +892,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
before doing this.) */
if (fd->po.pi != nullptr) {
polling_island* pi_latest = polling_island_lock(fd->po.pi);
- polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
+ polling_island_remove_fd_locked(pi_latest, fd, &error);
gpr_mu_unlock(&pi_latest->mu);
unref_pi = fd->po.pi;
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index df6b0e1e89..c9c09881a2 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -425,14 +425,12 @@ static int fd_wrapped_fd(grpc_fd* fd) {
}
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
+ const char* reason) {
fd->on_done_closure = on_done;
fd->released = release_fd != nullptr;
if (release_fd != nullptr) {
*release_fd = fd->fd;
fd->released = true;
- } else if (already_closed) {
- fd->released = true;
}
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 82b21df7ba..1139b3273a 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -209,13 +209,12 @@ int grpc_fd_wrapped_fd(grpc_fd* fd) {
}
void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
- GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %d, %s)",
- grpc_fd_wrapped_fd(fd), on_done, release_fd,
- already_closed, reason);
+ const char* reason) {
+ GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %s)", grpc_fd_wrapped_fd(fd),
+ on_done, release_fd, reason);
GRPC_FD_TRACE("grpc_fd_orphan, fd:%d closed", grpc_fd_wrapped_fd(fd));
- g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason);
+ g_event_engine->fd_orphan(fd, on_done, release_fd, reason);
}
void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why) {
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index e3996ce365..b4c17fc80d 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -46,7 +46,7 @@ typedef struct grpc_event_engine_vtable {
grpc_fd* (*fd_create)(int fd, const char* name, bool track_err);
int (*fd_wrapped_fd)(grpc_fd* fd);
void (*fd_orphan)(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason);
+ const char* reason);
void (*fd_shutdown)(grpc_fd* fd, grpc_error* why);
void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure);
void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure);
@@ -112,7 +112,7 @@ int grpc_fd_wrapped_fd(grpc_fd* fd);
notify_on_write.
MUST NOT be called with a pollset lock taken */
void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason);
+ const char* reason);
/* Has grpc_fd_shutdown been called on an fd? */
bool grpc_fd_is_shutdown(grpc_fd* fd);
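With the already_closed flag gone, ownership of the descriptor is expressed solely through release_fd. A hedged sketch of the two call shapes under the new signature (the fd arguments are hypothetical):

void orphan_examples(grpc_fd* fd_to_release, grpc_fd* fd_to_close) {
  int released_fd;
  // Hand the raw descriptor back to the caller instead of closing it:
  grpc_fd_orphan(fd_to_release, /*on_done=*/nullptr, &released_fd,
                 "take ownership");
  // Pass nullptr to let the event engine close the descriptor itself:
  grpc_fd_orphan(fd_to_close, /*on_done=*/nullptr, /*release_fd=*/nullptr,
                 "close fd");
}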
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 71e08f1230..296ee74311 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -211,8 +211,7 @@ static void on_writable(void* acp, grpc_error* error) {
finish:
if (fd != nullptr) {
grpc_pollset_set_del_fd(ac->interested_parties, fd);
- grpc_fd_orphan(fd, nullptr, nullptr, false /* already_closed */,
- "tcp_client_orphan");
+ grpc_fd_orphan(fd, nullptr, nullptr, "tcp_client_orphan");
fd = nullptr;
}
done = (--ac->refs == 0);
@@ -305,8 +304,7 @@ void grpc_tcp_client_create_from_prepared_fd(
return;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
- grpc_fd_orphan(fdobj, nullptr, nullptr, false /* already_closed */,
- "tcp_client_connect_error");
+ grpc_fd_orphan(fdobj, nullptr, nullptr, "tcp_client_connect_error");
GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect"));
return;
}
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 43d545846d..9df2e206b2 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -297,7 +297,7 @@ static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
static void tcp_free(grpc_tcp* tcp) {
grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
- false /* already_closed */, "tcp_unref_orphan");
+ "tcp_unref_orphan");
grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
grpc_resource_user_unref(tcp->resource_user);
gpr_free(tcp->peer_string);
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index ce18ff99e6..8ddf684fea 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -150,7 +150,7 @@ static void deactivated_all_ports(grpc_tcp_server* s) {
GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
grpc_schedule_on_exec_ctx);
grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr,
- false /* already_closed */, "tcp_listener_shutdown");
+ "tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 99368020d4..bdb2d0e764 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -300,8 +300,7 @@ void GrpcUdpListener::OrphanFd() {
grpc_schedule_on_exec_ctx);
/* Because at this point, all listening sockets have been shutdown already, no
* need to call OnFdAboutToOrphan() to notify the handler again. */
- grpc_fd_orphan(emfd_, &destroyed_closure_, nullptr,
- false /* already_closed */, "udp_listener_shutdown");
+ grpc_fd_orphan(emfd_, &destroyed_closure_, nullptr, "udp_listener_shutdown");
}
void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) {
diff --git a/src/core/tsi/ssl_transport_security.cc b/src/core/tsi/ssl_transport_security.cc
index 8065a8b185..e66fc9ba03 100644
--- a/src/core/tsi/ssl_transport_security.cc
+++ b/src/core/tsi/ssl_transport_security.cc
@@ -260,14 +260,13 @@ static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
X509_NAME* subject_name = X509_get_subject_name(cert);
int utf8_returned_size = 0;
if (subject_name == nullptr) {
- gpr_log(GPR_ERROR, "Could not get subject name from certificate.");
+ gpr_log(GPR_INFO, "Could not get subject name from certificate.");
return TSI_NOT_FOUND;
}
common_name_index =
X509_NAME_get_index_by_NID(subject_name, NID_commonName, -1);
if (common_name_index == -1) {
- gpr_log(GPR_ERROR,
- "Could not get common name of subject from certificate.");
+ gpr_log(GPR_INFO, "Could not get common name of subject from certificate.");
return TSI_NOT_FOUND;
}
common_name_entry = X509_NAME_get_entry(subject_name, common_name_index);