Diffstat (limited to 'src')
-rw-r--r--  src/core/ext/census/grpc_filter.c | 4
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 304
-rw-r--r--  src/core/ext/client_channel/lb_policy.c | 89
-rw-r--r--  src/core/ext/client_channel/lb_policy.h | 85
-rw-r--r--  src/core/ext/client_channel/lb_policy_factory.h | 1
-rw-r--r--  src/core/ext/client_channel/parse_address.c | 7
-rw-r--r--  src/core/ext/client_channel/resolver.c | 25
-rw-r--r--  src/core/ext/client_channel/resolver.h | 35
-rw-r--r--  src/core/ext/client_channel/resolver_factory.h | 1
-rw-r--r--  src/core/ext/client_channel/resolver_registry.c | 4
-rw-r--r--  src/core/ext/client_channel/resolver_registry.h | 3
-rw-r--r--  src/core/ext/client_channel/subchannel.c | 7
-rw-r--r--  src/core/ext/client_channel/uri_parser.c | 3
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c | 187
-rw-r--r--  src/core/ext/lb_policy/grpclb/load_balancer_api.c | 9
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c | 187
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c | 112
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c | 6
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c | 62
-rw-r--r--  src/core/ext/resolver/sockaddr/sockaddr_resolver.c | 47
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 3
-rw-r--r--  src/core/ext/transport/chttp2/client/insecure/channel_create.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 172
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.c | 1
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 29
-rw-r--r--  src/core/lib/channel/channel_stack.c | 15
-rw-r--r--  src/core/lib/channel/channel_stack.h | 8
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c | 5
-rw-r--r--  src/core/lib/channel/compress_filter.c | 2
-rw-r--r--  src/core/lib/channel/connected_channel.c | 2
-rw-r--r--  src/core/lib/channel/deadline_filter.c | 107
-rw-r--r--  src/core/lib/channel/deadline_filter.h | 13
-rw-r--r--  src/core/lib/channel/handshaker.c | 3
-rw-r--r--  src/core/lib/channel/http_client_filter.c | 2
-rw-r--r--  src/core/lib/channel/http_server_filter.c | 11
-rw-r--r--  src/core/lib/channel/message_size_filter.c | 3
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 3
-rw-r--r--  src/core/lib/http/parser.c | 4
-rw-r--r--  src/core/lib/iomgr/error.h | 4
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c | 338
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 3
-rw-r--r--  src/core/lib/iomgr/pollset.h | 1
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c | 23
-rw-r--r--  src/core/lib/iomgr/pollset_windows.c | 1
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c | 5
-rw-r--r--  src/core/lib/iomgr/socket_utils_windows.c | 4
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c | 17
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c | 10
-rw-r--r--  src/core/lib/iomgr/timer_generic.c | 27
-rw-r--r--  src/core/lib/iomgr/timer_generic.h | 2
-rw-r--r--  src/core/lib/iomgr/timer_uv.c | 12
-rw-r--r--  src/core/lib/iomgr/timer_uv.h | 2
-rw-r--r--  src/core/lib/json/json.c | 3
-rw-r--r--  src/core/lib/security/context/security_context.c | 13
-rw-r--r--  src/core/lib/security/credentials/composite/composite_credentials.c | 12
-rw-r--r--  src/core/lib/security/credentials/credentials.c | 3
-rw-r--r--  src/core/lib/security/credentials/credentials_metadata.c | 3
-rw-r--r--  src/core/lib/security/credentials/fake/fake_credentials.c | 6
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c | 2
-rw-r--r--  src/core/lib/security/credentials/iam/iam_credentials.c | 3
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_credentials.c | 3
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.c | 9
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.c | 6
-rw-r--r--  src/core/lib/security/credentials/plugin/plugin_credentials.c | 6
-rw-r--r--  src/core/lib/security/credentials/ssl/ssl_credentials.c | 6
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c | 2
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 12
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 3
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 2
-rw-r--r--  src/core/lib/slice/slice_hash_table.c | 6
-rw-r--r--  src/core/lib/slice/slice_intern.c | 6
-rw-r--r--  src/core/lib/support/alloc.c | 27
-rw-r--r--  src/core/lib/support/cmdline.c | 6
-rw-r--r--  src/core/lib/support/histogram.c | 3
-rw-r--r--  src/core/lib/support/spinlock.h | 52
-rw-r--r--  src/core/lib/support/subprocess_posix.c | 3
-rw-r--r--  src/core/lib/surface/call.c | 164
-rw-r--r--  src/core/lib/surface/completion_queue.c | 2
-rw-r--r--  src/core/lib/surface/lame_client.c | 2
-rw-r--r--  src/core/lib/surface/server.c | 19
-rw-r--r--  src/core/lib/transport/metadata.c | 6
-rw-r--r--  src/core/lib/transport/transport.h | 1
-rw-r--r--  src/core/lib/tsi/fake_transport_security.c | 6
-rw-r--r--  src/core/lib/tsi/ssl_transport_security.c | 23
-rw-r--r--  src/core/lib/tsi/test_creds/BUILD | 34
-rw-r--r--  src/core/lib/tsi/transport_security.c | 6
-rw-r--r--  src/core/plugin_registry/grpc_cronet_plugin_registry.c | 4
-rw-r--r--  src/cpp/common/channel_filter.h | 4
-rw-r--r--  src/cpp/server/health/default_health_check_service.cc | 4
-rw-r--r--  src/cpp/server/server_cc.cc | 3
-rw-r--r--  src/cpp/server/server_context.cc | 13
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/InteropClient.cs | 2
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/InteropServer.cs | 2
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/QpsWorker.cs | 2
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/StressTestClient.cs | 1
-rw-r--r--  src/csharp/README.md | 29
-rw-r--r--  src/node/ext/completion_queue_threadpool.cc | 43
-rw-r--r--  src/node/ext/server.cc | 67
-rw-r--r--  src/node/ext/server.h | 1
-rw-r--r--  src/node/ext/server_generic.cc | 73
-rw-r--r--  src/node/ext/server_uv.cc | 131
-rw-r--r--  src/node/src/client.js | 9
-rw-r--r--  src/node/src/server.js | 32
-rw-r--r--  src/node/test/surface_test.js | 107
-rw-r--r--  src/php/README.md | 24
-rw-r--r--  src/php/composer.json | 2
-rw-r--r--  src/proto/grpc/reflection/v1alpha/BUILD | 39
-rw-r--r--  src/proto/grpc/testing/echo_messages.proto | 1
-rw-r--r--  src/python/grpcio/grpc/_channel.py | 2
-rw-r--r--  src/python/grpcio/grpc/beta/_connectivity_channel.py | 159
-rw-r--r--  src/python/grpcio_tests/tests/http2/negative_http2_client.py | 2
-rw-r--r--  src/python/grpcio_tests/tests/unit/_invocation_defects_test.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/unit/_metadata_test.py | 6
-rw-r--r--  src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py | 4
-rw-r--r--  src/ruby/ext/grpc/extconf.rb | 1
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.h | 3
119 files changed, 1909 insertions(+), 1348 deletions(-)
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 65cfe1fa90..b80d831557 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -127,7 +127,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
@@ -146,7 +146,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 62fb061bcf..bf64f84772 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -76,24 +76,82 @@ typedef enum {
WAIT_FOR_READY_TRUE
} wait_for_ready_value;
-typedef struct method_parameters {
+typedef struct {
+ gpr_refcount refs;
gpr_timespec timeout;
wait_for_ready_value wait_for_ready;
} method_parameters;
+static method_parameters *method_parameters_ref(
+ method_parameters *method_params) {
+ gpr_ref(&method_params->refs);
+ return method_params;
+}
+
+static void method_parameters_unref(method_parameters *method_params) {
+ if (gpr_unref(&method_params->refs)) {
+ gpr_free(method_params);
+ }
+}
+
static void *method_parameters_copy(void *value) {
- void *new_value = gpr_malloc(sizeof(method_parameters));
- memcpy(new_value, value, sizeof(method_parameters));
- return new_value;
+ return method_parameters_ref(value);
}
-static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *p) {
- gpr_free(p);
+static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
+ method_parameters_unref(value);
}
static const grpc_slice_hash_table_vtable method_parameters_vtable = {
method_parameters_free, method_parameters_copy};
+static bool parse_wait_for_ready(grpc_json *field,
+ wait_for_ready_value *wait_for_ready) {
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return false;
+ }
+ *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
+ : WAIT_FOR_READY_FALSE;
+ return true;
+}
+
+static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
+ if (field->type != GRPC_JSON_STRING) return false;
+ size_t len = strlen(field->value);
+ if (field->value[len - 1] != 's') return false;
+ char *buf = gpr_strdup(field->value);
+ buf[len - 1] = '\0'; // Remove trailing 's'.
+ char *decimal_point = strchr(buf, '.');
+ if (decimal_point != NULL) {
+ *decimal_point = '\0';
+ timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (timeout->tv_nsec == -1) {
+ gpr_free(buf);
+ return false;
+ }
+ // There should always be exactly 3, 6, or 9 fractional digits.
+ int multiplier = 1;
+ switch (strlen(decimal_point + 1)) {
+ case 9:
+ break;
+ case 6:
+ multiplier *= 1000;
+ break;
+ case 3:
+ multiplier *= 1000000;
+ break;
+ default: // Unsupported number of digits.
+ gpr_free(buf);
+ return false;
+ }
+ timeout->tv_nsec *= multiplier;
+ }
+ timeout->tv_sec = gpr_parse_nonnegative_int(buf);
+ gpr_free(buf);
+ if (timeout->tv_sec == -1) return false;
+ return true;
+}
+
static void *method_parameters_create_from_json(const grpc_json *json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
@@ -101,49 +159,14 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
- return NULL;
- }
- wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
- : WAIT_FOR_READY_FALSE;
+ if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
- if (field->type != GRPC_JSON_STRING) return NULL;
- size_t len = strlen(field->value);
- if (field->value[len - 1] != 's') return NULL;
- char *buf = gpr_strdup(field->value);
- buf[len - 1] = '\0'; // Remove trailing 's'.
- char *decimal_point = strchr(buf, '.');
- if (decimal_point != NULL) {
- *decimal_point = '\0';
- timeout.tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
- if (timeout.tv_nsec == -1) {
- gpr_free(buf);
- return NULL;
- }
- // There should always be exactly 3, 6, or 9 fractional digits.
- int multiplier = 1;
- switch (strlen(decimal_point + 1)) {
- case 9:
- break;
- case 6:
- multiplier *= 1000;
- break;
- case 3:
- multiplier *= 1000000;
- break;
- default: // Unsupported number of digits.
- gpr_free(buf);
- return NULL;
- }
- timeout.tv_nsec *= multiplier;
- }
- timeout.tv_sec = gpr_parse_nonnegative_int(buf);
- if (timeout.tv_sec == -1) return NULL;
- gpr_free(buf);
+ if (!parse_timeout(field, &timeout)) return NULL;
}
}
method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
return value;
@@ -183,7 +206,7 @@ typedef struct client_channel_channel_data {
grpc_pollset_set *interested_parties;
/* the following properties are guarded by a mutex since API's require them
- to be instantaniously available */
+ to be instantaneously available */
gpr_mu info_mu;
char *info_lb_policy_name;
/** service config in JSON form */
@@ -200,9 +223,9 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
-static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
- grpc_connectivity_state current_state);
+static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state);
static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand,
@@ -213,7 +236,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
state == GRPC_CHANNEL_SHUTDOWN) &&
chand->lb_policy != NULL) {
/* cancel picks with wait_for_ready=false */
- grpc_lb_policy_cancel_picks(
+ grpc_lb_policy_cancel_picks_locked(
exec_ctx, chand->lb_policy,
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
@@ -230,14 +253,14 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
if (w->lb_policy == w->chand->lb_policy) {
if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
+ grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
w->chand->lb_policy = NULL;
}
set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
GRPC_ERROR_REF(error), "lb_changed");
if (w->state != GRPC_CHANNEL_SHUTDOWN) {
- watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
+ watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
}
}
@@ -245,9 +268,9 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_free(w);
}
-static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
- grpc_connectivity_state current_state) {
+static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
@@ -256,8 +279,8 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_combiner_scheduler(chand->combiner, false));
w->state = current_state;
w->lb_policy = lb_policy;
- grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
- &w->on_changed);
+ grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
+ &w->on_changed);
}
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
@@ -313,13 +336,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
+ lb_policy_args.combiner = chand->combiner;
lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
if (lb_policy != NULL) {
GRPC_LB_POLICY_REF(lb_policy, "config_change");
GRPC_ERROR_UNREF(state_error);
- state =
- grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
+ state = grpc_lb_policy_check_connectivity_locked(exec_ctx, lb_policy,
+ &state_error);
}
// Find service config.
channel_arg =
@@ -383,14 +407,15 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
set_channel_connectivity_state_locked(
exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
if (lb_policy != NULL) {
- watch_lb_policy(exec_ctx, chand, lb_policy, state);
+ watch_lb_policy_locked(exec_ctx, chand, lb_policy, state);
}
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
} else {
if (chand->resolver != NULL) {
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
}
@@ -403,7 +428,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
if (exit_idle) {
- grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
+ grpc_lb_policy_exit_idle_locked(exec_ctx, lb_policy);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
}
@@ -440,7 +465,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_closure_sched(exec_ctx, op->send_ping,
GRPC_ERROR_CREATE("Ping with no load balancing"));
} else {
- grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
+ grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
}
op->send_ping = NULL;
@@ -451,7 +476,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
@@ -518,7 +543,6 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
- memset(chand, 0, sizeof(*chand));
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -550,7 +574,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->resolver = grpc_resolver_create(
exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
new_args != NULL ? new_args : args->channel_args,
- chand->interested_parties);
+ chand->interested_parties, chand->combiner);
if (proxy_name != NULL) gpr_free(proxy_name);
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
if (chand->resolver == NULL) {
@@ -559,13 +583,23 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
+static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resolver *resolver = arg;
+ grpc_resolver_shutdown_locked(exec_ctx, resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
+}
+
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
- GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(shutdown_resolver_locked, chand->resolver,
+ grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
@@ -618,7 +652,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
- wait_for_ready_value wait_for_ready_from_service_config;
+ method_parameters *method_params;
grpc_closure read_service_config;
grpc_error *cancel_error;
@@ -797,8 +831,9 @@ static bool pick_subchannel_locked(
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
- grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
- connected_subchannel, GRPC_ERROR_REF(error));
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
+ connected_subchannel,
+ GRPC_ERROR_REF(error));
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = closure->next_data.next) {
@@ -825,10 +860,11 @@ static bool pick_subchannel_locked(
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
- calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
+ calld->method_params != NULL &&
+ calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api &&
wait_for_ready_set_from_service_config) {
- if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
+ if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
@@ -837,7 +873,7 @@ static bool pick_subchannel_locked(
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
- const bool result = grpc_lb_policy_pick(
+ const bool result = grpc_lb_policy_pick_locked(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
@@ -846,8 +882,9 @@ static bool pick_subchannel_locked(
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
if (chand->resolver != NULL) {
cpa = gpr_malloc(sizeof(*cpa));
@@ -965,10 +1002,9 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
add_waiting_locked(calld, op);
}
-static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_locked", 0);
+static void start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("start_transport_stream_op_locked", 0);
grpc_transport_stream_op *op = arg;
grpc_call_element *elem = op->handler_private.args[0];
@@ -978,7 +1014,7 @@ static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
"start_transport_stream_op");
- GPR_TIMER_END("cc_start_transport_stream_op_locked", 0);
+ GPR_TIMER_END("start_transport_stream_op_locked", 0);
}
/* The logic here is fairly complicated, due to (a) the fact that we
@@ -1018,52 +1054,53 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_closure_sched(
exec_ctx,
grpc_closure_init(&op->handler_private.closure,
- cc_start_transport_stream_op_locked, op,
+ start_transport_stream_op_locked, op,
grpc_combiner_scheduler(chand->combiner, false)),
GRPC_ERROR_NONE);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
+// Sets calld->method_params.
+// If the method params specify a timeout, populates
+// *per_method_deadline and returns true.
+static bool set_call_method_params_from_service_config_locked(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ gpr_timespec *per_method_deadline) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ if (chand->method_params_table != NULL) {
+ calld->method_params = grpc_method_config_table_get(
+ exec_ctx, chand->method_params_table, calld->path);
+ if (calld->method_params != NULL) {
+ method_parameters_ref(calld->method_params);
+ if (gpr_time_cmp(calld->method_params->timeout,
+ gpr_time_0(GPR_TIMESPAN)) != 0) {
+ *per_method_deadline =
+ gpr_time_add(calld->call_start_time, calld->method_params->timeout);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
// Gets data from the service config. Invoked when the resolver returns
// its initial result.
static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// If this is an error, there's no point in looking at the service config.
if (error == GRPC_ERROR_NONE) {
- // Get the method config table from channel data.
- grpc_slice_hash_table *method_params_table = NULL;
- if (chand->method_params_table != NULL) {
- method_params_table =
- grpc_slice_hash_table_ref(chand->method_params_table);
- }
- // If the method config table was present, use it.
- if (method_params_table != NULL) {
- const method_parameters *method_params = grpc_method_config_table_get(
- exec_ctx, method_params_table, calld->path);
- if (method_params != NULL) {
- const bool have_method_timeout =
- gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
- if (have_method_timeout ||
- method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
- if (have_method_timeout) {
- const gpr_timespec per_method_deadline =
- gpr_time_add(calld->call_start_time, method_params->timeout);
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
- calld->deadline = per_method_deadline;
- // Reset deadline timer.
- grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
- }
- }
- if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
- calld->wait_for_ready_from_service_config =
- method_params->wait_for_ready;
- }
- }
+ gpr_timespec per_method_deadline;
+ if (set_call_method_params_from_service_config_locked(
+ exec_ctx, elem, &per_method_deadline)) {
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
- grpc_slice_hash_table_unref(exec_ctx, method_params_table);
}
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
@@ -1078,29 +1115,12 @@ static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
// If the resolver has already returned results, then we can access
// the service config parameters immediately. Otherwise, we need to
// defer that work until the resolver returns an initial result.
- // TODO(roth): This code is almost but not quite identical to the code
- // in read_service_config() above. It would be nice to find a way to
- // combine them, to avoid having to maintain it twice.
if (chand->lb_policy != NULL) {
// We already have a resolver result, so check for service config.
- if (chand->method_params_table != NULL) {
- grpc_slice_hash_table *method_params_table =
- grpc_slice_hash_table_ref(chand->method_params_table);
- method_parameters *method_params = grpc_method_config_table_get(
- exec_ctx, method_params_table, calld->path);
- if (method_params != NULL) {
- if (gpr_time_cmp(method_params->timeout,
- gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
- gpr_timespec per_method_deadline =
- gpr_time_add(calld->call_start_time, method_params->timeout);
- calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
- }
- if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
- calld->wait_for_ready_from_service_config =
- method_params->wait_for_ready;
- }
- }
- grpc_slice_hash_table_unref(exec_ctx, method_params_table);
+ gpr_timespec per_method_deadline;
+ if (set_call_method_params_from_service_config_locked(
+ exec_ctx, elem, &per_method_deadline)) {
+ calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
}
} else {
// We don't yet have a resolver result, so register a callback to
@@ -1123,7 +1143,7 @@ static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// Initialize data members.
@@ -1131,7 +1151,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
+ calld->method_params = NULL;
calld->cancel_error = GRPC_ERROR_NONE;
gpr_atm_rel_store(&calld->subchannel_call, 0);
calld->connected_subchannel = NULL;
@@ -1159,6 +1179,9 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
grpc_slice_unref_internal(exec_ctx, calld->path);
+ if (calld->method_params != NULL) {
+ method_parameters_unref(calld->method_params);
+ }
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
@@ -1204,14 +1227,15 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
channel_data *chand = arg;
if (chand->lb_policy != NULL) {
- grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
+ grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = true;
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
}
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
diff --git a/src/core/ext/client_channel/lb_policy.c b/src/core/ext/client_channel/lb_policy.c
index 90401b586f..aba51add53 100644
--- a/src/core/ext/client_channel/lb_policy.c
+++ b/src/core/ext/client_channel/lb_policy.c
@@ -32,14 +32,17 @@
*/
#include "src/core/ext/client_channel/lb_policy.h"
+#include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable) {
+ const grpc_lb_policy_vtable *vtable,
+ grpc_combiner *combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
+ policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
@@ -71,6 +74,13 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_lb_policy *policy = arg;
+ policy->vtable->shutdown_locked(exec_ctx, policy);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
+}
+
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
@@ -79,10 +89,15 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
- policy->vtable->shutdown(exec_ctx, policy);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(shutdown_locked, policy,
+ grpc_combiner_scheduler(policy->combiner, false)),
+ GRPC_ERROR_NONE);
+ } else {
+ grpc_lb_policy_weak_unref(exec_ctx,
+ policy REF_FUNC_PASS_ARGS("strong-unref"));
}
- grpc_lb_policy_weak_unref(exec_ctx,
- policy REF_FUNC_PASS_ARGS("strong-unref"));
}
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
@@ -95,52 +110,58 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
+ grpc_combiner *combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
+ GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
-int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
- return policy->vtable->pick(exec_ctx, policy, pick_args, target, user_data,
- on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target,
+ void **user_data, grpc_closure *on_complete) {
+ return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
+ user_data, on_complete);
}
-void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- policy->vtable->cancel_pick(exec_ctx, policy, target, error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
+ policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
-void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- policy->vtable->cancel_picks(exec_ctx, policy, initial_metadata_flags_mask,
- initial_metadata_flags_eq, error);
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
+ policy->vtable->cancel_picks_locked(exec_ctx, policy,
+ initial_metadata_flags_mask,
+ initial_metadata_flags_eq, error);
}
-void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
- policy->vtable->exit_idle(exec_ctx, policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy) {
+ policy->vtable->exit_idle_locked(exec_ctx, policy);
}
-void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure) {
- policy->vtable->ping_one(exec_ctx, policy, closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_closure *closure) {
+ policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
-void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure) {
- policy->vtable->notify_on_state_change(exec_ctx, policy, state, closure);
+void grpc_lb_policy_notify_on_state_change_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connectivity_state *state, grpc_closure *closure) {
+ policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
+ closure);
}
-grpc_connectivity_state grpc_lb_policy_check_connectivity(
+grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error) {
- return policy->vtable->check_connectivity(exec_ctx, policy,
- connectivity_error);
+ return policy->vtable->check_connectivity_locked(exec_ctx, policy,
+ connectivity_error);
}
diff --git a/src/core/ext/client_channel/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 120c641edc..3405709c2c 100644
--- a/src/core/ext/client_channel/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -51,6 +51,8 @@ struct grpc_lb_policy {
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set *interested_parties;
+ /* combiner under which lb_policy actions take place */
+ grpc_combiner *combiner;
};
/** Extra arguments for an LB pick */
@@ -69,42 +71,44 @@ typedef struct grpc_lb_policy_pick_args {
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** \see grpc_lb_policy_pick */
- int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete);
+ int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete);
/** \see grpc_lb_policy_cancel_pick */
- void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target, grpc_error *error);
+ void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error);
/** \see grpc_lb_policy_cancel_picks */
- void (*cancel_picks)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq, grpc_error *error);
+ void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error);
/** \see grpc_lb_policy_ping_one */
- void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+ void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
/** Try to enter a READY connectivity state */
- void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** check the current connectivity of the lb_policy */
- grpc_connectivity_state (*check_connectivity)(
+ grpc_connectivity_state (*check_connectivity_locked)(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
- void (*notify_on_state_change)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+ void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connectivity_state *state,
+ grpc_closure *closure);
};
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
@@ -144,7 +148,8 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable);
+ const grpc_lb_policy_vtable *vtable,
+ grpc_combiner *combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@@ -159,43 +164,45 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target,
+ void **user_data, grpc_closure *on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_closure *closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
-void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error);
/** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
-void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+void grpc_lb_policy_notify_on_state_change_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connectivity_state *state, grpc_closure *closure);
-grpc_connectivity_state grpc_lb_policy_check_connectivity(
+grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
diff --git a/src/core/ext/client_channel/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h
index 9b8b03f982..27c12c0d73 100644
--- a/src/core/ext/client_channel/lb_policy_factory.h
+++ b/src/core/ext/client_channel/lb_policy_factory.h
@@ -107,6 +107,7 @@ grpc_arg grpc_lb_addresses_create_channel_arg(
typedef struct grpc_lb_policy_args {
grpc_client_channel_factory *client_channel_factory;
grpc_channel_args *args;
+ grpc_combiner *combiner;
} grpc_lb_policy_args;
struct grpc_lb_policy_factory_vtable {
diff --git a/src/core/ext/client_channel/parse_address.c b/src/core/ext/client_channel/parse_address.c
index b1d55ad0f5..8e4da03de0 100644
--- a/src/core/ext/client_channel/parse_address.c
+++ b/src/core/ext/client_channel/parse_address.c
@@ -49,11 +49,12 @@
int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
-
+ const size_t maxlen = sizeof(un->sun_path);
+ const size_t path_len = strnlen(uri->path, maxlen);
+ if (path_len == maxlen) return 0;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, uri->path);
- resolved_addr->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
-
+ resolved_addr->len = sizeof(*un);
return 1;
}
diff --git a/src/core/ext/client_channel/resolver.c b/src/core/ext/client_channel/resolver.c
index 2ae4fe862e..b1a1faa6c9 100644
--- a/src/core/ext/client_channel/resolver.c
+++ b/src/core/ext/client_channel/resolver.c
@@ -32,10 +32,13 @@
*/
#include "src/core/ext/client_channel/resolver.h"
+#include "src/core/lib/iomgr/combiner.h"
void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable) {
+ const grpc_resolver_vtable *vtable,
+ grpc_combiner *combiner) {
resolver->vtable = vtable;
+ resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
@@ -62,20 +65,24 @@ void grpc_resolver_unref(grpc_resolver *resolver,
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
+ grpc_combiner *combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
+ GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
-void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
- resolver->vtable->shutdown(exec_ctx, resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
-void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- resolver->vtable->channel_saw_error(exec_ctx, resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
-void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete) {
- resolver->vtable->next(exec_ctx, resolver, result, on_complete);
+void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result,
+ grpc_closure *on_complete) {
+ resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}
diff --git a/src/core/ext/client_channel/resolver.h b/src/core/ext/client_channel/resolver.h
index 96ece92b9d..bbba424ca5 100644
--- a/src/core/ext/client_channel/resolver.h
+++ b/src/core/ext/client_channel/resolver.h
@@ -44,14 +44,16 @@ typedef struct grpc_resolver_vtable grpc_resolver_vtable;
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
gpr_refcount refs;
+ grpc_combiner *combiner;
};
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
+ void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
@@ -70,21 +72,30 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable);
+ const grpc_resolver_vtable *vtable,
+ grpc_combiner *combiner);
-void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
- Can be used as a hint that re-resolution is desirable soon. */
-void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+ Can be used as a hint that re-resolution is desirable soon.
+
+ Must be called from the combiner passed as a resolver_arg at construction
+ time.*/
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
If resolution is fatally broken, set \a *result to NULL and
- schedule \a on_complete. */
-void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ schedule \a on_complete.
+
+ Must be called from the combiner passed as a resolver_arg at construction
+ time.*/
+void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result,
+ grpc_closure *on_complete);
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H */
diff --git a/src/core/ext/client_channel/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h
index 3792ddca18..e3cd99ec5a 100644
--- a/src/core/ext/client_channel/resolver_factory.h
+++ b/src/core/ext/client_channel/resolver_factory.h
@@ -50,6 +50,7 @@ typedef struct grpc_resolver_args {
grpc_uri *uri;
const grpc_channel_args *args;
grpc_pollset_set *pollset_set;
+ grpc_combiner *combiner;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index 5110a7cad9..f8e8bc9c39 100644
--- a/src/core/ext/client_channel/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -133,7 +133,8 @@ static grpc_resolver_factory *resolve_factory(const char *target,
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
- grpc_pollset_set *pollset_set) {
+ grpc_pollset_set *pollset_set,
+ grpc_combiner *combiner) {
grpc_uri *uri = NULL;
char *canonical_target = NULL;
grpc_resolver_factory *factory =
@@ -144,6 +145,7 @@ grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
resolver_args.uri = uri;
resolver_args.args = args;
resolver_args.pollset_set = pollset_set;
+ resolver_args.combiner = combiner;
resolver =
grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args);
grpc_uri_destroy(uri);
diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index a4606463eb..e2c189cf0c 100644
--- a/src/core/ext/client_channel/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -65,7 +65,8 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
should not be NULL. */
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
- grpc_pollset_set *pollset_set);
+ grpc_pollset_set *pollset_set,
+ grpc_combiner *combiner);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 09c68a91dd..cb2d2c821d 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -316,8 +316,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
- c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
+ c = gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
@@ -438,7 +437,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&w->subchannel->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
gpr_free(w);
- follow_up->cb(exec_ctx, follow_up->cb_arg, error);
+ grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -765,7 +764,7 @@ grpc_error *grpc_connected_subchannel_create_call(
grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
gpr_timespec deadline, grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
+ *call = gpr_zalloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = con; // Ref is added below.
grpc_error *error =
diff --git a/src/core/ext/client_channel/uri_parser.c b/src/core/ext/client_channel/uri_parser.c
index f8c946b275..7dd7b753cc 100644
--- a/src/core/ext/client_channel/uri_parser.c
+++ b/src/core/ext/client_channel/uri_parser.c
@@ -262,8 +262,7 @@ grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors) {
fragment_end = i;
}
- uri = gpr_malloc(sizeof(*uri));
- memset(uri, 0, sizeof(*uri));
+ uri = gpr_zalloc(sizeof(*uri));
uri->scheme = copy_component(uri_text, scheme_begin, scheme_end);
uri->authority = copy_component(uri_text, authority_begin, authority_end);
uri->path = copy_component(uri_text, path_begin, path_end);
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index 8a2af48328..aea0fcc33d 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -115,6 +115,7 @@
#include "src/core/ext/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
@@ -237,9 +238,7 @@ static void add_pending_pick(pending_pick **root,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
- pending_pick *pp = gpr_malloc(sizeof(*pp));
- memset(pp, 0, sizeof(pending_pick));
- memset(&pp->wrapped_on_complete_arg, 0, sizeof(wrapped_rr_closure_arg));
+ pending_pick *pp = gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -264,9 +263,7 @@ typedef struct pending_ping {
} pending_ping;
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = gpr_malloc(sizeof(*pping));
- memset(pping, 0, sizeof(pending_ping));
- memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
+ pending_ping *pping = gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -285,9 +282,6 @@ typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- /** mutex protecting remaining members */
- gpr_mu mu;
-
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
@@ -557,9 +551,9 @@ static bool pick_from_internal_rr_locked(
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
GPR_ASSERT(rr_policy != NULL);
- const bool pick_done =
- grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
- (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ const bool pick_done = grpc_lb_policy_pick_locked(
+ exec_ctx, rr_policy, pick_args, target, (void **)&wc_arg->lb_token,
+ &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
@@ -590,6 +584,7 @@ static grpc_lb_policy *create_rr_locked(
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
+ args.combiner = glb_policy->base.combiner;
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, serverlist);
@@ -608,8 +603,8 @@ static grpc_lb_policy *create_rr_locked(
return rr;
}
-static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error);
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
@@ -633,8 +628,8 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
grpc_error *new_rr_state_error = NULL;
const grpc_connectivity_state new_rr_state =
- grpc_lb_policy_check_connectivity(exec_ctx, new_rr_policy,
- &new_rr_state_error);
+ grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
+ &new_rr_state_error);
/* Connectivity state is a function of the new RR policy just created */
const bool replace_old_rr = update_lb_connectivity_status_locked(
exec_ctx, glb_policy, new_rr_state, new_rr_state_error);
@@ -675,19 +670,19 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data *rr_connectivity =
- gpr_malloc(sizeof(rr_connectivity_data));
- memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity, grpc_schedule_on_exec_ctx);
+ gpr_zalloc(sizeof(rr_connectivity_data));
+ grpc_closure_init(&rr_connectivity->on_change,
+ glb_rr_connectivity_changed_locked, rr_connectivity,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = new_rr_state;
/* Subscribe to changes to the connectivity of the new RR */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
- grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
+ grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
+ grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
/* Update picks and pings in wait */
pending_pick *pp;
@@ -713,17 +708,16 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
- grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
- &pping->wrapped_notify_arg.wrapper_closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
+ &pping->wrapped_notify_arg.wrapper_closure);
}
}
-static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
rr_connectivity_data *rr_connectivity = arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
- gpr_mu_lock(&glb_policy->mu);
const bool shutting_down = glb_policy->shutting_down;
bool unref_needed = false;
GRPC_ERROR_REF(error);
@@ -740,11 +734,10 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
update_lb_connectivity_status_locked(exec_ctx, glb_policy,
rr_connectivity->state, error);
/* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ grpc_lb_policy_notify_on_state_change_locked(
+ exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
+ &rr_connectivity->on_change);
}
- gpr_mu_unlock(&glb_policy->mu);
if (unref_needed) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"rr_connectivity_cb");
@@ -862,8 +855,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
}
if (num_grpclb_addrs == 0) return NULL;
- glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy));
- memset(glb_policy, 0, sizeof(*glb_policy));
+ glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
@@ -899,8 +891,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
gpr_free(glb_policy);
return NULL;
}
- grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
- gpr_mu_init(&glb_policy->mu);
+ grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
return &glb_policy->base;
@@ -918,13 +909,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- gpr_mu_destroy(&glb_policy->mu);
gpr_free(glb_policy);
}
-static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
@@ -941,7 +930,6 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
grpc_call *lb_call = glb_policy->lb_call;
- gpr_mu_unlock(&glb_policy->mu);
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
@@ -967,11 +955,10 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
-static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -987,16 +974,15 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
-static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -1012,7 +998,6 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
@@ -1025,19 +1010,17 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
query_for_backends_locked(exec_ctx, glb_policy);
}
-static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
- gpr_mu_unlock(&glb_policy->mu);
}
-static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
grpc_closure_sched(
@@ -1048,7 +1031,6 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
glb_policy->deadline = pick_args->deadline;
bool pick_done;
@@ -1059,8 +1041,7 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
- memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1087,53 +1068,43 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pick_done = false;
}
- gpr_mu_unlock(&glb_policy->mu);
return pick_done;
}
-static grpc_connectivity_state glb_check_connectivity(
+static grpc_connectivity_state glb_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_error **connectivity_error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&glb_policy->mu);
- st = grpc_connectivity_state_get(&glb_policy->state_tracker,
- connectivity_error);
- gpr_mu_unlock(&glb_policy->mu);
- return st;
+ return grpc_connectivity_state_get(&glb_policy->state_tracker,
+ connectivity_error);
}
-static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
if (glb_policy->rr_policy) {
- grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy, closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
}
- gpr_mu_unlock(&glb_policy->mu);
}
-static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &glb_policy->state_tracker, current, notify);
-
- gpr_mu_unlock(&glb_policy->mu);
}
-static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error);
+static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
@@ -1162,11 +1133,11 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_grpclb_request_destroy(request);
grpc_closure_init(&glb_policy->lb_on_server_status_received,
- lb_on_server_status_received, glb_policy,
- grpc_schedule_on_exec_ctx);
+ lb_on_server_status_received_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_closure_init(&glb_policy->lb_on_response_received,
- lb_on_response_received, glb_policy,
- grpc_schedule_on_exec_ctx);
+ lb_on_response_received_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
@@ -1261,14 +1232,13 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
- gpr_mu_lock(&glb_policy->mu);
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@@ -1342,20 +1312,17 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
- gpr_mu_unlock(&glb_policy->mu);
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_empty_payload");
}
}
-static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = arg;
- gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->shutting_down) {
if (grpc_lb_glb_trace) {
@@ -1365,15 +1332,13 @@ static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_on_retry_timer");
}
-static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
- gpr_mu_lock(&glb_policy->mu);
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1408,21 +1373,27 @@ static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- grpc_closure_init(&glb_policy->lb_on_call_retry, lb_call_on_retry_timer,
- glb_policy, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(
+ &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
+ glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
- glb_destroy, glb_shutdown, glb_pick,
- glb_cancel_pick, glb_cancel_picks, glb_ping_one,
- glb_exit_idle, glb_check_connectivity, glb_notify_on_state_change};
+ glb_destroy,
+ glb_shutdown_locked,
+ glb_pick_locked,
+ glb_cancel_pick_locked,
+ glb_cancel_picks_locked,
+ glb_ping_one_locked,
+ glb_exit_idle_locked,
+ glb_check_connectivity_locked,
+ glb_notify_on_state_change_locked};
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
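The grpclb hunks above all apply one pattern: the per-policy gpr_mu is removed, every callback and vtable entry that used to take it gains a _locked suffix, and its closure is bound to the policy's combiner via grpc_combiner_scheduler, which serializes execution instead of a lock. A minimal sketch of that pattern follows; the names my_policy, started and on_event are hypothetical and only illustrate the idiom, they are not part of this change.

typedef struct {
  grpc_lb_policy base;   /* base.combiner provides the serialization */
  bool started;          /* guarded by base.combiner, not by a gpr_mu */
  grpc_closure on_event;
} my_policy;

/* Runs under base.combiner, so policy state can be touched without locking. */
static void on_event_locked(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  my_policy *p = arg;
  p->started = true;
}

static void my_policy_init_closures(my_policy *p) {
  /* Bind the closure to the combiner instead of grpc_schedule_on_exec_ctx. */
  grpc_closure_init(&p->on_event, on_event_locked, p,
                    grpc_combiner_scheduler(p->base.combiner, false));
}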
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
index 837e9c1113..3c4604c402 100644
--- a/src/core/ext/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
@@ -62,8 +62,7 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
}
dec_arg->num_servers++;
} else { /* second pass. Actually decode. */
- grpc_grpclb_server *server = gpr_malloc(sizeof(grpc_grpclb_server));
- memset(server, 0, sizeof(grpc_grpclb_server));
+ grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
GPR_ASSERT(dec_arg->num_servers > 0);
if (dec_arg->decoding_idx == 0) { /* first iteration of second pass */
dec_arg->servers =
@@ -160,8 +159,7 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
return NULL;
}
- grpc_grpclb_serverlist *sl = gpr_malloc(sizeof(grpc_grpclb_serverlist));
- memset(sl, 0, sizeof(*sl));
+ grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
sl->num_servers = arg.num_servers;
sl->servers = arg.servers;
if (res.server_list.has_expiration_interval) {
@@ -183,8 +181,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy = gpr_malloc(sizeof(grpc_grpclb_serverlist));
- memset(copy, 0, sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *copy = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
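These allocations show a second recurring cleanup: the gpr_malloc + memset(0) pair is collapsed into a single gpr_zalloc call, which returns zero-initialized memory. An equivalent before/after sketch:

/* old */
grpc_grpclb_serverlist *sl = gpr_malloc(sizeof(*sl));
memset(sl, 0, sizeof(*sl));

/* new: one call, same zero-initialized result */
grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(*sl));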
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 1b965183f6..e2a66d16bd 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -38,6 +38,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -57,11 +58,11 @@ typedef struct {
grpc_closure connectivity_changed;
- /** the selected channel (a grpc_connected_subchannel) */
- gpr_atm selected;
+ /** remaining members are protected by the combiner */
+
+ /** the selected channel */
+ grpc_connected_subchannel *selected;
- /** mutex protecting remaining members */
- gpr_mu mu;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@@ -77,32 +78,24 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-#define GET_SELECTED(p) \
- ((grpc_connected_subchannel *)gpr_atm_acq_load(&(p)->selected))
-
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connected_subchannel *selected = GET_SELECTED(p);
size_t i;
GPR_ASSERT(p->pending_picks == NULL);
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
- if (selected != NULL) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
+ if (p->selected != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
- gpr_mu_destroy(&p->mu);
gpr_free(p);
}
-static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- grpc_connected_subchannel *selected;
- gpr_mu_lock(&p->mu);
- selected = GET_SELECTED(p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
@@ -110,15 +103,14 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
/* cancel subscription */
- if (selected != NULL) {
+ if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
+ exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else if (p->num_subchannels > 0) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
- gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
@@ -128,12 +120,11 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
-static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -150,17 +141,15 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -177,7 +166,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
@@ -192,63 +180,48 @@ static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
&p->connectivity_changed);
}
-static void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- gpr_mu_unlock(&p->mu);
}
-static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
/* Check atomically for a selected channel */
- grpc_connected_subchannel *selected = GET_SELECTED(p);
- if (selected != NULL) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
+ if (p->selected != NULL) {
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
return 1;
}
- /* No subchannel selected yet, so acquire lock and then attempt again */
- gpr_mu_lock(&p->mu);
- selected = GET_SELECTED(p);
- if (selected) {
- gpr_mu_unlock(&p->mu);
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
- return 1;
- } else {
- if (!p->started_picking) {
- start_picking(exec_ctx, p);
- }
- pp = gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->on_complete = on_complete;
- p->pending_picks = pp;
- gpr_mu_unlock(&p->mu);
- return 0;
+ /* No subchannel selected yet, so try again */
+ /* No subchannel selected yet: queue this pick as pending */
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
}
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->target = target;
+ pp->initial_metadata_flags = pick_args->initial_metadata_flags;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ return 0;
}
-static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- pick_first_lb_policy *p = arg;
+static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
size_t i;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
- gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
@@ -258,25 +231,19 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(subchannels);
}
-static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
- grpc_connected_subchannel *selected;
GRPC_ERROR_REF(error);
- gpr_mu_lock(&p->mu);
-
- selected = GET_SELECTED(p);
-
if (p->shutdown) {
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_ERROR_UNREF(error);
return;
- } else if (selected != NULL) {
+ } else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
@@ -286,7 +253,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, p->base.interested_parties,
+ exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
@@ -301,26 +268,21 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel];
- selected =
- grpc_subchannel_get_connected_subchannel(selected_subchannel);
- GPR_ASSERT(selected != NULL);
- GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
+ p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected_subchannel),
+ "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
- gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(destroy_subchannels, p,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ destroy_subchannels_locked(exec_ctx, p);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, p->base.interested_parties,
+ exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
@@ -387,48 +349,44 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
}
- gpr_mu_unlock(&p->mu);
-
GRPC_ERROR_UNREF(error);
}
-static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_error **error) {
+static grpc_connectivity_state pf_check_connectivity_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&p->mu);
- st = grpc_connectivity_state_get(&p->state_tracker, error);
- gpr_mu_unlock(&p->mu);
- return st;
+ return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
- gpr_mu_unlock(&p->mu);
}
-static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connected_subchannel *selected = GET_SELECTED(p);
- if (selected) {
- grpc_connected_subchannel_ping(exec_ctx, selected, closure);
+ if (p->selected) {
+ grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
}
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
- pf_destroy, pf_shutdown, pf_pick,
- pf_cancel_pick, pf_cancel_picks, pf_ping_one,
- pf_exit_idle, pf_check_connectivity, pf_notify_on_state_change};
+ pf_destroy,
+ pf_shutdown_locked,
+ pf_pick_locked,
+ pf_cancel_pick_locked,
+ pf_cancel_picks_locked,
+ pf_ping_one_locked,
+ pf_exit_idle_locked,
+ pf_check_connectivity_locked,
+ pf_notify_on_state_change_locked};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
@@ -451,11 +409,9 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
if (num_addrs == 0) return NULL;
- pick_first_lb_policy *p = gpr_malloc(sizeof(*p));
- memset(p, 0, sizeof(*p));
+ pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
- p->subchannels = gpr_malloc(sizeof(grpc_subchannel *) * num_addrs);
- memset(p->subchannels, 0, sizeof(*p->subchannels) * num_addrs);
+ p->subchannels = gpr_zalloc(sizeof(grpc_subchannel *) * num_addrs);
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; i++) {
@@ -489,10 +445,9 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
p->num_subchannels = subchannel_idx;
- grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
- grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
- grpc_schedule_on_exec_ctx);
- gpr_mu_init(&p->mu);
+ grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
+ grpc_combiner_scheduler(args->combiner, false));
return &p->base;
}
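Besides the _locked renames, pick_first drops the GET_SELECTED acquire-load: with every callback serialized by the combiner, the selected connected subchannel can live in a plain pointer field and be read directly. A sketch of the old versus new access, taken from the hunks above:

/* old: cross-thread read through an atomic */
grpc_connected_subchannel *selected =
    (grpc_connected_subchannel *)gpr_atm_acq_load(&p->selected);

/* new: plain read, valid inside any *_locked callback */
if (p->selected != NULL) {
  *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
}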
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 63e3d033ad..f2d1d46179 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -67,6 +67,7 @@
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -134,7 +135,6 @@ typedef struct {
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- gpr_mu mu;
/** total number of addresses received at creation time */
size_t num_addresses;
@@ -213,8 +213,7 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
* csc to the list of ready subchannels. */
static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
subchannel_data *sd) {
- ready_list *new_elem = gpr_malloc(sizeof(ready_list));
- memset(new_elem, 0, sizeof(ready_list));
+ ready_list *new_elem = gpr_zalloc(sizeof(ready_list));
new_elem->subchannel = sd->subchannel;
new_elem->user_data = sd->user_data;
if (p->ready_list.prev == NULL) {
@@ -293,7 +292,6 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
- gpr_mu_destroy(&p->mu);
elem = p->ready_list.next;
while (elem != NULL && elem != &p->ready_list) {
@@ -309,12 +307,11 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
-static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
size_t i;
- gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
@@ -335,15 +332,13 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
- gpr_mu_unlock(&p->mu);
}
-static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -360,17 +355,15 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -388,11 +381,11 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
@@ -411,23 +404,20 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
}
}
-static void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
if (!p->started_picking) {
- start_picking(exec_ctx, p);
+ start_picking_locked(exec_ctx, p);
}
- gpr_mu_unlock(&p->mu);
}
-static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
- gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
@@ -449,12 +439,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
- gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
- start_picking(exec_ctx, p);
+ start_picking_locked(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
@@ -463,7 +452,6 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
- gpr_mu_unlock(&p->mu);
return 0;
}
}
@@ -538,17 +526,15 @@ static grpc_connectivity_state update_lb_connectivity_status(
return sd->curr_connectivity_state;
}
-static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
GRPC_ERROR_REF(error);
- gpr_mu_lock(&p->mu);
if (p->shutdown) {
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
GRPC_ERROR_UNREF(error);
return;
@@ -645,56 +631,51 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
break;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_error **error) {
+static grpc_connectivity_state rr_check_connectivity_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&p->mu);
- st = grpc_connectivity_state_get(&p->state_tracker, error);
- gpr_mu_unlock(&p->mu);
- return st;
+ return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
- gpr_mu_unlock(&p->mu);
}
-static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *selected;
grpc_connected_subchannel *target;
- gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
- gpr_mu_unlock(&p->mu);
target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
- gpr_mu_unlock(&p->mu);
grpc_closure_sched(exec_ctx, closure,
GRPC_ERROR_CREATE("Round Robin not connected"));
}
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
- rr_destroy, rr_shutdown, rr_pick,
- rr_cancel_pick, rr_cancel_picks, rr_ping_one,
- rr_exit_idle, rr_check_connectivity, rr_notify_on_state_change};
+ rr_destroy,
+ rr_shutdown_locked,
+ rr_pick_locked,
+ rr_cancel_pick_locked,
+ rr_cancel_picks_locked,
+ rr_ping_one_locked,
+ rr_exit_idle_locked,
+ rr_check_connectivity_locked,
+ rr_notify_on_state_change_locked};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@@ -717,12 +698,10 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
if (num_addrs == 0) return NULL;
- round_robin_lb_policy *p = gpr_malloc(sizeof(*p));
- memset(p, 0, sizeof(*p));
+ round_robin_lb_policy *p = gpr_zalloc(sizeof(*p));
p->num_addresses = num_addrs;
- p->subchannels = gpr_malloc(sizeof(*p->subchannels) * num_addrs);
- memset(p->subchannels, 0, sizeof(*p->subchannels) * num_addrs);
+ p->subchannels = gpr_zalloc(sizeof(*p->subchannels) * num_addrs);
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
@@ -749,8 +728,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel != NULL) {
- subchannel_data *sd = gpr_malloc(sizeof(*sd));
- memset(sd, 0, sizeof(*sd));
+ subchannel_data *sd = gpr_zalloc(sizeof(*sd));
p->subchannels[subchannel_idx] = sd;
sd->policy = p;
sd->index = subchannel_idx;
@@ -762,7 +740,8 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
- rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
+ rr_connectivity_changed_locked, sd,
+ grpc_combiner_scheduler(args->combiner, false));
}
}
if (subchannel_idx == 0) {
@@ -779,7 +758,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
- grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
+ grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
@@ -787,7 +766,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}
- gpr_mu_init(&p->mu);
return &p->base;
}
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 7fc88db603..c2750634a5 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -100,10 +100,8 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(call_data));
-
calld->id = (intptr_t)args->call_stack;
grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -154,8 +152,6 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!args->is_last);
channel_data *chand = elem->channel_data;
- memset(chand, 0, sizeof(channel_data));
-
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index c08b53ea04..d227c19c43 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -40,6 +40,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
@@ -63,8 +64,6 @@ typedef struct {
/** pollset_set to drive the name resolution process */
grpc_pollset_set *interested_parties;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** are we currently resolving? */
bool resolving;
/** which version of the result have we published? */
@@ -95,18 +94,20 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
-static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
- dns_destroy, dns_shutdown, dns_channel_saw_error, dns_next};
+ dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
+ dns_next_locked};
-static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -116,25 +117,21 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
GRPC_ERROR_CREATE("Resolver Shutdown"));
r->next_completion = NULL;
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -144,30 +141,26 @@ static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
dns_resolver *r = arg;
- gpr_mu_lock(&r->mu);
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
dns_start_resolving_locked(exec_ctx, r);
}
}
- gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
dns_resolver *r = arg;
grpc_channel_args *result = NULL;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving);
r->resolving = false;
if (r->addresses != NULL) {
@@ -198,8 +191,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
- grpc_closure_init(&r->on_retry, dns_on_retry_timer, r,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
}
if (r->resolved_result != NULL) {
@@ -208,7 +201,6 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@@ -221,7 +213,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->addresses = NULL;
grpc_resolve_address(
exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
- grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
+ grpc_closure_create(dns_on_resolved_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false)),
&r->addresses);
}
@@ -240,7 +233,6 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
- gpr_mu_destroy(&r->mu);
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@@ -262,10 +254,8 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
char *path = args->uri->path;
if (path[0] == '/') ++path;
// Create resolver.
- dns_resolver *r = gpr_malloc(sizeof(dns_resolver));
- memset(r, 0, sizeof(*r));
- gpr_mu_init(&r->mu);
- grpc_resolver_init(&r->base, &dns_resolver_vtable);
+ dns_resolver *r = gpr_zalloc(sizeof(dns_resolver));
+ grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
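The resolvers change along the same lines: grpc_resolver_init now receives the combiner from grpc_resolver_args, the per-resolver mutex goes away, and asynchronous callbacks (retry timer, resolution completion) are scheduled on base.combiner. A minimal sketch under those assumptions; my_resolver, my_on_resolved_locked and the vtable parameter are hypothetical names for illustration only:

typedef struct {
  grpc_resolver base;      /* base.combiner serializes all callbacks */
  grpc_closure on_resolved;
} my_resolver;

static void my_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_error *error) {
  my_resolver *r = arg;    /* already serialized; no gpr_mu needed */
  (void)r;
}

static grpc_resolver *my_create(grpc_exec_ctx *exec_ctx,
                                grpc_resolver_args *args,
                                const grpc_resolver_vtable *vtable) {
  my_resolver *r = gpr_zalloc(sizeof(*r));
  grpc_resolver_init(&r->base, vtable, args->combiner);
  grpc_closure_init(&r->on_resolved, my_on_resolved_locked, r,
                    grpc_combiner_scheduler(r->base.combiner, false));
  return &r->base;
}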
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index a1365f6465..da5923d26f 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -45,6 +45,7 @@
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -58,8 +59,6 @@ typedef struct {
grpc_lb_addresses *addresses;
/** channel args */
grpc_channel_args *channel_args;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** have we published? */
bool published;
/** pending next completion, or NULL */
@@ -73,48 +72,43 @@ static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r);
-static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
- sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
- sockaddr_next};
+ sockaddr_destroy, sockaddr_shutdown_locked,
+ sockaddr_channel_saw_error_locked, sockaddr_next_locked};
-static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_result = NULL;
grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
- gpr_mu_unlock(&r->mu);
}
-static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
r->published = false;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
}
-static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
}
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -131,7 +125,6 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
- gpr_mu_destroy(&r->mu);
grpc_lb_addresses_destroy(exec_ctx, r->addresses);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
@@ -197,12 +190,10 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
/* Instantiate resolver. */
- sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver));
- memset(r, 0, sizeof(*r));
+ sockaddr_resolver *r = gpr_zalloc(sizeof(sockaddr_resolver));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
- gpr_mu_init(&r->mu);
- grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
+ grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
return &r->base;
}
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index d0a762a280..fc5e17d8fc 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -248,8 +248,7 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_connect};
grpc_connector *grpc_chttp2_connector_create() {
- chttp2_connector *c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
+ chttp2_connector *c = gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index 490a0c560e..286232f277 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -73,7 +73,9 @@ static grpc_channel *client_channel_factory_create_channel(
arg.type = GRPC_ARG_STRING;
arg.key = GRPC_ARG_SERVER_URI;
arg.value.string = grpc_resolver_factory_add_default_prefix_if_needed(target);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ const char *to_remove[] = {GRPC_ARG_SERVER_URI};
+ grpc_channel_args *new_args =
+ grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
gpr_free(arg.value.string);
grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
GRPC_CLIENT_CHANNEL, NULL);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index d8c18eb122..825db68c65 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -182,7 +182,9 @@ static grpc_channel *client_channel_factory_create_channel(
arg.type = GRPC_ARG_STRING;
arg.key = GRPC_ARG_SERVER_URI;
arg.value.string = grpc_resolver_factory_add_default_prefix_if_needed(target);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ const char *to_remove[] = {GRPC_ARG_SERVER_URI};
+ grpc_channel_args *new_args =
+ grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
gpr_free(arg.value.string);
grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
GRPC_CLIENT_CHANNEL, NULL);
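Both channel-creation paths now add the canonicalized GRPC_ARG_SERVER_URI with grpc_channel_args_copy_and_add_and_remove, which strips any existing entry for that key before adding the new one, so the resulting args should not end up carrying a duplicate or stale server URI. The pattern, with canonical_target standing in for the prefixed string computed above:

grpc_arg arg;
arg.type = GRPC_ARG_STRING;
arg.key = GRPC_ARG_SERVER_URI;
arg.value.string = canonical_target;
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
    grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);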
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index 0fc180d52f..837b9fd03d 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -222,8 +222,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = gpr_malloc(sizeof(*state));
- memset(state, 0, sizeof(*state));
+ state = gpr_zalloc(sizeof(*state));
grpc_closure_init(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index d1fab25478..da4c7dc7b2 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -48,16 +48,19 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/http2_errors.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/timeout_encoding.h"
+#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
#define DEFAULT_WINDOW 65535
@@ -66,6 +69,10 @@
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
#define DEFAULT_MAX_HEADER_LIST_SIZE (16 * 1024)
+#define DEFAULT_KEEPALIVE_TIME_SECOND INT_MAX
+#define DEFAULT_KEEPALIVE_TIMEOUT_SECOND 20
+#define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
+
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
int grpc_http_trace = 0;
int grpc_flowctl_trace = 0;
@@ -139,6 +146,16 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
+/** keepalive-relevant functions */
+static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+
/*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
@@ -218,8 +235,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
- memset(t, 0, sizeof(*t));
-
t->base.vtable = &vtable;
t->ep = ep;
/* one ref is for destroy */
@@ -255,6 +270,17 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_combiner_scheduler(t->combiner, false));
grpc_closure_init(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner, false));
+ grpc_closure_init(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
+ t, grpc_combiner_scheduler(t->combiner, false));
+ grpc_closure_init(&t->start_keepalive_ping_locked,
+ start_keepalive_ping_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
+ grpc_closure_init(&t->finish_keepalive_ping_locked,
+ finish_keepalive_ping_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
+ grpc_closure_init(&t->keepalive_watchdog_fired_locked,
+ keepalive_watchdog_fired_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
grpc_bdp_estimator_init(&t->bdp_estimator, t->peer_string);
t->last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
@@ -316,6 +342,18 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN),
};
+ /* client-side keepalive setting */
+ t->keepalive_time =
+ DEFAULT_KEEPALIVE_TIME_SECOND == INT_MAX
+ ? gpr_inf_future(GPR_TIMESPAN)
+ : gpr_time_from_seconds(DEFAULT_KEEPALIVE_TIME_SECOND, GPR_TIMESPAN);
+ t->keepalive_timeout =
+ DEFAULT_KEEPALIVE_TIMEOUT_SECOND == INT_MAX
+ ? gpr_inf_future(GPR_TIMESPAN)
+ : gpr_time_from_seconds(DEFAULT_KEEPALIVE_TIMEOUT_SECOND,
+ GPR_TIMESPAN);
+ t->keepalive_permit_without_calls = DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
+
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
@@ -363,6 +401,28 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
t->enable_bdp_probe = grpc_channel_arg_get_integer(
&channel_args->args[i], (grpc_integer_options){1, 0, 1});
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_KEEPALIVE_TIME)) {
+ const int value = grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){DEFAULT_KEEPALIVE_TIME_SECOND, 1, INT_MAX});
+ t->keepalive_time = value == INT_MAX
+ ? gpr_inf_future(GPR_TIMESPAN)
+ : gpr_time_from_seconds(value, GPR_TIMESPAN);
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_KEEPALIVE_TIMEOUT)) {
+ const int value = grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){DEFAULT_KEEPALIVE_TIMEOUT_SECOND, 0,
+ INT_MAX});
+ t->keepalive_timeout = value == INT_MAX
+ ? gpr_inf_future(GPR_TIMESPAN)
+ : gpr_time_from_seconds(value, GPR_TIMESPAN);
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
+ t->keepalive_permit_without_calls =
+ (uint32_t)grpc_channel_arg_get_integer(
+ &channel_args->args[i], (grpc_integer_options){0, 0, 1});
} else {
static const struct {
const char *channel_arg_name;
@@ -414,6 +474,16 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
+ /** Start client-side keepalive pings */
+ if (t->is_client) {
+ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+ grpc_timer_init(
+ exec_ctx, &t->keepalive_ping_timer,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
+ &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ }
+
grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
post_benign_reclaimer(exec_ctx, t);
}
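For reference, a minimal client-side sketch of how the three keepalive channel args parsed above could be supplied. The key macros come from this hunk and are assumed to be visible through the public channel-arg headers; the helper name and the 20s/10s values are invented for illustration, with units in seconds to match the gpr_time_from_seconds conversion above.

#include <grpc/grpc.h>
#include <grpc/support/useful.h>

/* Hypothetical helper: build an insecure channel with client keepalive enabled. */
static grpc_channel *create_channel_with_keepalive(const char *target) {
  grpc_arg args[3];
  args[0].type = GRPC_ARG_INTEGER;
  args[0].key = (char *)GRPC_ARG_HTTP2_KEEPALIVE_TIME;    /* seconds between pings */
  args[0].value.integer = 20;
  args[1].type = GRPC_ARG_INTEGER;
  args[1].key = (char *)GRPC_ARG_HTTP2_KEEPALIVE_TIMEOUT; /* seconds to wait for the ack */
  args[1].value.integer = 10;
  args[2].type = GRPC_ARG_INTEGER;
  args[2].key = (char *)GRPC_ARG_HTTP2_KEEPALIVE_PERMIT_WITHOUT_CALLS;
  args[2].value.integer = 1;                              /* ping even with no active calls */
  grpc_channel_args channel_args = {GPR_ARRAY_SIZE(args), args};
  return grpc_insecure_channel_create(target, &channel_args, NULL);
}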
@@ -458,6 +528,22 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+ if (t->is_client) {
+ switch (t->keepalive_state) {
+ case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING: {
+ grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
+ break;
+ }
+ case GRPC_CHTTP2_KEEPALIVE_STATE_PINGING: {
+ grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
+ grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
+ break;
+ }
+ case GRPC_CHTTP2_KEEPALIVE_STATE_DYING: {
+ break;
+ }
+ }
+ }
/* flush writable stream list to avoid dangling references */
grpc_chttp2_stream *s;
@@ -494,8 +580,6 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
- memset(s, 0, sizeof(*s));
-
s->t = t;
s->refcount = refcount;
/* We reserve one 'active stream' that's dropped when the stream is
@@ -1114,8 +1198,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_chttp2_list_add_waiting_for_concurrency(t, s);
maybe_start_some_streams(exec_ctx, t);
} else {
- grpc_chttp2_cancel_stream(exec_ctx, t, s,
- GRPC_ERROR_CREATE("Transport closed"));
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Transport closed"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);
@@ -1984,6 +2071,77 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
}
+static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
+ if (error == GRPC_ERROR_NONE && !(t->destroying || t->closed)) {
+ if (t->keepalive_permit_without_calls || t->stream_map.count > 0) {
+ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
+ send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
+ &t->start_keepalive_ping_locked,
+ &t->finish_keepalive_ping_locked);
+ } else {
+ GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+ grpc_timer_init(
+ exec_ctx, &t->keepalive_ping_timer,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
+ &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ }
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
+}
+
+static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
+ grpc_timer_init(
+ exec_ctx, &t->keepalive_watchdog_timer,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout),
+ &t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+}
+
+static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
+ if (error == GRPC_ERROR_NONE) {
+ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
+ grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
+ grpc_timer_init(
+ exec_ctx, &t->keepalive_ping_timer,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
+ grpc_closure_create(init_keepalive_ping_locked, t,
+ grpc_combiner_scheduler(t->combiner, false)),
+ gpr_now(GPR_CLOCK_MONOTONIC));
+ }
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
+}
+
+static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
+ if (error == GRPC_ERROR_NONE) {
+ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
+ close_transport_locked(exec_ctx, t,
+ GRPC_ERROR_CREATE("keepalive watchdog timeout"));
+ }
+ } else {
+ /** The watchdog timer should have been cancelled by
+ finish_keepalive_ping_locked. */
+ if (error != GRPC_ERROR_CANCELLED) {
+ gpr_log(GPR_ERROR, "keepalive_ping_end state error: %d (expect: %d)",
+ t->keepalive_state, GRPC_CHTTP2_KEEPALIVE_STATE_PINGING);
+ }
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive watchdog");
+}
+
/*******************************************************************************
* CALLBACK LOOP
*/
@@ -2425,7 +2583,7 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
- grpc_chttp2_transport *t = gpr_malloc(sizeof(grpc_chttp2_transport));
+ grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
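The gpr_malloc + memset pairs removed throughout this change are replaced by gpr_zalloc. A minimal sketch of the contract callers now rely on, assuming gpr_zalloc behaves like gpr_malloc followed by zeroing (the real implementation lives in the gpr support library):

#include <string.h>
#include <grpc/support/alloc.h>

/* Sketch only: allocate `size` bytes and return them zero-initialized, so
   constructors such as init_transport/init_stream can skip their memset. */
static void *zalloc_sketch(size_t size) {
  void *p = gpr_malloc(size); /* gpr_malloc aborts on allocation failure */
  memset(p, 0, size);
  return p;
}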
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 63df8e135f..84586cd998 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -173,6 +173,7 @@ static void add_header_data(framer_state *st, grpc_slice slice) {
static uint8_t *add_tiny_header_data(framer_state *st, size_t len) {
ensure_space(st, len);
+ st->stats->header_bytes += len;
return grpc_slice_buffer_tiny_add(st->output, len);
}
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 5d41f4bfda..d26812ad6b 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -50,6 +50,7 @@
#include "src/core/ext/transport/chttp2/transport/stream_map.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/transport/bdp_estimator.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/pid_controller.h"
@@ -208,6 +209,12 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
+typedef enum {
+ GRPC_CHTTP2_KEEPALIVE_STATE_WAITING,
+ GRPC_CHTTP2_KEEPALIVE_STATE_PINGING,
+ GRPC_CHTTP2_KEEPALIVE_STATE_DYING,
+} grpc_chttp2_keepalive_state;
+
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
@@ -382,6 +389,28 @@ struct grpc_chttp2_transport {
grpc_closure benign_reclaimer_locked;
/** destructive cleanup closure */
grpc_closure destructive_reclaimer_locked;
+
+ /* keep-alive ping support */
+ /** Closure to initialize a keepalive ping */
+ grpc_closure init_keepalive_ping_locked;
+ /** Closure to run when the keepalive ping is sent */
+ grpc_closure start_keepalive_ping_locked;
+  /** Closure to run when the keepalive ping ack is received */
+  grpc_closure finish_keepalive_ping_locked;
+  /** Closure to run when the keepalive ping times out */
+ grpc_closure keepalive_watchdog_fired_locked;
+ /** timer to initiate ping events */
+ grpc_timer keepalive_ping_timer;
+ /** watchdog to kill the transport when waiting for the keepalive ping */
+ grpc_timer keepalive_watchdog_timer;
+ /** time duration in between pings */
+ gpr_timespec keepalive_time;
+ /** grace period for a ping to complete before watchdog kicks in */
+ gpr_timespec keepalive_timeout;
+  /** if keepalive pings are allowed when there are no outstanding streams */
+ bool keepalive_permit_without_calls;
+ /** keep-alive state machine state */
+ grpc_chttp2_keepalive_state keepalive_state;
};
typedef enum {
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index ec973d4e7f..3fb2a60ac7 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -173,7 +173,6 @@ grpc_error *grpc_call_stack_init(
grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
- grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
@@ -188,13 +187,15 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
- args.start_time = start_time;
+ const grpc_call_element_args args = {
+ .start_time = start_time,
+ .call_stack = call_stack,
+ .server_transport_data = transport_server_data,
+ .context = context,
+ .path = path,
+ .deadline = deadline,
+ };
for (i = 0; i < count; i++) {
- args.call_stack = call_stack;
- args.server_transport_data = transport_server_data;
- args.context = context;
- args.path = path;
- args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 1cf07d43c2..6d3340bcbf 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -128,10 +128,11 @@ typedef struct {
server_transport_data is an opaque pointer. If it is NULL, this call is
on a client; if it is non-NULL, then it points to memory owned by the
transport and is on the server. Most filters want to ignore this
- argument. */
+ argument.
+ Implementations may assume that elem->call_data is all zeros. */
grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args);
+ const grpc_call_element_args *args);
void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);
@@ -152,7 +153,8 @@ typedef struct {
is what needs initializing.
is_first, is_last designate this elements position in the stack, and are
useful for asserting correct configuration by upper layer code.
- The filter does not need to do any chaining */
+ The filter does not need to do any chaining.
+   Implementations may assume that elem->channel_data is all zeros. */
grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args);
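A hypothetical filter constructor illustrating the init_call_elem contract added above (the sample_* names are invented; the remaining declarations are assumed to come from channel_stack.h and its includes): with call data allocated zeroed, the filter no longer needs its own memset.

#include <stdbool.h>

typedef struct {
  grpc_closure on_complete;  /* starts zeroed */
  bool saw_initial_metadata; /* starts false */
} sample_call_data;

static grpc_error *sample_init_call_elem(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         const grpc_call_element_args *args) {
  sample_call_data *calld = elem->call_data; /* already all zeros; no memset needed */
  (void)exec_ctx;
  (void)args;
  (void)calld;
  return GRPC_ERROR_NONE;
}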
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index 5f9e3b4539..b515b7321a 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -65,8 +65,7 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b = gpr_malloc(sizeof(*b));
- memset(b, 0, sizeof(*b));
+ grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -251,7 +250,7 @@ grpc_error *grpc_channel_stack_builder_finish(
size_t channel_stack_size = grpc_channel_stack_size(filters, num_filters);
// allocate memory, with prefix_bytes followed by channel_stack_size
- *result = gpr_malloc(prefix_bytes + channel_stack_size);
+ *result = gpr_zalloc(prefix_bytes + channel_stack_size);
// fetch a pointer to the channel stack
grpc_channel_stack *channel_stack =
(grpc_channel_stack *)((char *)(*result) + prefix_bytes);
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 22781c7839..aa41014a21 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -274,7 +274,7 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 068c61c92a..29796f7ca7 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -83,7 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r = grpc_transport_init_stream(
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index bc9a2effc2..5a12d62f1d 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -52,9 +52,6 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- deadline_state->timer_pending = false;
- gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
@@ -66,53 +63,64 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
-static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- gpr_timespec deadline) {
- grpc_deadline_state* deadline_state = elem->call_data;
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- // Note: We do not start the timer if there is already a timer
- // pending. This should be okay, because this is only called from two
- // functions exported by this module: grpc_deadline_state_start(), which
- // starts the initial timer, and grpc_deadline_state_reset(), which
- // cancels any pre-existing timer before starting a new one. In
- // particular, we want to ensure that if grpc_deadline_state_start()
- // winds up trying to start the timer after grpc_deadline_state_reset()
- // has already done so, we ignore the value from the former.
- if (!deadline_state->timer_pending &&
- gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
- // Take a reference to the call stack, to be owned by the timer.
- GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- deadline_state->timer_pending = true;
- grpc_closure_init(&deadline_state->timer_callback, timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &deadline_state->timer, deadline,
- &deadline_state->timer_callback,
- gpr_now(GPR_CLOCK_MONOTONIC));
- }
-}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+ if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
+ return;
+ }
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- start_timer_if_needed_locked(exec_ctx, elem, deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ grpc_deadline_timer_state cur_state;
+ grpc_closure* closure = NULL;
+retry:
+ cur_state =
+ (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
+ switch (cur_state) {
+ case GRPC_DEADLINE_STATE_PENDING:
+      // Note: We do not start the timer if there is already a timer pending.
+ return;
+ case GRPC_DEADLINE_STATE_FINISHED:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_FINISHED,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure = grpc_closure_create(timer_callback, elem,
+ grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ case GRPC_DEADLINE_STATE_INITIAL:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ closure =
+ grpc_closure_init(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ }
+ GPR_ASSERT(closure);
+ GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
+ grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
+ gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
-static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_deadline_state* deadline_state) {
- if (deadline_state->timer_pending) {
- grpc_timer_cancel(exec_ctx, &deadline_state->timer);
- deadline_state->timer_pending = false;
- }
-}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED)) {
+ grpc_timer_cancel(exec_ctx, &deadline_state->timer);
+ } else {
+    // The timer was either in STATE_INITIAL (nothing to cancel)
+    // or in STATE_FINISHED (again nothing to cancel).
+ }
}
// Callback run when the call is complete.
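The mutex removed above is replaced by a small atomic state machine. The stand-alone sketch below (names invented; it deliberately omits the FINISHED -> PENDING re-arm path that grpc_deadline_state_reset needs) shows the same arm/cancel races resolved with the gpr atomics used in the filter:

#include <grpc/support/atm.h>

typedef enum { SKETCH_INITIAL = 0, SKETCH_PENDING, SKETCH_FINISHED } sketch_state;

/* Returns non-zero iff this caller won the right to start the timer. */
static int sketch_try_arm(gpr_atm *state) {
  return gpr_atm_rel_cas(state, SKETCH_INITIAL, SKETCH_PENDING);
}

/* Returns non-zero iff there was a pending timer to cancel; INITIAL and
   FINISHED mean there is nothing to cancel, mirroring cancel_timer_if_needed. */
static int sketch_try_cancel(gpr_atm *state) {
  return gpr_atm_rel_cas(state, SKETCH_PENDING, SKETCH_FINISHED);
}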
@@ -120,8 +128,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
- deadline_state->next_on_complete->cb(
- exec_ctx, deadline_state->next_on_complete->cb_arg, error);
+ grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
+ GRPC_ERROR_REF(error));
}
// Inject our own on_complete callback into op.
@@ -136,16 +144,13 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack) {
grpc_deadline_state* deadline_state = elem->call_data;
- memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
- gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
- gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
@@ -187,10 +192,8 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ cancel_timer_if_needed(exec_ctx, deadline_state);
+ start_timer_if_needed(exec_ctx, elem, new_deadline);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -244,9 +247,7 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
- // Note: size of call data is different between client and server.
- memset(elem->call_data, 0, elem->filter->sizeof_call_data);
+ const grpc_call_element_args* args) {
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index bd2b84f79e..72cd5cb929 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -35,16 +35,18 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
+typedef enum grpc_deadline_timer_state {
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED
+} grpc_deadline_timer_state;
+
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- // Guards access to timer_pending and timer.
- gpr_mu timer_mu;
- // True if the timer callback is currently pending.
- bool timer_pending;
- // The deadline timer.
+ gpr_atm timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
@@ -60,6 +62,7 @@ typedef struct grpc_deadline_state {
// elem->call_data is a grpc_deadline_state.
//
+// Assumes elem->call_data is zeroed.
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 82c361c7ef..1b4240bb10 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -99,8 +99,7 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
- grpc_handshake_manager* mgr = gpr_malloc(sizeof(grpc_handshake_manager));
- memset(mgr, 0, sizeof(*mgr));
+ grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 49a2a980e0..c031533dd8 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -386,7 +386,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv_initial_metadata = NULL;
calld->on_done_recv_trailing_metadata = NULL;
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index bb185351a8..fb70de8e96 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -252,12 +252,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
- err);
+ grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
- calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err);
+ grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -268,8 +267,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* do nothing. This is probably a GET request, and payload will be returned
in hs_on_complete callback. */
} else {
- calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
- err);
+ grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
@@ -343,11 +341,10 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
- memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem,
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index 5e22860cfb..b424c0d2ac 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -166,7 +166,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
+ const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
@@ -208,7 +208,6 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
channel_data* chand = elem->channel_data;
- memset(chand, 0, sizeof(*chand));
chand->max_send_size = GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH;
chand->max_recv_size = GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH;
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index f4f6f3c27a..354d2f4a09 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -118,8 +118,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
return GRPC_SECURITY_ERROR;
}
- c = gpr_malloc(sizeof(grpc_httpcli_ssl_channel_security_connector));
- memset(c, 0, sizeof(grpc_httpcli_ssl_channel_security_connector));
+ c = gpr_zalloc(sizeof(grpc_httpcli_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.vtable = &httpcli_ssl_vtable;
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index 2f84adc187..b9c56c103c 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -284,9 +284,9 @@ static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte,
case GRPC_HTTP_HEADERS:
if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) {
if (grpc_http1_trace)
- gpr_log(GPR_ERROR, "HTTP client max line length (%d) exceeded",
+ gpr_log(GPR_ERROR, "HTTP header max line length (%d) exceeded",
GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
- return GRPC_ERROR_NONE;
+ return GRPC_ERROR_CREATE("HTTP header max line length exceeded");
}
parser->cur_line[parser->cur_line_length] = byte;
parser->cur_line_length++;
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index ffacdac393..2613512acb 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -137,8 +137,8 @@ typedef enum {
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
-/// They are always even so that other code (particularly combiner locks) can
-/// safely use the lower bit for themselves.
+/// They are always even so that other code (particularly combiner locks,
+/// polling engines) can safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)2)
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index fac3705142..11208b9ad1 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -68,7 +68,7 @@ static int grpc_polling_trace = 0; /* Disabled by default */
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
-/* Uncomment the following enable extra checks on poll_object operations */
+/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
@@ -140,24 +140,61 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
- /* Indicates that the fd is shutdown and that any pending read/write closures
- should fail */
- bool shutdown;
- grpc_error *shutdown_error; /* reason for shutdown: set iff shutdown==true */
+  /* Internally stores data of type (grpc_error *). If the FD is shutdown, this
+     contains the reason for shutdown (i.e. a pointer to grpc_error) ORed with
+     FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit
+     of a (grpc_error *) address is guaranteed to be zero. Even if the
+     (grpc_error *) is one of the special values such as GRPC_ERROR_NONE or
+     GRPC_ERROR_OOM, the lower bit is guaranteed to be zero.
- /* The fd is either closed or we relinquished control of it. In either cases,
- this indicates that the 'fd' on this structure is no longer valid */
+ Once an fd is shutdown, any pending or future read/write closures on the
+ fd should fail */
+ gpr_atm shutdown_error;
+
+ /* The fd is either closed or we relinquished control of it. In either
+ cases, this indicates that the 'fd' on this structure is no longer
+ valid */
bool orphaned;
- /* TODO: sreek - Move this to a lockfree implementation */
- grpc_closure *read_closure;
- grpc_closure *write_closure;
+ /* Closures to call when the fd is readable or writable respectively. These
+ fields contain one of the following values:
+ CLOSURE_READY : The fd has an I/O event of interest but there is no
+ closure yet to execute
+
+ CLOSURE_NOT_READY : The fd has no I/O event of interest
+
+ closure ptr : The closure to be executed when the fd has an I/O
+ event of interest
+
+ shutdown_error | FD_SHUTDOWN_BIT :
+ 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ This indicates that the fd is shutdown. Since all
+ memory allocations are word-aligned, the lower two
+ bits of the shutdown_error pointer are always 0. So
+ it is safe to OR these with FD_SHUTDOWN_BIT
+
+ Valid state transitions:
+
+ <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ | | ^ | ^ | |
+ | | | | | | |
+ | +--------------4----------+ 6 +---------2---------------+ |
+ | | |
+ | v |
+ +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+
+ For 1, 4 : See set_ready() function
+ For 2, 3 : See notify_on() function
+ For 5,6,7: See set_shutdown() function */
+ gpr_atm read_closure;
+ gpr_atm write_closure;
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
- /* The pollset that last noticed that the fd is readable */
- grpc_pollset *read_notifier_pollset;
+ /* The pollset that last noticed that the fd is readable. The actual type
+ * stored in this is (grpc_pollset *) */
+ gpr_atm read_notifier_pollset;
grpc_iomgr_object iomgr_object;
};
@@ -180,8 +217,10 @@ static void fd_unref(grpc_fd *fd);
static void fd_global_init(void);
static void fd_global_shutdown(void);
-#define CLOSURE_NOT_READY ((grpc_closure *)0)
-#define CLOSURE_READY ((grpc_closure *)1)
+#define CLOSURE_NOT_READY ((gpr_atm)0)
+#define CLOSURE_READY ((gpr_atm)2)
+
+#define FD_SHUTDOWN_BIT 1
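A small illustration of the tagging scheme FD_SHUTDOWN_BIT enables (sketch only; the helper names are invented, and it assumes <stdbool.h> and the word-aligned (grpc_error *) pointers described in the struct comment above):

static gpr_atm tag_shutdown_error(grpc_error *why) {
  return (gpr_atm)why | FD_SHUTDOWN_BIT; /* mark the stored error as "fd shutdown" */
}

static bool is_shutdown_value(gpr_atm v) {
  return (v & FD_SHUTDOWN_BIT) != 0; /* same test fd_is_shutdown performs */
}

static grpc_error *untag_shutdown_error(gpr_atm v) {
  return (grpc_error *)(v & ~(gpr_atm)FD_SHUTDOWN_BIT); /* recover the original pointer */
}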
/*******************************************************************************
* Polling island Declarations
@@ -908,7 +947,11 @@ static void unref_by(grpc_fd *fd, int n) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- if (fd->shutdown) GRPC_ERROR_UNREF(fd->shutdown_error);
+
+ grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+    /* Clear the least significant bit if it is set (in case the fd was shutdown) */
+ err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
+ GRPC_ERROR_UNREF(err);
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -972,13 +1015,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- new_fd->shutdown = false;
+ gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
new_fd->orphaned = false;
- new_fd->read_closure = CLOSURE_NOT_READY;
- new_fd->write_closure = CLOSURE_NOT_READY;
+ gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
+ gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
+ gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
+
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
- new_fd->read_notifier_pollset = NULL;
gpr_mu_unlock(&new_fd->po.mu);
@@ -1060,101 +1104,206 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_ERROR_UNREF(error);
}
-static grpc_error *fd_shutdown_error(grpc_fd *fd) {
- if (!fd->shutdown) {
- return GRPC_ERROR_NONE;
- } else {
- return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1);
+static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+ grpc_closure *closure) {
+ while (true) {
+ /* Fast-path: CLOSURE_NOT_READY -> <closure>.
+ The 'release' cas here matches the 'acquire' load in set_ready and
+ set_shutdown ensuring that the closure (scheduled by set_ready or
+ set_shutdown) happens-after the I/O event on the fd */
+ if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ return; /* Fast-path successful. Return */
+ }
+
+ /* Slowpath. The 'acquire' load matches the 'release' cas in set_ready and
+ set_shutdown */
+ gpr_atm curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_NOT_READY: {
+ break; /* retry */
+ }
+
+ case CLOSURE_READY: {
+ /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ successful. If not, the state most likely transitioned to shutdown.
+ We should retry.
+
+ This can be a no-barrier cas since the state is being transitioned to
+ CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+           closure when transitioning out of the CLOSURE_NOT_READY state (i.e.
+           there is no other code that needs to 'happen-after' this) */
+ if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ return; /* Slow-path successful. Return */
+ }
+
+ break; /* retry */
+ }
+
+ default: {
+        /* 'curr' is either a closure or the fd is shutdown (in which case 'curr'
+ contains a pointer to the shutdown-error). If the fd is shutdown,
+ schedule the closure with the shutdown error */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
+ grpc_closure_sched(
+ exec_ctx, closure,
+ GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ return;
+ }
+
+        /* There is already a closure! This indicates a bug in the code */
+ gpr_log(GPR_ERROR,
+ "notify_on called with a previous callback still pending");
+ abort();
+ }
+ }
}
+
+ GPR_UNREACHABLE_CODE(return );
}
-static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st, grpc_closure *closure) {
- if (fd->shutdown) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready ==> switch to a waiting state by setting the closure */
- *st = closure;
- } else if (*st == CLOSURE_READY) {
- /* already ready ==> queue the closure to run immediately */
- *st = CLOSURE_NOT_READY;
- grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
- } else {
- /* upcallptr was set to a different closure. This is an error! */
- gpr_log(GPR_ERROR,
- "User called a notify_on function with a previous callback still "
- "pending");
- abort();
+static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+ grpc_error *shutdown_err) {
+  /* Try the fast-path first (i.e. expect the current value to be
+     CLOSURE_NOT_READY) */
+ gpr_atm curr = CLOSURE_NOT_READY;
+ gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+
+ while (true) {
+ /* The 'release' cas here matches the 'acquire' load in notify_on to ensure
+ that the closure it schedules 'happens-after' the set_shutdown is called
+ on the fd */
+ if (gpr_atm_rel_cas(state, curr, new_state)) {
+ return; /* Fast-path successful. Return */
+ }
+
+ /* Fallback to slowpath. This 'acquire' load matches the 'release' cas in
+ notify_on and set_ready */
+ curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_READY: {
+ break; /* retry */
+ }
+
+ case CLOSURE_NOT_READY: {
+ break; /* retry */
+ }
+
+ default: {
+ /* 'curr' is either a closure or the fd is already shutdown */
+
+ /* If fd is already shutdown, we are done */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ return;
+ }
+
+ /* Fd is not shutdown. Schedule the closure and move the state to
+ shutdown state. The 'release' cas here matches the 'acquire' load in
+ notify_on to ensure that the closure it schedules 'happens-after'
+ the set_shutdown is called on the fd */
+ if (gpr_atm_rel_cas(state, curr, new_state)) {
+ grpc_closure_sched(
+ exec_ctx, (grpc_closure *)curr,
+ GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ return;
+ }
+
+ /* 'curr' was a closure but now changed to a different state. We will
+ have to retry */
+ break;
+ }
+ }
}
+
+ GPR_UNREACHABLE_CODE(return );
}
-/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st) {
- if (*st == CLOSURE_READY) {
- /* duplicate ready ==> ignore */
- return 0;
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready, and not waiting ==> flag ready */
- *st = CLOSURE_READY;
- return 0;
- } else {
- /* waiting ==> queue closure */
- grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
- *st = CLOSURE_NOT_READY;
- return 1;
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
+  /* Try an optimistic case first (i.e. assume the current state is
+     CLOSURE_NOT_READY).
+
+ This 'release' cas matches the 'acquire' load in notify_on ensuring that
+ any closure (scheduled by notify_on) 'happens-after' the return from
+ epoll_pwait */
+ if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ return; /* early out */
+ }
+
+ /* The 'acquire' load here matches the 'release' cas in notify_on and
+ set_shutdown */
+ gpr_atm curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_READY: {
+ /* Already ready. We are done here */
+ break;
+ }
+
+ case CLOSURE_NOT_READY: {
+ /* The state was not CLOSURE_NOT_READY when we checked initially at the
+ beginning of this function but now it is CLOSURE_NOT_READY again.
+ This is only possible if the state transitioned out of
+ CLOSURE_NOT_READY to either CLOSURE_READY or <some closure> and then
+ back to CLOSURE_NOT_READY again (i.e after we entered this function,
+ the fd became "ready" and the necessary actions were already done).
+ So there is no need to make the state CLOSURE_READY now */
+ break;
+ }
+
+ default: {
+ /* 'curr' is either a closure or the fd is shutdown */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ /* The fd is shutdown. Do nothing */
+ } else if (gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) {
+ /* The cas above was no-barrier since the state is being transitioned to
+ CLOSURE_NOT_READY; notify_on and set_shutdown do not schedule any
+           closures when transitioning out of the CLOSURE_NOT_READY state (i.e.
+           there is no other code that needs to 'happen-after' this) */
+
+ grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+ }
+      /* else the state changed again (only possible by a racing set_ready or
+         set_shutdown). In both cases, the closure would already have been
+         scheduled for execution, so we are done here */
+ break;
+ }
}
}
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
- grpc_pollset *notifier = NULL;
-
- gpr_mu_lock(&fd->po.mu);
- notifier = fd->read_notifier_pollset;
- gpr_mu_unlock(&fd->po.mu);
-
- return notifier;
+ gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
+ return (grpc_pollset *)notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
- gpr_mu_lock(&fd->po.mu);
- const bool r = fd->shutdown;
- gpr_mu_unlock(&fd->po.mu);
- return r;
+ grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+ return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0);
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- gpr_mu_lock(&fd->po.mu);
- /* Do the actual shutdown only once */
- if (!fd->shutdown) {
- fd->shutdown = true;
- fd->shutdown_error = why;
-
+ /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
+ if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
+ (gpr_atm)why | FD_SHUTDOWN_BIT)) {
shutdown(fd->fd, SHUT_RDWR);
- /* Flush any pending read and write closures. Since fd->shutdown is 'true'
- at this point, the closures would be called with 'success = false' */
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
+
+ set_shutdown(exec_ctx, fd, &fd->read_closure, why);
+ set_shutdown(exec_ctx, fd, &fd->write_closure, why);
} else {
+ /* Shutdown already called */
GRPC_ERROR_UNREF(why);
}
- gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->po.mu);
- notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
- gpr_mu_unlock(&fd->po.mu);
+ notify_on(exec_ctx, fd, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->po.mu);
- notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
- gpr_mu_unlock(&fd->po.mu);
+ notify_on(exec_ctx, fd, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
@@ -1343,18 +1492,19 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
- /* Need the fd->po.mu since we might be racing with fd_notify_on_read */
- gpr_mu_lock(&fd->po.mu);
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- fd->read_notifier_pollset = notifier;
- gpr_mu_unlock(&fd->po.mu);
+ set_ready(exec_ctx, fd, &fd->read_closure);
+
+ /* Note, it is possible that fd_become_readable might be called twice with
+ different 'notifier's when an fd becomes readable and it is in two epoll
+ sets (This can happen briefly during polling island merges). In such cases
+     it does not really matter which notifier is set as the read_notifier_pollset
+ (They would both point to the same polling island anyway) */
+ /* Use release store to match with acquire load in fd_get_read_notifier */
+ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- /* Need the fd->po.mu since we might be racing with fd_notify_on_write */
- gpr_mu_lock(&fd->po.mu);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
- gpr_mu_unlock(&fd->po.mu);
+ set_ready(exec_ctx, fd, &fd->write_closure);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@@ -1737,7 +1887,7 @@ retry:
(void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
(void *)bag);
/* No need to lock 'pi_new' here since this is a new polling island
- * and no one has a reference to it yet */
+ and no one has a reference to it yet */
polling_island_remove_all_fds_locked(pi_new, true, &error);
/* Ref and unref so that the polling island gets deleted during unref
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index c03fadaebb..5ddd5313e2 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -1131,8 +1131,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
- memset(pollset_set, 0, sizeof(*pollset_set));
+ grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index c4b62a2790..e19ce697b8 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -53,6 +53,7 @@ typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker grpc_pollset_worker;
size_t grpc_pollset_size(void);
+/* Initialize a pollset: assumes *pollset contains all zeros */
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
/* Begin shutting down the pollset, and call closure when done.
* pollset's mutex must be held */
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index 5847771108..af33949c69 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -57,24 +57,35 @@ int grpc_pollset_work_run_loop;
gpr_mu grpc_polling_mu;
+/* This is used solely to kick the uv loop, by setting a callback to be run
+ immediately in the next loop iteration.
+   Note: if a bug involving missed wakeups turns up in the future, try adding
+   a uv_async_t to kick the loop instead */
+uv_timer_t dummy_uv_handle;
+
size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
+void dummy_timer_cb(uv_timer_t *handle) {}
+
void grpc_pollset_global_init(void) {
gpr_mu_init(&grpc_polling_mu);
+ uv_timer_init(uv_default_loop(), &dummy_uv_handle);
grpc_pollset_work_run_loop = 1;
}
-void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
+static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
+
+void grpc_pollset_global_shutdown(void) {
+ gpr_mu_destroy(&grpc_polling_mu);
+ uv_close((uv_handle_t *)&dummy_uv_handle, timer_close_cb);
+}
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
*mu = &grpc_polling_mu;
- memset(pollset, 0, sizeof(grpc_pollset));
uv_timer_init(uv_default_loop(), &pollset->timer);
pollset->shutting_down = 0;
}
-static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
-
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_ASSERT(!pollset->shutting_down);
@@ -82,6 +93,9 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (grpc_pollset_work_run_loop) {
// Drain any pending UV callbacks without blocking
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ } else {
+ // kick the loop once
+ uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0);
}
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
}
@@ -131,6 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
+ uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 9be73a7ce8..17043c1ea1 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -98,7 +98,6 @@ size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
*mu = &grpc_polling_mu;
- memset(pollset, 0, sizeof(*pollset));
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
&pollset->root_worker;
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 9b5f3209f0..79ff910738 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -113,14 +113,15 @@ static grpc_error *try_split_host_port(const char *name,
/* parse name, splitting it into host and port parts */
grpc_error *error;
gpr_split_host_port(name, host, port);
- if (host == NULL) {
+ if (*host == NULL) {
char *msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
error = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return error;
}
- if (port == NULL) {
+ if (*port == NULL) {
+ // TODO(murgatroid99): add tests for this case
if (default_port == NULL) {
char *msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c
index 628ad4a45b..5bbe8fa34c 100644
--- a/src/core/lib/iomgr/socket_utils_windows.c
+++ b/src/core/lib/iomgr/socket_utils_windows.c
@@ -41,8 +41,12 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+#ifdef GPR_WIN_INET_NTOP
+ return inet_ntop(af, src, dst, size);
+#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
+#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index 5225a5402b..ae66577caf 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -46,6 +46,8 @@
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/iomgr/timer.h"
+extern int grpc_tcp_trace;
+
typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
grpc_timer alarm;
@@ -70,6 +72,12 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
grpc_uv_tcp_connect *connect = acp;
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
+ connect->addr_name, str);
+ grpc_error_free_string(str);
+ }
if (error == GRPC_ERROR_NONE) {
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
it was cancelled, then the handler that cancelled it also should close
@@ -136,8 +144,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
}
}
- connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
- memset(connect, 0, sizeof(grpc_uv_tcp_connect));
+ connect = gpr_zalloc(sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
@@ -145,6 +152,12 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
+
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
+ connect->addr_name);
+ }
+
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
(const struct sockaddr *)resolved_addr->addr,
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 5fb398c50b..5541c62068 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -79,8 +79,6 @@ typedef struct {
grpc_pollset *pollset;
} grpc_tcp;
-static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
-
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp);
@@ -119,6 +117,13 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void uv_close_callback(uv_handle_t *handle) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp *tcp = handle->data;
+ TCP_UNREF(&exec_ctx, tcp, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -313,7 +318,6 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
- TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char *uv_get_peer(grpc_endpoint *ep) {
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 8a5617e7c1..d4df96c214 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -42,6 +42,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"
+#include "src/core/lib/support/spinlock.h"
#define INVALID_HEAP_INDEX 0xffffffffu
@@ -69,7 +70,7 @@ typedef struct {
/* Protects g_shard_queue */
static gpr_mu g_mu;
/* Allow only one run_some_expired_timers at once */
-static gpr_mu g_checker_mu;
+static gpr_spinlock g_checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER;
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
@@ -90,7 +91,6 @@ void grpc_timer_list_init(gpr_timespec now) {
g_initialized = true;
gpr_mu_init(&g_mu);
- gpr_mu_init(&g_checker_mu);
g_clock_type = now.clock_type;
for (i = 0; i < NUM_SHARDS; i++) {
@@ -117,7 +117,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
grpc_timer_heap_destroy(&shard->heap);
}
gpr_mu_destroy(&g_mu);
- gpr_mu_destroy(&g_checker_mu);
g_initialized = false;
}
@@ -180,25 +179,25 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
timer->deadline = deadline;
- timer->triggered = 0;
if (!g_initialized) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(
exec_ctx, timer->closure,
GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
return;
}
+ gpr_mu_lock(&shard->mu);
+ timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&shard->mu);
+ /* early out */
return;
}
- /* TODO(ctiller): check deadline expired */
-
- gpr_mu_lock(&shard->mu);
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
@@ -243,9 +242,9 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
- if (!timer->triggered) {
+ if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
- timer->triggered = 1;
+ timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
} else {
@@ -296,7 +295,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
- timer->triggered = 1;
+ timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
}
@@ -324,7 +323,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
/* TODO(ctiller): verify that there are any timers (atomically) here */
- if (gpr_mu_trylock(&g_checker_mu)) {
+ if (gpr_spinlock_trylock(&g_checker_mu)) {
gpr_mu_lock(&g_mu);
while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
@@ -350,7 +349,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
}
gpr_mu_unlock(&g_mu);
- gpr_mu_unlock(&g_checker_mu);
+ gpr_spinlock_unlock(&g_checker_mu);
} else if (next != NULL) {
/* TODO(ctiller): this forces calling code to do a short poll, and
then retry the timer check (because this time through the timer list was
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index 9d901c7e68..1608dce9fb 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -40,7 +40,7 @@
struct grpc_timer {
gpr_timespec deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
- int triggered;
+ bool pending;
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index fa2cdee964..f28a14405d 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) {
void run_expired_timer(uv_timer_t *handle) {
grpc_timer *timer = (grpc_timer *)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(!timer->triggered);
- timer->triggered = 1;
+ GPR_ASSERT(timer->pending);
+ timer->pending = 0;
grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
@@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_timer_t *uv_timer;
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
- timer->triggered = 0;
+ timer->pending = 1;
timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
uv_timer = gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
@@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
- if (!timer->triggered) {
- timer->triggered = 1;
+ if (timer->pending) {
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
index 13cf8bd4fa..9870cd4a5c 100644
--- a/src/core/lib/iomgr/timer_uv.h
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -41,7 +41,7 @@ struct grpc_timer {
/* This is actually a uv_timer_t*, but we want to keep platform-specific
types out of headers */
void *uv_timer;
- int triggered;
+ int pending;
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
diff --git a/src/core/lib/json/json.c b/src/core/lib/json/json.c
index 48b13686d7..5f7e4ec042 100644
--- a/src/core/lib/json/json.c
+++ b/src/core/lib/json/json.c
@@ -38,8 +38,7 @@
#include "src/core/lib/json/json.h"
grpc_json* grpc_json_create(grpc_json_type type) {
- grpc_json* json = gpr_malloc(sizeof(*json));
- memset(json, 0, sizeof(*json));
+ grpc_json* json = gpr_zalloc(sizeof(*json));
json->type = type;
return json;
diff --git a/src/core/lib/security/context/security_context.c b/src/core/lib/security/context/security_context.c
index fe82fabc31..d29fc55ea0 100644
--- a/src/core/lib/security/context/security_context.c
+++ b/src/core/lib/security/context/security_context.c
@@ -91,10 +91,7 @@ void grpc_auth_context_release(grpc_auth_context *context) {
/* --- grpc_client_security_context --- */
grpc_client_security_context *grpc_client_security_context_create(void) {
- grpc_client_security_context *ctx =
- gpr_malloc(sizeof(grpc_client_security_context));
- memset(ctx, 0, sizeof(grpc_client_security_context));
- return ctx;
+ return gpr_zalloc(sizeof(grpc_client_security_context));
}
void grpc_client_security_context_destroy(void *ctx) {
@@ -112,10 +109,7 @@ void grpc_client_security_context_destroy(void *ctx) {
/* --- grpc_server_security_context --- */
grpc_server_security_context *grpc_server_security_context_create(void) {
- grpc_server_security_context *ctx =
- gpr_malloc(sizeof(grpc_server_security_context));
- memset(ctx, 0, sizeof(grpc_server_security_context));
- return ctx;
+ return gpr_zalloc(sizeof(grpc_server_security_context));
}
void grpc_server_security_context_destroy(void *ctx) {
@@ -132,8 +126,7 @@ void grpc_server_security_context_destroy(void *ctx) {
static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL};
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
- grpc_auth_context *ctx = gpr_malloc(sizeof(grpc_auth_context));
- memset(ctx, 0, sizeof(grpc_auth_context));
+ grpc_auth_context *ctx = gpr_zalloc(sizeof(grpc_auth_context));
gpr_ref_init(&ctx->refcount, 1);
if (chained != NULL) {
ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained");
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index be1588dd8b..e497b9f657 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -115,8 +115,7 @@ static void composite_call_get_request_metadata(
grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
grpc_composite_call_credentials_metadata_context *ctx;
- ctx = gpr_malloc(sizeof(grpc_composite_call_credentials_metadata_context));
- memset(ctx, 0, sizeof(grpc_composite_call_credentials_metadata_context));
+ ctx = gpr_zalloc(sizeof(grpc_composite_call_credentials_metadata_context));
ctx->auth_md_context = auth_md_context;
ctx->user_data = user_data;
ctx->cb = cb;
@@ -158,8 +157,7 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(creds1 != NULL);
GPR_ASSERT(creds2 != NULL);
- c = gpr_malloc(sizeof(grpc_composite_call_credentials));
- memset(c, 0, sizeof(grpc_composite_call_credentials));
+ c = gpr_zalloc(sizeof(grpc_composite_call_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE;
c->base.vtable = &composite_call_credentials_vtable;
gpr_ref_init(&c->base.refcount, 1);
@@ -167,8 +165,7 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
creds2_array = get_creds_array(&creds2);
c->inner.num_creds = creds1_array.num_creds + creds2_array.num_creds;
creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials *);
- c->inner.creds_array = gpr_malloc(creds_array_byte_size);
- memset(c->inner.creds_array, 0, creds_array_byte_size);
+ c->inner.creds_array = gpr_zalloc(creds_array_byte_size);
for (i = 0; i < creds1_array.num_creds; i++) {
grpc_call_credentials *cur_creds = creds1_array.creds_array[i];
c->inner.creds_array[i] = grpc_call_credentials_ref(cur_creds);
@@ -262,8 +259,7 @@ static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
grpc_channel_credentials *grpc_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
void *reserved) {
- grpc_composite_channel_credentials *c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
+ grpc_composite_channel_credentials *c = gpr_zalloc(sizeof(*c));
GPR_ASSERT(channel_creds != NULL && call_creds != NULL && reserved == NULL);
GRPC_API_TRACE(
"grpc_composite_channel_credentials_create(channel_creds=%p, "
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index b24697ce54..52b80141d1 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -57,8 +57,7 @@ grpc_credentials_metadata_request *grpc_credentials_metadata_request_create(
grpc_call_credentials *creds, grpc_credentials_metadata_cb cb,
void *user_data) {
grpc_credentials_metadata_request *r =
- gpr_malloc(sizeof(grpc_credentials_metadata_request));
- memset(&r->response, 0, sizeof(r->response));
+ gpr_zalloc(sizeof(grpc_credentials_metadata_request));
r->creds = grpc_call_credentials_ref(creds);
r->cb = cb;
r->user_data = user_data;
diff --git a/src/core/lib/security/credentials/credentials_metadata.c b/src/core/lib/security/credentials/credentials_metadata.c
index 68da5fb4a8..f11cc58786 100644
--- a/src/core/lib/security/credentials/credentials_metadata.c
+++ b/src/core/lib/security/credentials/credentials_metadata.c
@@ -50,8 +50,7 @@ static void store_ensure_capacity(grpc_credentials_md_store *store) {
grpc_credentials_md_store *grpc_credentials_md_store_create(
size_t initial_capacity) {
grpc_credentials_md_store *store =
- gpr_malloc(sizeof(grpc_credentials_md_store));
- memset(store, 0, sizeof(grpc_credentials_md_store));
+ gpr_zalloc(sizeof(grpc_credentials_md_store));
if (initial_capacity > 0) {
store->entries = gpr_malloc(initial_capacity * sizeof(grpc_credentials_md));
store->allocated = initial_capacity;
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index a0629f76ce..68636ba208 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -71,8 +71,7 @@ static grpc_server_credentials_vtable
grpc_channel_credentials *grpc_fake_transport_security_credentials_create(
void) {
- grpc_channel_credentials *c = gpr_malloc(sizeof(grpc_channel_credentials));
- memset(c, 0, sizeof(grpc_channel_credentials));
+ grpc_channel_credentials *c = gpr_zalloc(sizeof(grpc_channel_credentials));
c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
c->vtable = &fake_transport_security_credentials_vtable;
gpr_ref_init(&c->refcount, 1);
@@ -131,8 +130,7 @@ static grpc_call_credentials_vtable md_only_test_vtable = {
grpc_call_credentials *grpc_md_only_test_credentials_create(
const char *md_key, const char *md_value, int is_async) {
grpc_md_only_test_credentials *c =
- gpr_malloc(sizeof(grpc_md_only_test_credentials));
- memset(c, 0, sizeof(grpc_md_only_test_credentials));
+ gpr_zalloc(sizeof(grpc_md_only_test_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &md_only_test_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index ecd26de9fa..dd44621347 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -112,7 +112,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
on compute engine. */
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
- grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
+ grpc_pollset *pollset = gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(pollset, &g_polling_mu);
detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
detector.is_done = 0;
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.c b/src/core/lib/security/credentials/iam/iam_credentials.c
index abd69a9670..393a433f2e 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.c
+++ b/src/core/lib/security/credentials/iam/iam_credentials.c
@@ -72,8 +72,7 @@ grpc_call_credentials *grpc_google_iam_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(token != NULL);
GPR_ASSERT(authority_selector != NULL);
- c = gpr_malloc(sizeof(grpc_google_iam_credentials));
- memset(c, 0, sizeof(grpc_google_iam_credentials));
+ c = gpr_zalloc(sizeof(grpc_google_iam_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_IAM;
c->base.vtable = &iam_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.c b/src/core/lib/security/credentials/jwt/jwt_credentials.c
index 616be64a54..178ce89aa6 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.c
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.c
@@ -135,8 +135,7 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation");
return NULL;
}
- c = gpr_malloc(sizeof(grpc_service_account_jwt_access_credentials));
- memset(c, 0, sizeof(grpc_service_account_jwt_access_credentials));
+ c = gpr_zalloc(sizeof(grpc_service_account_jwt_access_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_JWT;
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &jwt_vtable;
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index f128177e8c..5c59cf0f4a 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -144,8 +144,7 @@ static void jose_header_destroy(grpc_exec_ctx *exec_ctx, jose_header *h) {
static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx,
grpc_json *json, grpc_slice buffer) {
grpc_json *cur;
- jose_header *h = gpr_malloc(sizeof(jose_header));
- memset(h, 0, sizeof(jose_header));
+ jose_header *h = gpr_zalloc(sizeof(jose_header));
h->buffer = buffer;
for (cur = json->child; cur != NULL; cur = cur->next) {
if (strcmp(cur->key, "alg") == 0) {
@@ -363,8 +362,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
const char *signed_jwt, size_t signed_jwt_len, void *user_data,
grpc_jwt_verification_done_cb cb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- verifier_cb_ctx *ctx = gpr_malloc(sizeof(verifier_cb_ctx));
- memset(ctx, 0, sizeof(verifier_cb_ctx));
+ verifier_cb_ctx *ctx = gpr_zalloc(sizeof(verifier_cb_ctx));
ctx->verifier = verifier;
ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
ctx->header = header;
@@ -878,8 +876,7 @@ error:
grpc_jwt_verifier *grpc_jwt_verifier_create(
const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
size_t num_mappings) {
- grpc_jwt_verifier *v = gpr_malloc(sizeof(grpc_jwt_verifier));
- memset(v, 0, sizeof(grpc_jwt_verifier));
+ grpc_jwt_verifier *v = gpr_zalloc(sizeof(grpc_jwt_verifier));
grpc_httpcli_context_init(&v->http_ctx);
/* We know of at least one mapping. */
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index c0f260f938..ccfb3566c1 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -389,8 +389,7 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation");
return NULL;
}
- c = gpr_malloc(sizeof(grpc_google_refresh_token_credentials));
- memset(c, 0, sizeof(grpc_google_refresh_token_credentials));
+ c = gpr_zalloc(sizeof(grpc_google_refresh_token_credentials));
init_oauth2_token_fetcher(&c->base, refresh_token_fetch_oauth2);
c->base.base.vtable = &refresh_token_vtable;
c->refresh_token = refresh_token;
@@ -450,14 +449,13 @@ static grpc_call_credentials_vtable access_token_vtable = {
grpc_call_credentials *grpc_access_token_credentials_create(
const char *access_token, void *reserved) {
grpc_access_token_credentials *c =
- gpr_malloc(sizeof(grpc_access_token_credentials));
+ gpr_zalloc(sizeof(grpc_access_token_credentials));
char *token_md_value;
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=<redacted>, "
"reserved=%p)",
1, (reserved));
GPR_ASSERT(reserved == NULL);
- memset(c, 0, sizeof(grpc_access_token_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &access_token_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c
index 7bc5dfb403..8416bfa5a8 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c
@@ -126,8 +126,7 @@ static void plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
void *user_data) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
if (c->plugin.get_metadata != NULL) {
- grpc_metadata_plugin_request *request = gpr_malloc(sizeof(*request));
- memset(request, 0, sizeof(*request));
+ grpc_metadata_plugin_request *request = gpr_zalloc(sizeof(*request));
request->user_data = user_data;
request->cb = cb;
c->plugin.get_metadata(c->plugin.state, context,
@@ -142,11 +141,10 @@ static grpc_call_credentials_vtable plugin_vtable = {
grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void *reserved) {
- grpc_plugin_credentials *c = gpr_malloc(sizeof(*c));
+ grpc_plugin_credentials *c = gpr_zalloc(sizeof(*c));
GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);
- memset(c, 0, sizeof(*c));
c->base.type = plugin.type;
c->base.vtable = &plugin_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.c b/src/core/lib/security/credentials/ssl/ssl_credentials.c
index 4eebb7d613..4b17ac8098 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.c
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.c
@@ -121,14 +121,13 @@ static void ssl_build_config(const char *pem_root_certs,
grpc_channel_credentials *grpc_ssl_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
void *reserved) {
- grpc_ssl_credentials *c = gpr_malloc(sizeof(grpc_ssl_credentials));
+ grpc_ssl_credentials *c = gpr_zalloc(sizeof(grpc_ssl_credentials));
GRPC_API_TRACE(
"grpc_ssl_credentials_create(pem_root_certs=%s, "
"pem_key_cert_pair=%p, "
"reserved=%p)",
3, (pem_root_certs, pem_key_cert_pair, reserved));
GPR_ASSERT(reserved == NULL);
- memset(c, 0, sizeof(grpc_ssl_credentials));
c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
c->base.vtable = &ssl_vtable;
gpr_ref_init(&c->base.refcount, 1);
@@ -225,7 +224,7 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
grpc_ssl_client_certificate_request_type client_certificate_request,
void *reserved) {
grpc_ssl_server_credentials *c =
- gpr_malloc(sizeof(grpc_ssl_server_credentials));
+ gpr_zalloc(sizeof(grpc_ssl_server_credentials));
GRPC_API_TRACE(
"grpc_ssl_server_credentials_create_ex("
"pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
@@ -233,7 +232,6 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
client_certificate_request, reserved));
GPR_ASSERT(reserved == NULL);
- memset(c, 0, sizeof(grpc_ssl_server_credentials));
c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &ssl_server_vtable;
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index b9bbe1b304..a23082a866 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -302,7 +302,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index aeb04e33a3..ad083a730f 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -412,8 +412,7 @@ static grpc_security_connector_vtable fake_server_vtable = {
grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
grpc_call_credentials *request_metadata_creds, const char *target,
const grpc_channel_args *args) {
- grpc_fake_channel_security_connector *c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
+ grpc_fake_channel_security_connector *c = gpr_zalloc(sizeof(*c));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
c->base.base.vtable = &fake_channel_vtable;
@@ -435,8 +434,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
grpc_server_security_connector *grpc_fake_server_security_connector_create(
void) {
grpc_server_security_connector *c =
- gpr_malloc(sizeof(grpc_server_security_connector));
- memset(c, 0, sizeof(*c));
+ gpr_zalloc(sizeof(grpc_server_security_connector));
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &fake_server_vtable;
c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
@@ -815,8 +813,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
pem_root_certs_size = config->pem_root_certs_size;
}
- c = gpr_malloc(sizeof(grpc_ssl_channel_security_connector));
- memset(c, 0, sizeof(grpc_ssl_channel_security_connector));
+ c = gpr_zalloc(sizeof(grpc_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.vtable = &ssl_channel_vtable;
@@ -877,8 +874,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
gpr_log(GPR_ERROR, "An SSL server needs a key and a cert.");
goto error;
}
- c = gpr_malloc(sizeof(grpc_ssl_server_security_connector));
- memset(c, 0, sizeof(grpc_ssl_server_security_connector));
+ c = gpr_zalloc(sizeof(grpc_ssl_server_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 5d57543ac5..7065d261ba 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -387,8 +387,7 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
- security_handshaker *h = gpr_malloc(sizeof(security_handshaker));
- memset(h, 0, sizeof(security_handshaker));
+ security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 36e81d6501..14619d97ca 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -197,7 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
diff --git a/src/core/lib/slice/slice_hash_table.c b/src/core/lib/slice/slice_hash_table.c
index 46f807f4a5..219567f36f 100644
--- a/src/core/lib/slice/slice_hash_table.c
+++ b/src/core/lib/slice/slice_hash_table.c
@@ -82,15 +82,13 @@ static void grpc_slice_hash_table_add(
grpc_slice_hash_table* grpc_slice_hash_table_create(
size_t num_entries, grpc_slice_hash_table_entry* entries) {
- grpc_slice_hash_table* table = gpr_malloc(sizeof(*table));
- memset(table, 0, sizeof(*table));
+ grpc_slice_hash_table* table = gpr_zalloc(sizeof(*table));
gpr_ref_init(&table->refs, 1);
// Quadratic probing gets best performance when the table is no more
// than half full.
table->size = num_entries * 2;
const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
- table->entries = gpr_malloc(entry_size);
- memset(table->entries, 0, entry_size);
+ table->entries = gpr_zalloc(entry_size);
for (size_t i = 0; i < num_entries; ++i) {
grpc_slice_hash_table_entry* entry = &entries[i];
grpc_slice_hash_table_add(table, entry->key, entry->value, entry->vtable);
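[Editorial note] The sizing decision above (table->size = 2 * num_entries) relies on the usual property of quadratic probing: at a load factor of one half or less, probe chains stay short. A minimal, generic lookup sketch follows, purely illustrative and not the actual grpc_slice_hash_table code; the entry layout and hash are placeholders.

#include <stdbool.h>
#include <stddef.h>

typedef struct {
  bool in_use;
  size_t hash;
  int value;
} toy_entry;

/* Quadratic-probing lookup over a table of `size` slots. Stops at the first
   empty slot, which is why keeping the table no more than half full matters. */
static bool toy_find(const toy_entry *table, size_t size, size_t hash,
                     int *value_out) {
  for (size_t i = 0; i < size; i++) {
    size_t slot = (hash + i * i) % size; /* quadratic step */
    if (!table[slot].in_use) return false; /* empty slot: key not present */
    if (table[slot].hash == hash) {
      *value_out = table[slot].value;
      return true;
    }
  }
  return false;
}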
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.c
index 32adc4df97..1c00a2d88e 100644
--- a/src/core/lib/slice/slice_intern.c
+++ b/src/core/lib/slice/slice_intern.c
@@ -144,8 +144,7 @@ static void grow_shard(slice_shard *shard) {
GPR_TIMER_BEGIN("grow_strtab", 0);
- strtab = gpr_malloc(sizeof(interned_slice_refcount *) * capacity);
- memset(strtab, 0, sizeof(interned_slice_refcount *) * capacity);
+ strtab = gpr_zalloc(sizeof(interned_slice_refcount *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (s = shard->strs[i]; s; s = next) {
@@ -296,8 +295,7 @@ void grpc_slice_intern_init(void) {
gpr_mu_init(&shard->mu);
shard->count = 0;
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
- memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
+ shard->strs = gpr_zalloc(sizeof(*shard->strs) * shard->capacity);
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
static_metadata_hash[i].hash = 0;
diff --git a/src/core/lib/support/alloc.c b/src/core/lib/support/alloc.c
index 618c3f6acd..9973166217 100644
--- a/src/core/lib/support/alloc.c
+++ b/src/core/lib/support/alloc.c
@@ -36,9 +36,19 @@
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <stdlib.h>
+#include <string.h>
#include "src/core/lib/profiling/timers.h"
-static gpr_allocation_functions g_alloc_functions = {malloc, realloc, free};
+static void *zalloc_with_calloc(size_t sz) { return calloc(sz, 1); }
+
+static void *zalloc_with_gpr_malloc(size_t sz) {
+ void *p = gpr_malloc(sz);
+ memset(p, 0, sz);
+ return p;
+}
+
+static gpr_allocation_functions g_alloc_functions = {malloc, zalloc_with_calloc,
+ realloc, free};
gpr_allocation_functions gpr_get_allocation_functions() {
return g_alloc_functions;
@@ -48,6 +58,9 @@ void gpr_set_allocation_functions(gpr_allocation_functions functions) {
GPR_ASSERT(functions.malloc_fn != NULL);
GPR_ASSERT(functions.realloc_fn != NULL);
GPR_ASSERT(functions.free_fn != NULL);
+ if (functions.zalloc_fn == NULL) {
+ functions.zalloc_fn = zalloc_with_gpr_malloc;
+ }
g_alloc_functions = functions;
}
@@ -63,6 +76,18 @@ void *gpr_malloc(size_t size) {
return p;
}
+void *gpr_zalloc(size_t size) {
+ void *p;
+ if (size == 0) return NULL;
+ GPR_TIMER_BEGIN("gpr_zalloc", 0);
+ p = g_alloc_functions.zalloc_fn(size);
+ if (!p) {
+ abort();
+ }
+ GPR_TIMER_END("gpr_zalloc", 0);
+ return p;
+}
+
void gpr_free(void *p) {
GPR_TIMER_BEGIN("gpr_free", 0);
g_alloc_functions.free_fn(p);
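[Editorial note] A short usage sketch of the new zero-allocating entry point and the zalloc_fn fallback added above. The function and field names (gpr_zalloc, gpr_get_allocation_functions, gpr_set_allocation_functions, malloc_fn/zalloc_fn/realloc_fn/free_fn) come straight from this hunk; the custom allocators are placeholders, and it is assumed the matching prototypes are exposed via <grpc/support/alloc.h>.

#include <grpc/support/alloc.h>
#include <stdlib.h>

/* Placeholder allocators for the sketch. */
static void *my_malloc(size_t sz) { return malloc(sz); }
static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void my_free(void *p) { free(p); }

static void install_custom_allocator(void) {
  gpr_allocation_functions fns = gpr_get_allocation_functions();
  fns.malloc_fn = my_malloc;
  fns.realloc_fn = my_realloc;
  fns.free_fn = my_free;
  fns.zalloc_fn = NULL; /* allowed: falls back to zalloc_with_gpr_malloc */
  gpr_set_allocation_functions(fns);

  void *p = gpr_zalloc(64); /* zero-initialized, no separate memset needed */
  gpr_free(p);
}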
diff --git a/src/core/lib/support/cmdline.c b/src/core/lib/support/cmdline.c
index d47498676d..88a65a8e2e 100644
--- a/src/core/lib/support/cmdline.c
+++ b/src/core/lib/support/cmdline.c
@@ -71,8 +71,7 @@ struct gpr_cmdline {
static int normal_state(gpr_cmdline *cl, char *arg);
gpr_cmdline *gpr_cmdline_create(const char *description) {
- gpr_cmdline *cl = gpr_malloc(sizeof(gpr_cmdline));
- memset(cl, 0, sizeof(gpr_cmdline));
+ gpr_cmdline *cl = gpr_zalloc(sizeof(gpr_cmdline));
cl->description = description;
cl->state = normal_state;
@@ -101,8 +100,7 @@ static void add_arg(gpr_cmdline *cl, const char *name, const char *help,
GPR_ASSERT(0 != strcmp(a->name, name));
}
- a = gpr_malloc(sizeof(arg));
- memset(a, 0, sizeof(arg));
+ a = gpr_zalloc(sizeof(arg));
a->name = name;
a->help = help;
a->type = type;
diff --git a/src/core/lib/support/histogram.c b/src/core/lib/support/histogram.c
index 7b016bbc78..ba8176bb05 100644
--- a/src/core/lib/support/histogram.c
+++ b/src/core/lib/support/histogram.c
@@ -102,8 +102,7 @@ gpr_histogram *gpr_histogram_create(double resolution,
h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
GPR_ASSERT(h->num_buckets > 1);
GPR_ASSERT(h->num_buckets < 100000000);
- h->buckets = gpr_malloc(sizeof(uint32_t) * h->num_buckets);
- memset(h->buckets, 0, sizeof(uint32_t) * h->num_buckets);
+ h->buckets = gpr_zalloc(sizeof(uint32_t) * h->num_buckets);
return h;
}
diff --git a/src/core/lib/support/spinlock.h b/src/core/lib/support/spinlock.h
new file mode 100644
index 0000000000..d8c7c5ffde
--- /dev/null
+++ b/src/core/lib/support/spinlock.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SUPPORT_SPINLOCK_H
+#define GRPC_CORE_LIB_SUPPORT_SPINLOCK_H
+
+#include <grpc/support/atm.h>
+
+/* Simple spinlock. No backoff strategy, gpr_spinlock_lock is almost always
+ a concurrency code smell. */
+typedef struct { gpr_atm atm; } gpr_spinlock;
+
+#define GPR_SPINLOCK_INITIALIZER ((gpr_spinlock){0})
+#define GPR_SPINLOCK_STATIC_INITIALIZER \
+ { 0 }
+#define gpr_spinlock_trylock(lock) (gpr_atm_acq_cas(&(lock)->atm, 0, 1))
+#define gpr_spinlock_unlock(lock) (gpr_atm_rel_store(&(lock)->atm, 0))
+#define gpr_spinlock_lock(lock) \
+ do { \
+ } while (!gpr_spinlock_trylock((lock)))
+
+#endif /* GRPC_CORE_LIB_SUPPORT_SPINLOCK_H */
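[Editorial note] A minimal sketch of how this trylock-only spinlock is meant to be used, mirroring the run_some_expired_timers change in timer_generic.c above: one thread wins the race and does the exclusive work, the rest skip it rather than block. The lock and function names here are placeholders.

#include "src/core/lib/support/spinlock.h"

static gpr_spinlock g_only_one_checker = GPR_SPINLOCK_STATIC_INITIALIZER;

/* Returns 1 if this thread performed the exclusive check, 0 if another
   thread already held the lock and we skipped the work entirely. */
static int maybe_run_exclusive_check(void) {
  if (!gpr_spinlock_trylock(&g_only_one_checker)) return 0;
  /* ... short, non-blocking, exclusive work goes here ... */
  gpr_spinlock_unlock(&g_only_one_checker);
  return 1;
}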
diff --git a/src/core/lib/support/subprocess_posix.c b/src/core/lib/support/subprocess_posix.c
index 4247a1c12b..ed653b9c2e 100644
--- a/src/core/lib/support/subprocess_posix.c
+++ b/src/core/lib/support/subprocess_posix.c
@@ -76,8 +76,7 @@ gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
_exit(1);
return NULL;
} else {
- r = gpr_malloc(sizeof(gpr_subprocess));
- memset(r, 0, sizeof(*r));
+ r = gpr_zalloc(sizeof(gpr_subprocess));
r->pid = pid;
return r;
}
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 48a1e586e1..cc57654ea4 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -101,7 +101,18 @@ typedef struct {
grpc_error *error;
} received_status;
-#define MAX_ERRORS_PER_BATCH 3
+static gpr_atm pack_received_status(received_status r) {
+ return r.is_set ? (1 | (gpr_atm)r.error) : 0;
+}
+
+static received_status unpack_received_status(gpr_atm atm) {
+ return (atm & 1) == 0
+ ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE}
+ : (received_status){.is_set = true,
+ .error = (grpc_error *)(atm & ~(gpr_atm)1)};
+}
+
+#define MAX_ERRORS_PER_BATCH 4
typedef struct batch_control {
grpc_call *call;
@@ -142,8 +153,6 @@ struct grpc_call {
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
- /** bitmask of live batches */
- uint8_t used_batches;
/** which ops are in-flight */
bool sent_initial_metadata;
bool sending_message;
@@ -165,8 +174,8 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
- /* Received call statuses from various sources */
- received_status status[STATUS_SOURCE_COUNT];
+ /* Packed received call statuses from various sources */
+ gpr_atm status[STATUS_SOURCE_COUNT];
/* Call data useful for reporting. Only valid after the call has
* completed */
@@ -245,7 +254,7 @@ static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error);
+ grpc_error *error, bool has_cancelled);
static void add_init_error(grpc_error **composite, grpc_error *new) {
if (new == GRPC_ERROR_NONE) return;
@@ -263,9 +272,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ call = gpr_zalloc(sizeof(grpc_call) + channel_stack->call_stack_size);
*out_call = call;
- memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
call->channel = args->channel;
call->cq = args->cq;
@@ -446,7 +454,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- GRPC_ERROR_UNREF(c->status[i].error);
+ GRPC_ERROR_UNREF(
+ unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error);
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
@@ -614,13 +623,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
*/
static bool get_final_status_from(
- grpc_call *call, status_source from_source, bool allow_ok_status,
+ grpc_call *call, grpc_error *error, bool allow_ok_status,
void (*set_value)(grpc_status_code code, void *user_data),
void *set_value_user_data, grpc_slice *details) {
grpc_status_code code;
const char *msg = NULL;
- grpc_error_get_status(call->status[from_source].error, call->send_deadline,
- &code, &msg, NULL);
+ grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@@ -638,12 +646,15 @@ static void get_final_status(grpc_call *call,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
int i;
+ received_status status[STATUS_SOURCE_COUNT];
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
+ }
if (grpc_call_error_trace) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- gpr_log(GPR_DEBUG, " %d: %s", i,
- grpc_error_string(call->status[i].error));
+ if (status[i].is_set) {
+ gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@@ -653,9 +664,9 @@ static void get_final_status(grpc_call *call,
/* search for the best status we can present: ideally the error we use has a
clearly defined grpc-status, and we'll prefer that. */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set &&
- grpc_error_has_clear_grpc_status(call->status[i].error)) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set &&
+ grpc_error_has_clear_grpc_status(status[i].error)) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -663,8 +674,8 @@ static void get_final_status(grpc_call *call,
}
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -681,12 +692,13 @@ static void get_final_status(grpc_call *call,
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
- if (call->status[source].is_set) {
+ if (!gpr_atm_rel_cas(&call->status[source],
+ pack_received_status((received_status){
+ .is_set = false, .error = GRPC_ERROR_NONE}),
+ pack_received_status((received_status){
+ .is_set = true, .error = error}))) {
GRPC_ERROR_UNREF(error);
- return;
}
- call->status[source].is_set = true;
- call->status[source].error = error;
}
/*******************************************************************************
@@ -997,25 +1009,48 @@ static bool are_initial_metadata_flags_valid(uint32_t flags, bool is_client) {
return !(flags & invalid_positions);
}
-static batch_control *allocate_batch_control(grpc_call *call) {
- size_t i;
- for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
- if ((call->used_batches & (1 << i)) == 0) {
- call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
- return &call->active_batches[i];
- }
+static int batch_slot_for_op(grpc_op_type type) {
+ switch (type) {
+ case GRPC_OP_SEND_INITIAL_METADATA:
+ return 0;
+ case GRPC_OP_SEND_MESSAGE:
+ return 1;
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ return 2;
+ case GRPC_OP_RECV_INITIAL_METADATA:
+ return 3;
+ case GRPC_OP_RECV_MESSAGE:
+ return 4;
+ case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ return 5;
+ }
+ GPR_UNREACHABLE_CODE(return 123456789);
+}
+
+static batch_control *allocate_batch_control(grpc_call *call,
+ const grpc_op *ops,
+ size_t num_ops) {
+ int slot = batch_slot_for_op(ops[0].op);
+ for (size_t i = 1; i < num_ops; i++) {
+ int op_slot = batch_slot_for_op(ops[i].op);
+ slot = GPR_MIN(slot, op_slot);
+ }
+ batch_control *bctl = &call->active_batches[slot];
+ if (bctl->call != NULL) {
+ return NULL;
}
- return NULL;
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ return bctl;
}
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
batch_control *bctl = user_data;
grpc_call *call = bctl->call;
- gpr_mu_lock(&call->mu);
- call->used_batches = (uint8_t)(
- call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
- gpr_mu_unlock(&call->mu);
+ bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
@@ -1098,12 +1133,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
+ bctl->call = NULL;
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
- gpr_mu_lock(&call->mu);
- bctl->call->used_batches =
- (uint8_t)(bctl->call->used_batches &
- ~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
- gpr_mu_unlock(&call->mu);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1191,6 +1222,11 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *call = bctl->call;
gpr_mu_lock(&bctl->call->mu);
if (error != GRPC_ERROR_NONE) {
+ if (call->receiving_stream != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ }
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), true);
cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
GRPC_ERROR_REF(error));
}
@@ -1257,10 +1293,10 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error) {
+ grpc_error *error, bool has_cancelled) {
if (error == GRPC_ERROR_NONE) return;
int idx = (int)gpr_atm_no_barrier_fetch_add(&bctl->num_errors, 1);
- if (idx == 0) {
+ if (idx == 0 && !has_cancelled) {
cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE,
GRPC_ERROR_REF(error));
}
@@ -1274,7 +1310,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
- add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@@ -1311,10 +1347,15 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
- add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
+static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p,
+ grpc_cq_completion *completion) {
+ gpr_free(completion);
+}
+
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, const grpc_op *ops,
size_t nops, void *notify_tag,
@@ -1329,32 +1370,33 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_metadata compression_md;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
-
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
- /* TODO(ctiller): this feels like it could be made lock-free */
- gpr_mu_lock(&call->mu);
- bctl = allocate_batch_control(call);
- memset(bctl, 0, sizeof(*bctl));
- bctl->call = call;
- bctl->notify_tag = notify_tag;
- bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
-
- grpc_transport_stream_op *stream_op = &bctl->op;
- memset(stream_op, 0, sizeof(*stream_op));
- stream_op->covered_by_poller = true;
-
if (nops == 0) {
- GRPC_CALL_INTERNAL_REF(call, "completion");
if (!is_notify_tag_closure) {
grpc_cq_begin_op(call->cq, notify_tag);
+ grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ gpr_malloc(sizeof(grpc_cq_completion)));
+ } else {
+ grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE);
}
- gpr_mu_unlock(&call->mu);
- post_batch_completion(exec_ctx, bctl);
error = GRPC_CALL_OK;
goto done;
}
+ bctl = allocate_batch_control(call, ops, nops);
+ if (bctl == NULL) {
+ return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ }
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+
+ gpr_mu_lock(&call->mu);
+ grpc_transport_stream_op *stream_op = &bctl->op;
+ memset(stream_op, 0, sizeof(*stream_op));
+ stream_op->covered_by_poller = true;
+
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
op = &ops[i];
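[Editorial note] Two of the call.c changes above lean on representation tricks that are easy to miss in the hunks: received statuses are now published through a tagged gpr_atm (low bit = is_set, remaining bits = the grpc_error pointer, assumed to be at least 2-byte aligned), and batch_control slots are keyed by op type via batch_slot_for_op instead of the removed used_batches bitmask. A standalone sketch of the tagging idea, using a plain pointer rather than grpc_error:

#include <stdint.h>

typedef intptr_t tagged_word; /* stand-in for gpr_atm in this sketch */

/* Encode (pointer, is_set) in one word; 0 means "not set". Relies on the
   pointer being at least 2-byte aligned so bit 0 is free for the flag. */
static tagged_word pack_ptr(void *p, int is_set) {
  return is_set ? (tagged_word)((uintptr_t)p | 1) : 0;
}

static void *unpack_ptr(tagged_word w, int *is_set) {
  *is_set = (int)(w & 1);
  return (void *)((uintptr_t)w & ~(uintptr_t)1);
}

A single atomic compare-and-swap from the "unset" encoding (0) to the packed value then gives the first-writer-wins behavior used by set_status_from_error, so statuses can be written and read without holding call->mu.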
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index b80656aa8d..b4594817e4 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -118,7 +118,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
- cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
+ cc = gpr_zalloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
#ifndef NDEBUG
cc->outstanding_tags = NULL;
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 48de0e1d5b..49bc4c114b 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -122,7 +122,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
gpr_atm_no_barrier_store(&calld->filled_metadata, 0);
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 6ab1c0d94d..b360579553 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -540,7 +540,8 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_REF(error));
return;
}
@@ -879,7 +880,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@@ -1015,12 +1016,10 @@ void grpc_server_register_non_listening_completion_queue(
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- grpc_server *server = gpr_malloc(sizeof(grpc_server));
+ grpc_server *server = gpr_zalloc(sizeof(grpc_server));
GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
- memset(server, 0, sizeof(grpc_server));
-
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
@@ -1069,8 +1068,7 @@ void *grpc_server_register_method(
flags);
return NULL;
}
- m = gpr_malloc(sizeof(registered_method));
- memset(m, 0, sizeof(*m));
+ m = gpr_zalloc(sizeof(registered_method));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -1174,8 +1172,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
if (num_registered_methods > 0) {
slots = 2 * num_registered_methods;
alloc = sizeof(channel_registered_method) * slots;
- chand->registered_methods = gpr_malloc(alloc);
- memset(chand->registered_methods, 0, alloc);
+ chand->registered_methods = gpr_zalloc(alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
grpc_slice host;
bool has_host;
@@ -1198,7 +1195,9 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
crm->server_registered_method = rm;
crm->flags = rm->flags;
crm->has_host = has_host;
- crm->host = host;
+ if (has_host) {
+ crm->host = host;
+ }
crm->method = method;
}
GPR_ASSERT(slots <= UINT32_MAX);
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 489c20cbc8..f2417d8c4e 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -130,8 +130,7 @@ void grpc_mdctx_global_init(void) {
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
- memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
+ shard->elems = gpr_zalloc(sizeof(*shard->elems) * shard->capacity);
}
}
@@ -216,8 +215,7 @@ static void grow_mdtab(mdtab_shard *shard) {
GPR_TIMER_BEGIN("grow_mdtab", 0);
- mdtab = gpr_malloc(sizeof(interned_metadata *) * capacity);
- memset(mdtab, 0, sizeof(interned_metadata *) * capacity);
+ mdtab = gpr_zalloc(sizeof(interned_metadata *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index e56bf2780a..bb23c0225a 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -213,6 +213,7 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
/* Initialize transport data for a stream.
Returns 0 on success, any other (transport-defined) value for failure.
+ May assume that stream contains all-zeros.
Arguments:
transport - the transport on which to create this stream
diff --git a/src/core/lib/tsi/fake_transport_security.c b/src/core/lib/tsi/fake_transport_security.c
index 0e20d6fd71..bbe323df3b 100644
--- a/src/core/lib/tsi/fake_transport_security.c
+++ b/src/core/lib/tsi/fake_transport_security.c
@@ -502,8 +502,7 @@ static const tsi_handshaker_vtable handshaker_vtable = {
};
tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
- tsi_fake_handshaker *impl = gpr_malloc(sizeof(*impl));
- memset(impl, 0, sizeof(*impl));
+ tsi_fake_handshaker *impl = gpr_zalloc(sizeof(*impl));
impl->base.vtable = &handshaker_vtable;
impl->is_client = is_client;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
@@ -519,8 +518,7 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
tsi_frame_protector *tsi_create_fake_protector(
size_t *max_protected_frame_size) {
- tsi_fake_frame_protector *impl = gpr_malloc(sizeof(*impl));
- memset(impl, 0, sizeof(*impl));
+ tsi_fake_frame_protector *impl = gpr_zalloc(sizeof(*impl));
impl->max_frame_size = (max_protected_frame_size == NULL)
? TSI_FAKE_DEFAULT_FRAME_SIZE
: *max_protected_frame_size;
diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c
index 366dca9507..53aabdb926 100644
--- a/src/core/lib/tsi/ssl_transport_security.c
+++ b/src/core/lib/tsi/ssl_transport_security.c
@@ -976,9 +976,7 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
if (alpn_selected != NULL) {
size_t i;
tsi_peer_property *new_properties =
- gpr_malloc(sizeof(*new_properties) * (peer->property_count + 1));
- memset(new_properties, 0,
- sizeof(*new_properties) * (peer->property_count + 1));
+ gpr_zalloc(sizeof(*new_properties) * (peer->property_count + 1));
for (i = 0; i < peer->property_count; i++) {
new_properties[i] = peer->properties[i];
}
@@ -1002,8 +1000,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
size_t actual_max_output_protected_frame_size =
TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND;
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
- tsi_ssl_frame_protector *protector_impl = gpr_malloc(sizeof(*protector_impl));
- memset(protector_impl, 0, sizeof(*protector_impl));
+ tsi_ssl_frame_protector *protector_impl = gpr_zalloc(sizeof(*protector_impl));
if (max_output_protected_frame_size != NULL) {
if (*max_output_protected_frame_size >
@@ -1119,8 +1116,7 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
SSL_set_accept_state(ssl);
}
- impl = gpr_malloc(sizeof(*impl));
- memset(impl, 0, sizeof(*impl));
+ impl = gpr_zalloc(sizeof(*impl));
impl->ssl = ssl;
impl->into_ssl = into_ssl;
impl->from_ssl = from_ssl;
@@ -1338,8 +1334,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_INVALID_ARGUMENT;
}
- impl = gpr_malloc(sizeof(*impl));
- memset(impl, 0, sizeof(*impl));
+ impl = gpr_zalloc(sizeof(*impl));
impl->ssl_context = ssl_context;
do {
@@ -1433,17 +1428,13 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
return TSI_INVALID_ARGUMENT;
}
- impl = gpr_malloc(sizeof(*impl));
- memset(impl, 0, sizeof(*impl));
+ impl = gpr_zalloc(sizeof(*impl));
impl->base.create_handshaker =
ssl_server_handshaker_factory_create_handshaker;
impl->base.destroy = ssl_server_handshaker_factory_destroy;
- impl->ssl_contexts = gpr_malloc(key_cert_pair_count * sizeof(SSL_CTX *));
- memset(impl->ssl_contexts, 0, key_cert_pair_count * sizeof(SSL_CTX *));
+ impl->ssl_contexts = gpr_zalloc(key_cert_pair_count * sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
- gpr_malloc(key_cert_pair_count * sizeof(tsi_peer));
- memset(impl->ssl_context_x509_subject_names, 0,
- key_cert_pair_count * sizeof(tsi_peer));
+ gpr_zalloc(key_cert_pair_count * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
tsi_ssl_handshaker_factory_destroy(&impl->base);
diff --git a/src/core/lib/tsi/test_creds/BUILD b/src/core/lib/tsi/test_creds/BUILD
new file mode 100644
index 0000000000..dcd6d930a8
--- /dev/null
+++ b/src/core/lib/tsi/test_creds/BUILD
@@ -0,0 +1,34 @@
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+exports_files([
+ "ca.pem",
+ "server1.key",
+ "server1.pem",
+])
diff --git a/src/core/lib/tsi/transport_security.c b/src/core/lib/tsi/transport_security.c
index 830cf09584..2cbf381c88 100644
--- a/src/core/lib/tsi/transport_security.c
+++ b/src/core/lib/tsi/transport_security.c
@@ -231,8 +231,7 @@ tsi_result tsi_construct_allocated_string_peer_property(
*property = tsi_init_peer_property();
if (name != NULL) property->name = gpr_strdup(name);
if (value_length > 0) {
- property->value.data = gpr_malloc(value_length);
- memset(property->value.data, 0, value_length);
+ property->value.data = gpr_zalloc(value_length);
property->value.length = value_length;
}
return TSI_OK;
@@ -260,8 +259,7 @@ tsi_result tsi_construct_string_peer_property(const char *name,
tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) {
memset(peer, 0, sizeof(tsi_peer));
if (property_count > 0) {
- peer->properties = gpr_malloc(property_count * sizeof(tsi_peer_property));
- memset(peer->properties, 0, property_count * sizeof(tsi_peer_property));
+ peer->properties = gpr_zalloc(property_count * sizeof(tsi_peer_property));
peer->property_count = property_count;
}
return TSI_OK;
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
index d339ed327f..c97f47b397 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -37,10 +37,14 @@ extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
extern void grpc_client_channel_init(void);
extern void grpc_client_channel_shutdown(void);
+extern void grpc_load_reporting_plugin_init(void);
+extern void grpc_load_reporting_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
+ grpc_register_plugin(grpc_load_reporting_plugin_init,
+ grpc_load_reporting_plugin_shutdown);
}
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index 5f9fd8790b..79c4bab985 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -244,7 +244,7 @@ class CallData {
/// Initializes the call data.
virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, ChannelData *channel_data,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
return GRPC_ERROR_NONE;
}
@@ -308,7 +308,7 @@ class ChannelFilter final {
static grpc_error *InitCallElement(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data;
// Construct the object in the already-allocated memory.
CallDataType *call_data = new (elem->call_data) CallDataType();
diff --git a/src/cpp/server/health/default_health_check_service.cc b/src/cpp/server/health/default_health_check_service.cc
index 46def70e8a..bc98ce79a7 100644
--- a/src/cpp/server/health/default_health_check_service.cc
+++ b/src/cpp/server/health/default_health_check_service.cc
@@ -63,7 +63,9 @@ Status DefaultHealthCheckService::HealthCheckServiceImpl::Check(
ServerContext* context, const ByteBuffer* request, ByteBuffer* response) {
// Decode request.
std::vector<Slice> slices;
- request->Dump(&slices);
+ if (!request->Dump(&slices).ok()) {
+ return Status(StatusCode::INVALID_ARGUMENT, "");
+ }
uint8_t* request_bytes = nullptr;
bool request_bytes_owned = false;
size_t request_size = 0;
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 150c5a5faf..9e11a8a9e0 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -189,8 +189,7 @@ class Server::SyncRequest final : public CompletionQueueTag {
explicit CallData(Server* server, SyncRequest* mrd)
: cq_(mrd->cq_),
call_(mrd->call_, server, &cq_, server->max_receive_message_size()),
- ctx_(mrd->deadline_, mrd->request_metadata_.metadata,
- mrd->request_metadata_.count),
+ ctx_(mrd->deadline_, &mrd->request_metadata_),
has_request_payload_(mrd->has_request_payload_),
request_payload_(mrd->request_payload_),
method_(mrd->method_) {
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index 6edbc90927..05c05c8695 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -33,7 +33,9 @@
#include <grpc++/server_context.h>
+#include <algorithm>
#include <mutex>
+#include <utility>
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
@@ -133,8 +135,7 @@ ServerContext::ServerContext()
sent_initial_metadata_(false),
compression_level_set_(false) {}
-ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
- size_t metadata_count)
+ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
: completion_op_(nullptr),
has_notify_when_done_tag_(false),
async_notify_when_done_tag_(nullptr),
@@ -143,12 +144,8 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
cq_(nullptr),
sent_initial_metadata_(false),
compression_level_set_(false) {
- for (size_t i = 0; i < metadata_count; i++) {
- client_metadata_.map()->insert(
- std::pair<grpc::string_ref, grpc::string_ref>(
- StringRefFromSlice(&metadata[i].key),
- StringRefFromSlice(&metadata[i].value)));
- }
+ std::swap(*client_metadata_.arr(), *arr);
+ client_metadata_.FillMap();
}
ServerContext::~ServerContext() {
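
The new constructor takes ownership of the incoming `grpc_metadata_array` by swapping it into the context instead of copying each entry into the map. A rough C-level sketch of the same ownership transfer, using only the metadata-array helpers from `<grpc/grpc.h>`; the helper name is illustrative:

```c
#include <grpc/grpc.h>

/* Exchange the contents of the two arrays without copying individual
   entries: afterwards *dst owns the original storage, and *src holds
   whatever *dst held before (typically an empty, initialized array that
   the caller can still destroy safely). */
static void metadata_array_take(grpc_metadata_array *dst,
                                grpc_metadata_array *src) {
  grpc_metadata_array tmp = *dst;
  *dst = *src;
  *src = tmp;
}

/* Usage sketch:
     grpc_metadata_array incoming;   // filled in by the core
     grpc_metadata_array mine;
     grpc_metadata_array_init(&mine);
     metadata_array_take(&mine, &incoming);
     ...
     grpc_metadata_array_destroy(&mine);
*/
```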
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
index 68a8f4879b..32a52eee6b 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
@@ -45,6 +45,7 @@ using Google.Apis.Auth.OAuth2;
using Google.Protobuf;
using Grpc.Auth;
using Grpc.Core;
+using Grpc.Core.Logging;
using Grpc.Core.Utils;
using Grpc.Testing;
using Newtonsoft.Json.Linq;
@@ -95,6 +96,7 @@ namespace Grpc.IntegrationTesting
public static void Run(string[] args)
{
+ GrpcEnvironment.SetLogger(new ConsoleLogger());
var parserResult = Parser.Default.ParseArguments<ClientOptions>(args)
.WithNotParsed(errors => Environment.Exit(1))
.WithParsed(options =>
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropServer.cs b/src/csharp/Grpc.IntegrationTesting/InteropServer.cs
index 4118f99c2b..be99f92d0b 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropServer.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropServer.cs
@@ -41,6 +41,7 @@ using System.Threading.Tasks;
using CommandLine;
using CommandLine.Text;
using Grpc.Core;
+using Grpc.Core.Logging;
using Grpc.Core.Utils;
using Grpc.Testing;
using NUnit.Framework;
@@ -68,6 +69,7 @@ namespace Grpc.IntegrationTesting
public static void Run(string[] args)
{
+ GrpcEnvironment.SetLogger(new ConsoleLogger());
var parserResult = Parser.Default.ParseArguments<ServerOptions>(args)
.WithNotParsed(errors => Environment.Exit(1))
.WithParsed(options =>
diff --git a/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs b/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
index 62a7347d42..b17b2c2183 100644
--- a/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
+++ b/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
@@ -42,6 +42,7 @@ using System.Threading.Tasks;
using CommandLine;
using CommandLine.Text;
using Grpc.Core;
+using Grpc.Core.Logging;
using Grpc.Core.Utils;
using Grpc.Testing;
using NUnit.Framework;
@@ -65,6 +66,7 @@ namespace Grpc.IntegrationTesting
public static void Run(string[] args)
{
+ GrpcEnvironment.SetLogger(new ConsoleLogger());
var parserResult = Parser.Default.ParseArguments<ServerOptions>(args)
.WithNotParsed((x) => Environment.Exit(1))
.WithParsed(options =>
diff --git a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
index 750613b078..dbf8844265 100644
--- a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
@@ -92,6 +92,7 @@ namespace Grpc.IntegrationTesting
public static void Run(string[] args)
{
+ GrpcEnvironment.SetLogger(new ConsoleLogger());
var parserResult = Parser.Default.ParseArguments<ClientOptions>(args)
.WithNotParsed((x) => Environment.Exit(1))
.WithParsed(options => {
diff --git a/src/csharp/README.md b/src/csharp/README.md
index 8468eb991e..a21b72f225 100644
--- a/src/csharp/README.md
+++ b/src/csharp/README.md
@@ -7,19 +7,19 @@ A C# implementation of gRPC.
SUPPORTED PLATFORMS
------------------
+- [.NET Core](https://dotnet.github.io/) on Linux, Windows and Mac OS X
- .NET Framework 4.5+ (Windows)
-- [.NET Core](https://dotnet.github.io/) on Linux, Windows and Mac OS X (starting from version 1.0.1)
- Mono 4+ on Linux, Windows and Mac OS X
-
PREREQUISITES
--------------
+When using gRPC C# under .NET Core, you only need to [install .NET Core](https://www.microsoft.com/net/core).
+When using gRPC C# under .NET Core, you only need to [install .NET Core](https://www.microsoft.com/net/core).
+
- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015
- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed)
- Mac OS X: Xamarin Studio 5.9+
-
HOW TO USE
--------------
@@ -27,7 +27,7 @@ HOW TO USE
- Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution.
-- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
+- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin.
@@ -71,23 +71,10 @@ DOCUMENTATION
- [Helloworld Example][]
- [RouteGuide Tutorial][]
-CONTENTS
---------
-
-- ext:
- The extension library that wraps C API to be more digestible by C#.
-- Grpc.Auth:
- gRPC OAuth2/JWT support.
-- Grpc.Core:
- The main gRPC C# library.
-- Grpc.Examples:
- API examples for math.proto
-- Grpc.Examples.MathClient:
- An example client that sends requests to math server.
-- Grpc.Examples.MathServer:
- An example server that implements a simple math service.
-- Grpc.IntegrationTesting:
- Cross-language gRPC implementation testing (interop testing).
+PERFORMANCE
+-----------
+
+For best gRPC C# performance, use [.NET Core](https://dotnet.github.io/) and the Server GC mode `"System.GC.Server": true` for your applications.
THE NATIVE DEPENDENCY
---------------
diff --git a/src/node/ext/completion_queue_threadpool.cc b/src/node/ext/completion_queue_threadpool.cc
index 6302e7a103..1917074dc2 100644
--- a/src/node/ext/completion_queue_threadpool.cc
+++ b/src/node/ext/completion_queue_threadpool.cc
@@ -78,6 +78,8 @@ class CompletionQueueAsyncWorker : public Nan::AsyncWorker {
void HandleErrorCallback();
private:
+ static void TryAddWorker();
+
grpc_event result;
static grpc_completion_queue *queue;
@@ -118,20 +120,21 @@ void CompletionQueueAsyncWorker::Execute() {
grpc_completion_queue *CompletionQueueAsyncWorker::GetQueue() { return queue; }
-void CompletionQueueAsyncWorker::Next() {
-#ifndef GRPC_UV
- Nan::HandleScope scope;
- if (current_threads < max_queue_threads) {
+void CompletionQueueAsyncWorker::TryAddWorker() {
+ if (current_threads < max_queue_threads && waiting_next_calls > 0) {
current_threads += 1;
+ waiting_next_calls -= 1;
CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
Nan::AsyncQueueWorker(worker);
- } else {
- waiting_next_calls += 1;
}
GPR_ASSERT(current_threads <= max_queue_threads);
GPR_ASSERT((current_threads == max_queue_threads) ||
(waiting_next_calls == 0));
-#endif
+}
+
+void CompletionQueueAsyncWorker::Next() {
+ waiting_next_calls += 1;
+ TryAddWorker();
}
void CompletionQueueAsyncWorker::Init(Local<Object> exports) {
@@ -143,17 +146,8 @@ void CompletionQueueAsyncWorker::Init(Local<Object> exports) {
void CompletionQueueAsyncWorker::HandleOKCallback() {
Nan::HandleScope scope;
- if (waiting_next_calls > 0) {
- waiting_next_calls -= 1;
- // Old worker removed, new worker added. current_threads += 0
- CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
- Nan::AsyncQueueWorker(worker);
- } else {
- current_threads -= 1;
- }
- GPR_ASSERT(current_threads <= max_queue_threads);
- GPR_ASSERT((current_threads == max_queue_threads) ||
- (waiting_next_calls == 0));
+ current_threads -= 1;
+ TryAddWorker();
Nan::Callback *callback = GetTagCallback(result.tag);
Local<Value> argv[] = {Nan::Null(), GetTagNodeValue(result.tag)};
callback->Call(2, argv);
@@ -162,18 +156,9 @@ void CompletionQueueAsyncWorker::HandleOKCallback() {
}
void CompletionQueueAsyncWorker::HandleErrorCallback() {
- if (waiting_next_calls > 0) {
- waiting_next_calls -= 1;
- // Old worker removed, new worker added. current_threads += 0
- CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
- Nan::AsyncQueueWorker(worker);
- } else {
- current_threads -= 1;
- }
- GPR_ASSERT(current_threads <= max_queue_threads);
- GPR_ASSERT((current_threads == max_queue_threads) ||
- (waiting_next_calls == 0));
Nan::HandleScope scope;
+ current_threads -= 1;
+ TryAddWorker();
Nan::Callback *callback = GetTagCallback(result.tag);
Local<Value> argv[] = {Nan::Error(ErrorMessage())};
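
The threadpool rewrite routes every "maybe start another worker" decision through a single helper, so the invariant (never more than the maximum number of workers, and pending `Next` calls only queue up while the pool is saturated) is asserted in one place. A language-neutral sketch of that bookkeeping in plain C, with illustrative names, an assumed maximum of 2 workers, and a stubbed-out `spawn_worker()`:

```c
#include <assert.h>

#define MAX_QUEUE_THREADS 2  /* illustrative limit */

static int current_threads = 0;
static int waiting_next_calls = 0;

static void spawn_worker(void) { /* stand-in for Nan::AsyncQueueWorker */ }

/* Start a worker only when there is both capacity and demand. */
static void try_add_worker(void) {
  if (current_threads < MAX_QUEUE_THREADS && waiting_next_calls > 0) {
    current_threads += 1;
    waiting_next_calls -= 1;
    spawn_worker();
  }
  assert(current_threads <= MAX_QUEUE_THREADS);
  assert(current_threads == MAX_QUEUE_THREADS || waiting_next_calls == 0);
}

/* Next(): record demand, then let the helper decide. */
static void next(void) { waiting_next_calls += 1; try_add_worker(); }

/* Worker completion: release capacity, then let the helper decide. */
static void on_worker_done(void) { current_threads -= 1; try_add_worker(); }
```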
diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc
index 4761b2867d..ccb55aa54c 100644
--- a/src/node/ext/server.cc
+++ b/src/node/ext/server.cc
@@ -78,8 +78,6 @@ using v8::Value;
Nan::Callback *Server::constructor;
Persistent<FunctionTemplate> Server::fun_tpl;
-static Callback *shutdown_callback;
-
class NewCallOp : public Op {
public:
NewCallOp() {
@@ -128,51 +126,6 @@ class NewCallOp : public Op {
std::string GetTypeString() const { return "new_call"; }
};
-class ServerShutdownOp : public Op {
- public:
- ServerShutdownOp(grpc_server *server): server(server) {
- }
-
- ~ServerShutdownOp() {
- }
-
- Local<Value> GetNodeValue() const {
- return Nan::New<External>(reinterpret_cast<void *>(server));
- }
-
- bool ParseOp(Local<Value> value, grpc_op *out) {
- return true;
- }
- bool IsFinalOp() {
- return false;
- }
-
- grpc_server *server;
-
- protected:
- std::string GetTypeString() const { return "shutdown"; }
-};
-
-NAN_METHOD(ServerShutdownCallback) {
- if (!info[0]->IsNull()) {
- return Nan::ThrowError("forceShutdown failed somehow");
- }
- MaybeLocal<Object> maybe_result = Nan::To<Object>(info[1]);
- Local<Object> result = maybe_result.ToLocalChecked();
- Local<Value> server_val = Nan::Get(
- result, Nan::New("shutdown").ToLocalChecked()).ToLocalChecked();
- Local<External> server_extern = server_val.As<External>();
- grpc_server *server = reinterpret_cast<grpc_server *>(server_extern->Value());
- grpc_server_destroy(server);
-}
-
-Server::Server(grpc_server *server) : wrapped_server(server) {
-}
-
-Server::~Server() {
- this->ShutdownServer();
-}
-
void Server::Init(Local<Object> exports) {
HandleScope scope;
Local<FunctionTemplate> tpl = Nan::New<FunctionTemplate>(New);
@@ -187,11 +140,6 @@ void Server::Init(Local<Object> exports) {
Local<Function> ctr = Nan::GetFunction(tpl).ToLocalChecked();
Nan::Set(exports, Nan::New("Server").ToLocalChecked(), ctr);
constructor = new Callback(ctr);
-
- Local<FunctionTemplate>callback_tpl =
- Nan::New<FunctionTemplate>(ServerShutdownCallback);
- shutdown_callback = new Callback(
- Nan::GetFunction(callback_tpl).ToLocalChecked());
}
bool Server::HasInstance(Local<Value> val) {
@@ -199,21 +147,6 @@ bool Server::HasInstance(Local<Value> val) {
return Nan::New(fun_tpl)->HasInstance(val);
}
-void Server::ShutdownServer() {
- if (this->wrapped_server != NULL) {
- ServerShutdownOp *op = new ServerShutdownOp(this->wrapped_server);
- unique_ptr<OpVec> ops(new OpVec());
- ops->push_back(unique_ptr<Op>(op));
-
- grpc_server_shutdown_and_notify(
- this->wrapped_server, GetCompletionQueue(),
- new struct tag(new Callback(**shutdown_callback), ops.release(), NULL));
- grpc_server_cancel_all_calls(this->wrapped_server);
- CompletionQueueNext();
- this->wrapped_server = NULL;
- }
-}
-
NAN_METHOD(Server::New) {
/* If this is not a constructor call, make a constructor call and return
the result */
diff --git a/src/node/ext/server.h b/src/node/ext/server.h
index 9e6a7bd1e0..ab5fc210e8 100644
--- a/src/node/ext/server.h
+++ b/src/node/ext/server.h
@@ -73,6 +73,7 @@ class Server : public Nan::ObjectWrap {
static Nan::Persistent<v8::FunctionTemplate> fun_tpl;
grpc_server *wrapped_server;
+ grpc_completion_queue *shutdown_queue;
};
} // namespace node
diff --git a/src/node/ext/server_generic.cc b/src/node/ext/server_generic.cc
new file mode 100644
index 0000000000..0cf20f754a
--- /dev/null
+++ b/src/node/ext/server_generic.cc
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_UV
+
+#include "server.h"
+
+#include <node.h>
+#include <nan.h>
+#include "grpc/grpc.h"
+#include "grpc/support/time.h"
+
+namespace grpc {
+namespace node {
+
+Server::Server(grpc_server *server) : wrapped_server(server) {
+ shutdown_queue = grpc_completion_queue_create(NULL);
+ grpc_server_register_non_listening_completion_queue(server, shutdown_queue,
+ NULL);
+}
+
+Server::~Server() {
+ this->ShutdownServer();
+ grpc_completion_queue_shutdown(this->shutdown_queue);
+ grpc_completion_queue_destroy(this->shutdown_queue);
+}
+
+void Server::ShutdownServer() {
+ if (this->wrapped_server != NULL) {
+ grpc_server_shutdown_and_notify(this->wrapped_server, this->shutdown_queue,
+ NULL);
+ grpc_server_cancel_all_calls(this->wrapped_server);
+ grpc_completion_queue_pluck(this->shutdown_queue, NULL,
+ gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ grpc_server_destroy(this->wrapped_server);
+ this->wrapped_server = NULL;
+ }
+}
+
+} // namespace grpc
+} // namespace node
+
+#endif /* GRPC_UV */
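
For reference, the non-UV shutdown path added above reduces to a small core-API sequence: notify shutdown on a queue reserved for that purpose, cancel in-flight calls, then block until the shutdown tag is plucked before destroying the server. A condensed sketch of that sequence as a standalone helper (same calls as the wrapper above; the helper name is illustrative):

```c
#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Synchronously shut down a server using a completion queue reserved for
   shutdown notifications, mirroring the generic (non-UV) wrapper above. */
static void shutdown_server_blocking(grpc_server *server,
                                     grpc_completion_queue *shutdown_queue) {
  grpc_server_shutdown_and_notify(server, shutdown_queue, NULL /* tag */);
  grpc_server_cancel_all_calls(server);
  /* Block until the NULL shutdown tag is delivered. */
  grpc_completion_queue_pluck(shutdown_queue, NULL,
                              gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  grpc_server_destroy(server);
}
```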
diff --git a/src/node/ext/server_uv.cc b/src/node/ext/server_uv.cc
new file mode 100644
index 0000000000..bf8b609a63
--- /dev/null
+++ b/src/node/ext/server_uv.cc
@@ -0,0 +1,131 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef GRPC_UV
+
+#include "server.h"
+
+#include <node.h>
+#include <nan.h>
+#include "grpc/grpc.h"
+#include "grpc/support/time.h"
+
+#include "call.h"
+#include "completion_queue.h"
+
+namespace grpc {
+namespace node {
+
+using Nan::Callback;
+
+using v8::External;
+using v8::Function;
+using v8::FunctionTemplate;
+using v8::Local;
+using v8::MaybeLocal;
+using v8::Object;
+using v8::Value;
+
+static Callback *shutdown_callback = NULL;
+
+class ServerShutdownOp : public Op {
+ public:
+ ServerShutdownOp(grpc_server *server): server(server) {
+ }
+
+ ~ServerShutdownOp() {
+ }
+
+ Local<Value> GetNodeValue() const {
+ return Nan::New<External>(reinterpret_cast<void *>(server));
+ }
+
+ bool ParseOp(Local<Value> value, grpc_op *out) {
+ return true;
+ }
+ bool IsFinalOp() {
+ return false;
+ }
+
+ grpc_server *server;
+
+ protected:
+ std::string GetTypeString() const { return "shutdown"; }
+};
+
+Server::Server(grpc_server *server) : wrapped_server(server) {
+}
+
+Server::~Server() {
+ this->ShutdownServer();
+}
+
+NAN_METHOD(ServerShutdownCallback) {
+ if (!info[0]->IsNull()) {
+ return Nan::ThrowError("forceShutdown failed somehow");
+ }
+ MaybeLocal<Object> maybe_result = Nan::To<Object>(info[1]);
+ Local<Object> result = maybe_result.ToLocalChecked();
+ Local<Value> server_val = Nan::Get(
+ result, Nan::New("shutdown").ToLocalChecked()).ToLocalChecked();
+ Local<External> server_extern = server_val.As<External>();
+ grpc_server *server = reinterpret_cast<grpc_server *>(server_extern->Value());
+ grpc_server_destroy(server);
+}
+
+void Server::ShutdownServer() {
+ if (this->wrapped_server != NULL) {
+ if (shutdown_callback == NULL) {
+ Local<FunctionTemplate>callback_tpl =
+ Nan::New<FunctionTemplate>(ServerShutdownCallback);
+ shutdown_callback = new Callback(
+ Nan::GetFunction(callback_tpl).ToLocalChecked());
+ }
+
+ ServerShutdownOp *op = new ServerShutdownOp(this->wrapped_server);
+ unique_ptr<OpVec> ops(new OpVec());
+ ops->push_back(unique_ptr<Op>(op));
+
+ grpc_server_shutdown_and_notify(
+ this->wrapped_server, GetCompletionQueue(),
+ new struct tag(new Callback(**shutdown_callback), ops.release(), NULL));
+ grpc_server_cancel_all_calls(this->wrapped_server);
+ CompletionQueueNext();
+ this->wrapped_server = NULL;
+ }
+}
+
+} // namespace grpc
+} // namespace node
+
+#endif /* GRPC_UV */
diff --git a/src/node/src/client.js b/src/node/src/client.js
index 134ef239c2..44081a3a6c 100644
--- a/src/node/src/client.js
+++ b/src/node/src/client.js
@@ -108,7 +108,7 @@ function _write(chunk, encoding, callback) {
but passing an object that causes a serialization failure is a misuse
of the API anyway, so that's OK. The primary purpose here is to give the
programmer a useful error and to stop the stream properly */
- this.call.cancelWithStatus(grpc.status.INTERNAL, "Serialization failure");
+ this.call.cancelWithStatus(grpc.status.INTERNAL, 'Serialization failure');
callback(e);
}
if (_.isFinite(encoding)) {
@@ -831,13 +831,12 @@ exports.waitForClientReady = function(client, deadline, callback) {
*/
exports.makeProtobufClientConstructor = function(service, options) {
var method_attrs = common.getProtobufServiceAttrs(service, options);
- var deprecatedArgumentOrder = false;
- if (options) {
- deprecatedArgumentOrder = options.deprecatedArgumentOrder;
+ if (!options) {
+ options = {deprecatedArgumentOrder: false};
}
var Client = exports.makeClientConstructor(
method_attrs, common.fullyQualifiedName(service),
- deprecatedArgumentOrder);
+ options);
Client.service = service;
Client.service.grpc_options = options;
return Client;
diff --git a/src/node/src/server.js b/src/node/src/server.js
index da9c6b2d7f..8a7eff507d 100644
--- a/src/node/src/server.js
+++ b/src/node/src/server.js
@@ -121,20 +121,20 @@ function sendUnaryResponse(call, value, serialize, metadata, flags) {
if (metadata) {
statusMetadata = metadata;
}
- status.metadata = statusMetadata._getCoreRepresentation();
- if (!call.metadataSent) {
- end_batch[grpc.opType.SEND_INITIAL_METADATA] =
- (new Metadata())._getCoreRepresentation();
- call.metadataSent = true;
- }
var message;
try {
message = serialize(value);
} catch (e) {
e.code = grpc.status.INTERNAL;
- handleError(e);
+ handleError(call, e);
return;
}
+ status.metadata = statusMetadata._getCoreRepresentation();
+ if (!call.metadataSent) {
+ end_batch[grpc.opType.SEND_INITIAL_METADATA] =
+ (new Metadata())._getCoreRepresentation();
+ call.metadataSent = true;
+ }
message.grpcWriteFlags = flags;
end_batch[grpc.opType.SEND_MESSAGE] = message;
end_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status;
@@ -280,11 +280,6 @@ function _write(chunk, encoding, callback) {
/* jshint validthis: true */
var batch = {};
var self = this;
- if (!this.call.metadataSent) {
- batch[grpc.opType.SEND_INITIAL_METADATA] =
- (new Metadata())._getCoreRepresentation();
- this.call.metadataSent = true;
- }
var message;
try {
message = this.serialize(chunk);
@@ -293,6 +288,11 @@ function _write(chunk, encoding, callback) {
callback(e);
return;
}
+ if (!this.call.metadataSent) {
+ batch[grpc.opType.SEND_INITIAL_METADATA] =
+ (new Metadata())._getCoreRepresentation();
+ this.call.metadataSent = true;
+ }
if (_.isFinite(encoding)) {
/* Attach the encoding if it is a finite number. This is the closest we
* can get to checking that it is valid flags */
@@ -728,11 +728,17 @@ var defaultHandler = {
* method implementation for the provided service.
*/
Server.prototype.addService = function(service, implementation) {
+ if (!_.isObjectLike(service) || !_.isObjectLike(implementation)) {
+ throw new Error('addService requires two objects as arguments');
+ }
+ if (_.keys(service).length === 0) {
+ throw new Error('Cannot add an empty service to a server');
+ }
if (this.started) {
throw new Error('Can\'t add a service to a started server.');
}
var self = this;
- _.each(service, function(attrs, name) {
+ _.forOwn(service, function(attrs, name) {
var method_type;
if (attrs.requestStream) {
if (attrs.responseStream) {
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index e429a3648b..2636ea85ac 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -607,6 +607,113 @@ describe('Client malformed response handling', function() {
call.end();
});
});
+describe('Server serialization failure handling', function() {
+ function serializeFail(obj) {
+ throw new Error('Serialization failed');
+ }
+ var client;
+ var server;
+ before(function() {
+ var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto');
+ var test_service = test_proto.lookup('TestService');
+ var malformed_test_service = {
+ unary: {
+ path: '/TestService/Unary',
+ requestStream: false,
+ responseStream: false,
+ requestDeserialize: _.identity,
+ responseSerialize: serializeFail
+ },
+ clientStream: {
+ path: '/TestService/ClientStream',
+ requestStream: true,
+ responseStream: false,
+ requestDeserialize: _.identity,
+ responseSerialize: serializeFail
+ },
+ serverStream: {
+ path: '/TestService/ServerStream',
+ requestStream: false,
+ responseStream: true,
+ requestDeserialize: _.identity,
+ responseSerialize: serializeFail
+ },
+ bidiStream: {
+ path: '/TestService/BidiStream',
+ requestStream: true,
+ responseStream: true,
+ requestDeserialize: _.identity,
+ responseSerialize: serializeFail
+ }
+ };
+ server = new grpc.Server();
+ server.addService(malformed_test_service, {
+ unary: function(call, cb) {
+ cb(null, {});
+ },
+ clientStream: function(stream, cb) {
+ stream.on('data', function() {/* Ignore requests */});
+ stream.on('end', function() {
+ cb(null, {});
+ });
+ },
+ serverStream: function(stream) {
+ stream.write({});
+ stream.end();
+ },
+ bidiStream: function(stream) {
+ stream.on('data', function() {
+ // Ignore requests
+ stream.write({});
+ });
+ stream.on('end', function() {
+ stream.end();
+ });
+ }
+ });
+ var port = server.bind('localhost:0', server_insecure_creds);
+ var Client = surface_client.makeProtobufClientConstructor(test_service);
+ client = new Client('localhost:' + port, grpc.credentials.createInsecure());
+ server.start();
+ });
+ after(function() {
+ server.forceShutdown();
+ });
+ it('should get an INTERNAL status with a unary call', function(done) {
+ client.unary({}, function(err, data) {
+ assert(err);
+ assert.strictEqual(err.code, grpc.status.INTERNAL);
+ done();
+ });
+ });
+ it('should get an INTERNAL status with a client stream call', function(done) {
+ var call = client.clientStream(function(err, data) {
+ assert(err);
+ assert.strictEqual(err.code, grpc.status.INTERNAL);
+ done();
+ });
+ call.write({});
+ call.end();
+ });
+ it('should get an INTERNAL status with a server stream call', function(done) {
+ var call = client.serverStream({});
+ call.on('data', function(){});
+ call.on('error', function(err) {
+ assert.strictEqual(err.code, grpc.status.INTERNAL);
+ done();
+ });
+ });
+ it('should get an INTERNAL status with a bidi stream call', function(done) {
+ var call = client.bidiStream();
+ call.on('data', function(){});
+ call.on('error', function(err) {
+ assert.strictEqual(err.code, grpc.status.INTERNAL);
+ done();
+ });
+ call.write({});
+ call.end();
+ });
+});
describe('Other conditions', function() {
var test_service;
var Client;
diff --git a/src/php/README.md b/src/php/README.md
index ed91d2fbe5..f08541f16c 100644
--- a/src/php/README.md
+++ b/src/php/README.md
@@ -13,12 +13,24 @@ shared C library.
* `phpunit` (optional)
**Install PHP and PECL on Ubuntu/Debian:**
+
+For PHP5:
+
```sh
-$ sudo apt-get install php5 php5-dev php-pear
+$ sudo apt-get install php5 php5-dev php-pear phpunit
+```
-OR
+For PHP7:
+
+```sh
+$ sudo apt-get install php7.0 php7.0-dev php-pear phpunit
+```
-$ sudo apt-get install php7.0 php7.0-dev php-pear
+**Install PHP and PECL on CentOS/RHEL 7:**
+```sh
+$ sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+$ sudo rpm -Uvh https://mirror.webtatic.com/yum/el7/webtatic-release.rpm
+$ sudo yum install php56w php56w-devel php-pear phpunit gcc zlib-devel
```
**Install PECL on Mac:**
@@ -52,6 +64,10 @@ This will compile and install the gRPC PHP extension into the standard PHP
extension directory. You should be able to run the [unit tests](#unit-tests),
with the PHP extension installed.
+Note: This step unfortunately won't work for users on CentOS/RHEL 6. Please
+follow the instructions below to compile the extension from source.
+
+
**Update php.ini**
Add this line to your `php.ini` file, e.g. `/etc/php5/cli/php.ini`
@@ -142,7 +158,7 @@ $ composer install
### Protobuf compiler
Again if you don't have it already, you need to install the protobuf compiler
-`protoc`, version 3.2.0+.
+`protoc`, version 3.1.0+ (the newer the better).
If `protoc` hasn't been installed, you can download the `protoc` binaries from
[the protocol buffers Github repository](https://github.com/google/protobuf/releases).
diff --git a/src/php/composer.json b/src/php/composer.json
index 8528304c81..491e34795a 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -5,7 +5,7 @@
"version": "1.2.0",
"require": {
"php": ">=5.5.0",
- "google/protobuf": "v3.1.0"
+ "google/protobuf": "^v3.1.0"
},
"require-dev": {
"google/auth": "v0.9"
diff --git a/src/proto/grpc/reflection/v1alpha/BUILD b/src/proto/grpc/reflection/v1alpha/BUILD
new file mode 100644
index 0000000000..92dd3d7f68
--- /dev/null
+++ b/src/proto/grpc/reflection/v1alpha/BUILD
@@ -0,0 +1,39 @@
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+licenses(["notice"]) # 3-clause BSD
+
+package(default_visibility = ["//visibility:public"])
+
+load("//bazel:grpc_build_system.bzl", "grpc_proto_library")
+
+grpc_proto_library(
+ name = "reflection_proto",
+ srcs = ["reflection.proto"],
+)
diff --git a/src/proto/grpc/testing/echo_messages.proto b/src/proto/grpc/testing/echo_messages.proto
index b405acf043..efb6f4d493 100644
--- a/src/proto/grpc/testing/echo_messages.proto
+++ b/src/proto/grpc/testing/echo_messages.proto
@@ -50,6 +50,7 @@ message RequestParams {
bool skip_cancelled_check = 9;
string expected_transport_security_type = 10;
DebugInfo debug_info = 11;
+ bool server_die = 12; // Server should not see a request with this set.
}
message EchoRequest {
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index 36dfc0d7b2..2480f9b174 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -39,7 +39,7 @@ from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
-_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
+_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
diff --git a/src/python/grpcio/grpc/beta/_connectivity_channel.py b/src/python/grpcio/grpc/beta/_connectivity_channel.py
deleted file mode 100644
index bfb847f80a..0000000000
--- a/src/python/grpcio/grpc/beta/_connectivity_channel.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Affords a connectivity-state-listenable channel."""
-
-import threading
-import time
-
-from grpc._adapter import _low
-from grpc._adapter import _types
-from grpc.beta import interfaces
-from grpc.framework.foundation import callable_util
-
-_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
- 'Exception calling channel subscription callback!')
-
-_LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
- state: connectivity
- for state, connectivity in zip(_types.ConnectivityState,
- interfaces.ChannelConnectivity)
-}
-
-
-class ConnectivityChannel(object):
-
- def __init__(self, low_channel):
- self._lock = threading.Lock()
- self._low_channel = low_channel
-
- self._polling = False
- self._connectivity = None
- self._try_to_connect = False
- self._callbacks_and_connectivities = []
- self._delivering = False
-
- def _deliveries(self, connectivity):
- callbacks_needing_update = []
- for callback_and_connectivity in self._callbacks_and_connectivities:
- callback, callback_connectivity = callback_and_connectivity
- if callback_connectivity is not connectivity:
- callbacks_needing_update.append(callback)
- callback_and_connectivity[1] = connectivity
- return callbacks_needing_update
-
- def _deliver(self, initial_connectivity, initial_callbacks):
- connectivity = initial_connectivity
- callbacks = initial_callbacks
- while True:
- for callback in callbacks:
- callable_util.call_logging_exceptions(
- callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
- connectivity)
- with self._lock:
- callbacks = self._deliveries(self._connectivity)
- if callbacks:
- connectivity = self._connectivity
- else:
- self._delivering = False
- return
-
- def _spawn_delivery(self, connectivity, callbacks):
- delivering_thread = threading.Thread(
- target=self._deliver, args=(connectivity, callbacks,))
- delivering_thread.start()
- self._delivering = True
-
- # TODO(issue 3064): Don't poll.
- def _poll_connectivity(self, low_channel, initial_try_to_connect):
- try_to_connect = initial_try_to_connect
- low_connectivity = low_channel.check_connectivity_state(try_to_connect)
- with self._lock:
- self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
- low_connectivity]
- callbacks = tuple(
- callback
- for callback, unused_but_known_to_be_none_connectivity in
- self._callbacks_and_connectivities)
- for callback_and_connectivity in self._callbacks_and_connectivities:
- callback_and_connectivity[1] = self._connectivity
- if callbacks:
- self._spawn_delivery(self._connectivity, callbacks)
- completion_queue = _low.CompletionQueue()
- while True:
- low_channel.watch_connectivity_state(low_connectivity,
- time.time() + 0.2,
- completion_queue, None)
- event = completion_queue.next()
- with self._lock:
- if not self._callbacks_and_connectivities and not self._try_to_connect:
- self._polling = False
- self._connectivity = None
- completion_queue.shutdown()
- break
- try_to_connect = self._try_to_connect
- self._try_to_connect = False
- if event.success or try_to_connect:
- low_connectivity = low_channel.check_connectivity_state(
- try_to_connect)
- with self._lock:
- self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
- low_connectivity]
- if not self._delivering:
- callbacks = self._deliveries(self._connectivity)
- if callbacks:
- self._spawn_delivery(self._connectivity, callbacks)
-
- def subscribe(self, callback, try_to_connect):
- with self._lock:
- if not self._callbacks_and_connectivities and not self._polling:
- polling_thread = threading.Thread(
- target=self._poll_connectivity,
- args=(self._low_channel, bool(try_to_connect)))
- polling_thread.start()
- self._polling = True
- self._callbacks_and_connectivities.append([callback, None])
- elif not self._delivering and self._connectivity is not None:
- self._spawn_delivery(self._connectivity, (callback,))
- self._try_to_connect |= bool(try_to_connect)
- self._callbacks_and_connectivities.append(
- [callback, self._connectivity])
- else:
- self._try_to_connect |= bool(try_to_connect)
- self._callbacks_and_connectivities.append([callback, None])
-
- def unsubscribe(self, callback):
- with self._lock:
- for index, (subscribed_callback, unused_connectivity
- ) in enumerate(self._callbacks_and_connectivities):
- if callback == subscribed_callback:
- self._callbacks_and_connectivities.pop(index)
- break
-
- def low_channel(self):
- return self._low_channel
diff --git a/src/python/grpcio_tests/tests/http2/negative_http2_client.py b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
index b8adf093a5..b184e62cfd 100644
--- a/src/python/grpcio_tests/tests/http2/negative_http2_client.py
+++ b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
@@ -31,6 +31,7 @@
import argparse
import grpc
+import time
from src.proto.grpc.testing import test_pb2
from src.proto.grpc.testing import messages_pb2
@@ -75,6 +76,7 @@ def _goaway(stub):
first_response = stub.UnaryCall(_SIMPLE_REQUEST)
_validate_payload_type_and_length(first_response, messages_pb2.COMPRESSABLE,
_RESPONSE_SIZE)
+ time.sleep(1)
second_response = stub.UnaryCall(_SIMPLE_REQUEST)
_validate_payload_type_and_length(second_response,
messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
diff --git a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
index f2e3898ed6..ee235032f0 100644
--- a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
@@ -249,3 +249,7 @@ class InvocationDefectsTest(unittest.TestCase):
with self.assertRaises(grpc.RpcError):
for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
next(response_iterator)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index 53fe7ba8aa..035d87e3cf 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -32,7 +32,7 @@ import unittest
import weakref
import grpc
-from grpc import _grpcio_metadata
+from grpc import _channel
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
@@ -49,8 +49,6 @@ _UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
-_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
-
_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
('client-md-key-bin', b'\x00\x01'))
@@ -76,7 +74,7 @@ def validate_client_metadata(test, servicer_context):
_CLIENT_METADATA, servicer_context.invocation_metadata()))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
- .startswith('primary-agent ' + _USER_AGENT))
+ .startswith('primary-agent ' + _channel._USER_AGENT))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
.endswith('secondary-agent'))
diff --git a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
index be3522f46f..eb5f459848 100644
--- a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
+++ b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
@@ -35,8 +35,8 @@ import unittest
from grpc import _common
_SHORT_TIME = 0.5
-_LONG_TIME = 2.0
-_EPSILON = 0.1
+_LONG_TIME = 5.0
+_EPSILON = 0.5
def cleanup(timeout):
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index 66c54aa3e0..b379664bab 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -102,7 +102,6 @@ $CFLAGS << ' -std=c99 '
$CFLAGS << ' -Wall '
$CFLAGS << ' -Wextra '
$CFLAGS << ' -pedantic '
-$CFLAGS << ' -Werror '
$CFLAGS << ' -Wno-format '
output = File.join('grpc', 'grpc_c')
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index b8af800833..52465a4dd7 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -220,6 +220,7 @@ grpc_slice_buffer_move_first_into_buffer_type grpc_slice_buffer_move_first_into_
grpc_slice_buffer_take_first_type grpc_slice_buffer_take_first_import;
grpc_slice_buffer_undo_take_first_type grpc_slice_buffer_undo_take_first_import;
gpr_malloc_type gpr_malloc_import;
+gpr_zalloc_type gpr_zalloc_import;
gpr_free_type gpr_free_import;
gpr_realloc_type gpr_realloc_import;
gpr_malloc_aligned_type gpr_malloc_aligned_import;
@@ -512,6 +513,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_slice_buffer_take_first_import = (grpc_slice_buffer_take_first_type) GetProcAddress(library, "grpc_slice_buffer_take_first");
grpc_slice_buffer_undo_take_first_import = (grpc_slice_buffer_undo_take_first_type) GetProcAddress(library, "grpc_slice_buffer_undo_take_first");
gpr_malloc_import = (gpr_malloc_type) GetProcAddress(library, "gpr_malloc");
+ gpr_zalloc_import = (gpr_zalloc_type) GetProcAddress(library, "gpr_zalloc");
gpr_free_import = (gpr_free_type) GetProcAddress(library, "gpr_free");
gpr_realloc_import = (gpr_realloc_type) GetProcAddress(library, "gpr_realloc");
gpr_malloc_aligned_import = (gpr_malloc_aligned_type) GetProcAddress(library, "gpr_malloc_aligned");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index 3229e2a77d..b16e673878 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -611,6 +611,9 @@ extern grpc_slice_buffer_undo_take_first_type grpc_slice_buffer_undo_take_first_
typedef void *(*gpr_malloc_type)(size_t size);
extern gpr_malloc_type gpr_malloc_import;
#define gpr_malloc gpr_malloc_import
+typedef void *(*gpr_zalloc_type)(size_t size);
+extern gpr_zalloc_type gpr_zalloc_import;
+#define gpr_zalloc gpr_zalloc_import
typedef void(*gpr_free_type)(void *ptr);
extern gpr_free_type gpr_free_import;
#define gpr_free gpr_free_import