Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/census/census_log.h | 2
-rw-r--r--  src/core/ext/census/grpc_filter.c | 13
-rw-r--r--  src/core/ext/census/mlog.h | 2
-rw-r--r--  src/core/ext/census/trace_context.h | 2
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 226
-rw-r--r--  src/core/ext/client_channel/client_channel.h | 10
-rw-r--r--  src/core/ext/client_channel/client_channel_factory.c | 32
-rw-r--r--  src/core/ext/client_channel/client_channel_factory.h | 6
-rw-r--r--  src/core/ext/client_channel/connector.h | 2
-rw-r--r--  src/core/ext/client_channel/default_initial_connect_string.c | 4
-rw-r--r--  src/core/ext/client_channel/http_connect_handshaker.c | 235
-rw-r--r--  src/core/ext/client_channel/http_connect_handshaker.h | 5
-rw-r--r--  src/core/ext/client_channel/initial_connect_string.c | 4
-rw-r--r--  src/core/ext/client_channel/initial_connect_string.h | 8
-rw-r--r--  src/core/ext/client_channel/lb_policy.h | 6
-rw-r--r--  src/core/ext/client_channel/lb_policy_factory.h | 3
-rw-r--r--  src/core/ext/client_channel/lb_policy_registry.c | 8
-rw-r--r--  src/core/ext/client_channel/resolver_factory.c | 5
-rw-r--r--  src/core/ext/client_channel/resolver_factory.h | 8
-rw-r--r--  src/core/ext/client_channel/resolver_registry.c | 41
-rw-r--r--  src/core/ext/client_channel/resolver_registry.h | 10
-rw-r--r--  src/core/ext/client_channel/subchannel.c | 185
-rw-r--r--  src/core/ext/client_channel/subchannel.h | 8
-rw-r--r--  src/core/ext/client_channel/subchannel_index.c | 4
-rw-r--r--  src/core/ext/client_channel/uri_parser.c | 35
-rw-r--r--  src/core/ext/client_config/message_size_filter.c | 85
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c | 905
-rw-r--r--  src/core/ext/lb_policy/grpclb/load_balancer_api.c | 20
-rw-r--r--  src/core/ext/lb_policy/grpclb/load_balancer_api.h | 8
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h | 4
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c | 20
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c | 363
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c | 9
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c | 42
-rw-r--r--  src/core/ext/resolver/sockaddr/sockaddr_resolver.c | 27
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 263
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.h | 51
-rw-r--r--  src/core/ext/transport/chttp2/client/insecure/channel_create.c | 177
-rw-r--r--  src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c | 6
-rw-r--r--  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c | 257
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 362
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.h | 78
-rw-r--r--  src/core/ext/transport/chttp2/server/insecure/server_chttp2.c | 172
-rw-r--r--  src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c | 8
-rw-r--r--  src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c | 337
-rw-r--r--  src/core/ext/transport/chttp2/transport/bin_decoder.c | 55
-rw-r--r--  src/core/ext/transport/chttp2/transport/bin_decoder.h | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/bin_encoder.c | 47
-rw-r--r--  src/core/ext/transport/chttp2/transport/bin_encoder.h | 15
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 371
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_data.c | 33
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_data.h | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.c | 26
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.h | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 16
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.h | 6
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 16
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.h | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.c | 28
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.h | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.c | 12
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.h | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.c | 65
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.h | 6
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 38
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.c | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 58
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 39
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_lists.c | 5
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_map.c | 11
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_map.h | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c | 24
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 218
-rw-r--r--  src/core/lib/channel/channel_args.c | 6
-rw-r--r--  src/core/lib/channel/channel_args.h | 8
-rw-r--r--  src/core/lib/channel/channel_stack.c | 40
-rw-r--r--  src/core/lib/channel/channel_stack.h | 38
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c | 46
-rw-r--r--  src/core/lib/channel/channel_stack_builder.h | 11
-rw-r--r--  src/core/lib/channel/compress_filter.c | 42
-rw-r--r--  src/core/lib/channel/connected_channel.c | 15
-rw-r--r--  src/core/lib/channel/context.h | 3
-rw-r--r--  src/core/lib/channel/deadline_filter.c | 13
-rw-r--r--  src/core/lib/channel/handshaker.c | 196
-rw-r--r--  src/core/lib/channel/handshaker.h | 80
-rw-r--r--  src/core/lib/channel/http_client_filter.c | 96
-rw-r--r--  src/core/lib/channel/http_server_filter.c | 49
-rw-r--r--  src/core/lib/compression/message_compress.c | 50
-rw-r--r--  src/core/lib/compression/message_compress.h | 6
-rw-r--r--  src/core/lib/http/format_request.c | 19
-rw-r--r--  src/core/lib/http/format_request.h | 12
-rw-r--r--  src/core/lib/http/httpcli.c | 47
-rw-r--r--  src/core/lib/http/httpcli.h | 2
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 91
-rw-r--r--  src/core/lib/http/parser.c | 6
-rw-r--r--  src/core/lib/http/parser.h | 4
-rw-r--r--  src/core/lib/iomgr/combiner.c | 11
-rw-r--r--  src/core/lib/iomgr/endpoint.c | 10
-rw-r--r--  src/core/lib/iomgr/endpoint.h | 21
-rw-r--r--  src/core/lib/iomgr/endpoint_pair.h | 5
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_posix.c | 13
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_uv.c | 5
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_windows.c | 9
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c | 612
-rw-r--r--  src/core/lib/iomgr/ev_poll_and_epoll_posix.c | 2076
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 29
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 2
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 1
-rw-r--r--  src/core/lib/iomgr/iomgr.c | 1
-rw-r--r--  src/core/lib/iomgr/load_file.c | 6
-rw-r--r--  src/core/lib/iomgr/load_file.h | 4
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.c | 2
-rw-r--r--  src/core/lib/iomgr/port.h | 2
-rw-r--r--  src/core/lib/iomgr/resolve_address.h | 2
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c | 9
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c | 9
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c | 9
-rw-r--r--  src/core/lib/iomgr/resource_quota.c | 829
-rw-r--r--  src/core/lib/iomgr/resource_quota.h | 153
-rw-r--r--  src/core/lib/iomgr/socket_mutator.c | 98
-rw-r--r--  src/core/lib/iomgr/socket_mutator.h | 80
-rw-r--r--  src/core/lib/iomgr/socket_utils_common_posix.c | 9
-rw-r--r--  src/core/lib/iomgr/socket_utils_posix.h | 5
-rw-r--r--  src/core/lib/iomgr/socket_windows.c | 8
-rw-r--r--  src/core/lib/iomgr/socket_windows.h | 1
-rw-r--r--  src/core/lib/iomgr/tcp_client.h | 6
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.c | 73
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.h (renamed from src/core/lib/iomgr/ev_poll_and_epoll_posix.h) | 12
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c | 36
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.c | 56
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 134
-rw-r--r--  src/core/lib/iomgr/tcp_posix.h | 4
-rw-r--r--  src/core/lib/iomgr/tcp_server.h | 6
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 84
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.c | 33
-rw-r--r--  src/core/lib/iomgr/tcp_server_windows.c | 131
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c | 121
-rw-r--r--  src/core/lib/iomgr/tcp_uv.h | 4
-rw-r--r--  src/core/lib/iomgr/tcp_windows.c | 79
-rw-r--r--  src/core/lib/iomgr/tcp_windows.h | 4
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 6
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_pipe.c | 2
-rw-r--r--  src/core/lib/json/json.c | 6
-rw-r--r--  src/core/lib/json/json.h | 22
-rw-r--r--  src/core/lib/security/credentials/credentials.h | 6
-rw-r--r--  src/core/lib/security/credentials/credentials_metadata.c | 14
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c | 14
-rw-r--r--  src/core/lib/security/credentials/jwt/json_token.h | 2
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_credentials.c | 45
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.c | 116
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.h | 9
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.c | 53
-rw-r--r--  src/core/lib/security/credentials/plugin/plugin_credentials.c | 10
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c | 29
-rw-r--r--  src/core/lib/security/transport/handshake.c | 374
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.c | 142
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.h | 4
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 277
-rw-r--r--  src/core/lib/security/transport/security_connector.h | 60
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 450
-rw-r--r--  src/core/lib/security/transport/security_handshaker.h (renamed from src/core/lib/security/transport/handshake.h) | 20
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 26
-rw-r--r--  src/core/lib/security/util/b64.c | 14
-rw-r--r--  src/core/lib/security/util/b64.h | 8
-rw-r--r--  src/core/lib/slice/percent_encoding.c (renamed from src/core/lib/support/percent_encoding.c) | 58
-rw-r--r--  src/core/lib/slice/percent_encoding.h (renamed from src/core/lib/support/percent_encoding.h) | 28
-rw-r--r--  src/core/lib/slice/slice.c (renamed from src/core/lib/support/slice.c) | 104
-rw-r--r--  src/core/lib/slice/slice_buffer.c (renamed from src/core/lib/support/slice_buffer.c) | 122
-rw-r--r--  src/core/lib/slice/slice_string_helpers.c | 89
-rw-r--r--  src/core/lib/slice/slice_string_helpers.h | 58
-rw-r--r--  src/core/lib/support/backoff.c | 37
-rw-r--r--  src/core/lib/support/backoff.h | 7
-rw-r--r--  src/core/lib/support/env.h | 2
-rw-r--r--  src/core/lib/support/string.c | 69
-rw-r--r--  src/core/lib/support/string.h | 14
-rw-r--r--  src/core/lib/support/subprocess_posix.c | 7
-rw-r--r--  src/core/lib/support/tmpfile.h | 2
-rw-r--r--  src/core/lib/surface/byte_buffer.c | 19
-rw-r--r--  src/core/lib/surface/byte_buffer_reader.c | 28
-rw-r--r--  src/core/lib/surface/call.c | 74
-rw-r--r--  src/core/lib/surface/channel.c | 128
-rw-r--r--  src/core/lib/surface/completion_queue.c | 2
-rw-r--r--  src/core/lib/surface/init.c | 2
-rw-r--r--  src/core/lib/surface/lame_client.c | 12
-rw-r--r--  src/core/lib/surface/server.c | 29
-rw-r--r--  src/core/lib/surface/validate_metadata.c | 2
-rw-r--r--  src/core/lib/surface/version.c | 2
-rw-r--r--  src/core/lib/transport/byte_stream.c | 8
-rw-r--r--  src/core/lib/transport/byte_stream.h | 10
-rw-r--r--  src/core/lib/transport/connectivity_state.c | 16
-rw-r--r--  src/core/lib/transport/connectivity_state.h | 5
-rw-r--r--  src/core/lib/transport/mdstr_hash_table.c | 42
-rw-r--r--  src/core/lib/transport/mdstr_hash_table.h | 18
-rw-r--r--  src/core/lib/transport/metadata.c | 67
-rw-r--r--  src/core/lib/transport/metadata.h | 20
-rw-r--r--  src/core/lib/transport/metadata_batch.h | 2
-rw-r--r--  src/core/lib/transport/method_config.c | 340
-rw-r--r--  src/core/lib/transport/method_config.h | 136
-rw-r--r--  src/core/lib/transport/pid_controller.c | 57
-rw-r--r--  src/core/lib/transport/pid_controller.h | 64
-rw-r--r--  src/core/lib/transport/service_config.c | 249
-rw-r--r--  src/core/lib/transport/service_config.h | 70
-rw-r--r--  src/core/lib/transport/transport.c | 22
-rw-r--r--  src/core/lib/transport/transport.h | 13
-rw-r--r--  src/core/lib/transport/transport_impl.h | 3
-rw-r--r--  src/core/lib/transport/transport_op_string.c | 11
-rw-r--r--  src/core/lib/tsi/ssl_transport_security.c | 16
211 files changed, 7752 insertions, 6925 deletions
diff --git a/src/core/ext/census/census_log.h b/src/core/ext/census/census_log.h
index 534ecc5705..1b185a53b9 100644
--- a/src/core/ext/census/census_log.h
+++ b/src/core/ext/census/census_log.h
@@ -84,7 +84,7 @@ const void *census_log_read_next(size_t *bytes_available);
*/
size_t census_log_remaining_space(void);
-/* Returns the number of times gprc_stats_log_start_write() failed due to
+/* Returns the number of times grpc_stats_log_start_write() failed due to
out-of-space. */
int census_log_out_of_space_count(void);
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index a4cf6f37bd..3e8acc85e1 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -37,9 +37,9 @@
#include <string.h>
#include <grpc/census.h>
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
#include <grpc/support/time.h>
#include "src/core/ext/census/census_interface.h"
@@ -69,7 +69,7 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
for (m = md->list.head; m != NULL; m = m->next) {
if (m->md->key == GRPC_MDSTR_PATH) {
gpr_log(GPR_DEBUG, "%s",
- (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
+ (const char *)GRPC_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
}
@@ -167,11 +167,12 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
}
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
+ return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -191,6 +192,7 @@ const grpc_channel_filter grpc_client_census_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"census-client"};
const grpc_channel_filter grpc_server_census_filter = {
@@ -204,4 +206,5 @@ const grpc_channel_filter grpc_server_census_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"census-server"};
diff --git a/src/core/ext/census/mlog.h b/src/core/ext/census/mlog.h
index a256426f91..18805ad994 100644
--- a/src/core/ext/census/mlog.h
+++ b/src/core/ext/census/mlog.h
@@ -88,7 +88,7 @@ const void* census_log_read_next(size_t* bytes_available);
*/
size_t census_log_remaining_space(void);
-/* Returns the number of times gprc_stats_log_start_write() failed due to
+/* Returns the number of times grpc_stats_log_start_write() failed due to
out-of-space. */
int64_t census_log_out_of_space_count(void);
diff --git a/src/core/ext/census/trace_context.h b/src/core/ext/census/trace_context.h
index ee71fef460..1cb5e26ea7 100644
--- a/src/core/ext/census/trace_context.h
+++ b/src/core/ext/census/trace_context.h
@@ -65,4 +65,4 @@ of these do not exist. On success, returns true and false otherwise. */
bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
const size_t nbytes);
-#endif
+#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 80b4f048c2..9d46338428 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -39,10 +39,12 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -55,7 +57,7 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
-#include "src/core/lib/transport/method_config.h"
+#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
/* Client channel implementation */
@@ -81,30 +83,61 @@ static void *method_parameters_copy(void *value) {
return new_value;
}
-static int method_parameters_cmp(void *value1, void *value2) {
- const method_parameters *v1 = value1;
- const method_parameters *v2 = value2;
- const int retval = gpr_time_cmp(v1->timeout, v2->timeout);
- if (retval != 0) return retval;
- if (v1->wait_for_ready > v2->wait_for_ready) return 1;
- if (v1->wait_for_ready < v2->wait_for_ready) return -1;
- return 0;
-}
-
static const grpc_mdstr_hash_table_vtable method_parameters_vtable = {
- gpr_free, method_parameters_copy, method_parameters_cmp};
-
-static void *method_config_convert_value(
- const grpc_method_config *method_config) {
+ gpr_free, method_parameters_copy};
+
+static void *method_parameters_create_from_json(const grpc_json *json) {
+ wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
+ gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
+ for (grpc_json *field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) continue;
+ if (strcmp(field->key, "waitForReady") == 0) {
+ if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return NULL;
+ }
+ wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
+ : WAIT_FOR_READY_FALSE;
+ } else if (strcmp(field->key, "timeout") == 0) {
+ if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_STRING) return NULL;
+ size_t len = strlen(field->value);
+ if (field->value[len - 1] != 's') return NULL;
+ char *buf = gpr_strdup(field->value);
+ buf[len - 1] = '\0'; // Remove trailing 's'.
+ char *decimal_point = strchr(buf, '.');
+ if (decimal_point != NULL) {
+ *decimal_point = '\0';
+ timeout.tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (timeout.tv_nsec == -1) {
+ gpr_free(buf);
+ return NULL;
+ }
+ // There should always be exactly 3, 6, or 9 fractional digits.
+ int multiplier = 1;
+ switch (strlen(decimal_point + 1)) {
+ case 9:
+ break;
+ case 6:
+ multiplier *= 1000;
+ break;
+ case 3:
+ multiplier *= 1000000;
+ break;
+ default: // Unsupported number of digits.
+ gpr_free(buf);
+ return NULL;
+ }
+ timeout.tv_nsec *= multiplier;
+ }
+ timeout.tv_sec = gpr_parse_nonnegative_int(buf);
+ if (timeout.tv_sec == -1) return NULL;
+ gpr_free(buf);
+ }
+ }
method_parameters *value = gpr_malloc(sizeof(method_parameters));
- const gpr_timespec *timeout = grpc_method_config_get_timeout(method_config);
- value->timeout = timeout != NULL ? *timeout : gpr_time_0(GPR_TIMESPAN);
- const bool *wait_for_ready =
- grpc_method_config_get_wait_for_ready(method_config);
- value->wait_for_ready =
- wait_for_ready == NULL
- ? WAIT_FOR_READY_UNSET
- : (wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE);
+ value->timeout = timeout;
+ value->wait_for_ready = wait_for_ready;
return value;
}
@@ -123,7 +156,10 @@ typedef struct client_channel_channel_data {
/** mutex protecting all variables below in this data structure */
gpr_mu mu;
/** currently active load balancer */
+ char *lb_policy_name;
grpc_lb_policy *lb_policy;
+ /** service config in JSON form */
+ char *service_config_json;
/** maps method names to method_parameters structs */
grpc_mdstr_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
@@ -223,22 +259,19 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
channel_data *chand = arg;
+ char *lb_policy_name = NULL;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_mdstr_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
+ char *service_config_json = NULL;
if (chand->resolver_result != NULL) {
- grpc_lb_policy_args lb_policy_args;
- lb_policy_args.args = chand->resolver_result;
- lb_policy_args.client_channel_factory = chand->client_channel_factory;
-
// Find LB policy name.
- const char *lb_policy_name = NULL;
const grpc_arg *channel_arg =
- grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_POLICY_NAME);
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
lb_policy_name = channel_arg->value.string;
@@ -247,7 +280,7 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
// assume that we should use the grpclb policy, regardless of what the
// resolver actually specified.
channel_arg =
- grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_ADDRESSES);
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
@@ -272,7 +305,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == NULL) lb_policy_name = "pick_first";
-
+ // Instantiate LB policy.
+ grpc_lb_policy_args lb_policy_args;
+ lb_policy_args.args = chand->resolver_result;
+ lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
if (lb_policy != NULL) {
@@ -281,14 +317,25 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
}
+ // Find service config.
channel_arg =
- grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_SERVICE_CONFIG);
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
- GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
- method_params_table = grpc_method_config_table_convert(
- (grpc_method_config_table *)channel_arg->value.pointer.p,
- method_config_convert_value, &method_parameters_vtable);
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ service_config_json = gpr_strdup(channel_arg->value.string);
+ grpc_service_config *service_config =
+ grpc_service_config_create(service_config_json);
+ if (service_config != NULL) {
+ method_params_table = grpc_service_config_create_method_config_table(
+ service_config, method_parameters_create_from_json,
+ &method_parameters_vtable);
+ grpc_service_config_destroy(service_config);
+ }
}
+ // Before we clean up, save a copy of lb_policy_name, since it might
+ // be pointing to data inside chand->resolver_result.
+ // The copy will be saved in chand->lb_policy_name below.
+ lb_policy_name = gpr_strdup(lb_policy_name);
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = NULL;
}
@@ -299,8 +346,16 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
gpr_mu_lock(&chand->mu);
+ if (lb_policy_name != NULL) {
+ gpr_free(chand->lb_policy_name);
+ chand->lb_policy_name = lb_policy_name;
+ }
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
+ if (service_config_json != NULL) {
+ gpr_free(chand->service_config_json);
+ chand->service_config_json = service_config_json;
+ }
if (chand->method_params_table != NULL) {
grpc_mdstr_hash_table_unref(chand->method_params_table);
}
@@ -426,25 +481,58 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&chand->mu);
}
-/* Constructor for channel_data */
-static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ const grpc_channel_info *info) {
channel_data *chand = elem->channel_data;
+ gpr_mu_lock(&chand->mu);
+ if (info->lb_policy_name != NULL) {
+ *info->lb_policy_name = chand->lb_policy_name == NULL
+ ? NULL
+ : gpr_strdup(chand->lb_policy_name);
+ }
+ if (info->service_config_json != NULL) {
+ *info->service_config_json = chand->service_config_json == NULL
+ ? NULL
+ : gpr_strdup(chand->service_config_json);
+ }
+ gpr_mu_unlock(&chand->mu);
+}
+/* Constructor for channel_data */
+static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
-
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
-
+ // Initialize data members.
gpr_mu_init(&chand->mu);
+ chand->owning_stack = args->channel_stack;
grpc_closure_init(&chand->on_resolver_result_changed,
on_resolver_result_changed, chand);
- chand->owning_stack = args->channel_stack;
-
+ chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
- chand->interested_parties = grpc_pollset_set_create();
+ // Record client channel factory.
+ const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
+ GRPC_ARG_CLIENT_CHANNEL_FACTORY);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
+ grpc_client_channel_factory_ref(arg->value.pointer.p);
+ chand->client_channel_factory = arg->value.pointer.p;
+ // Instantiate resolver.
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+ chand->resolver =
+ grpc_resolver_create(exec_ctx, arg->value.string, args->channel_args,
+ chand->interested_parties);
+ if (chand->resolver == NULL) {
+ return GRPC_ERROR_CREATE("resolver creation failed");
+ }
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel_data */
@@ -465,6 +553,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
+ gpr_free(chand->lb_policy_name);
+ gpr_free(chand->service_config_json);
if (chand->method_params_table != NULL) {
grpc_mdstr_hash_table_unref(chand->method_params_table);
}
@@ -609,15 +699,21 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
"Failed to create subchannel", &error, 1));
} else if (GET_CALL(calld) == CANCELLED_CALL) {
/* already cancelled before subchannel became ready */
- fail_locked(exec_ctx, calld,
- GRPC_ERROR_CREATE_REFERENCING(
- "Cancelled before creating subchannel", &error, 1));
+ grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING(
+ "Cancelled before creating subchannel", &error, 1);
+ /* if due to deadline, attach the deadline exceeded status to the error */
+ if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
+ cancellation_error =
+ grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_DEADLINE_EXCEEDED);
+ }
+ fail_locked(exec_ctx, calld, cancellation_error);
} else {
/* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->deadline, &subchannel_call);
+ calld->call_start_time, calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@@ -735,7 +831,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
- // TODO(dgq): make this deadline configurable somehow.
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
@@ -870,7 +965,7 @@ retry:
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->deadline, &subchannel_call);
+ calld->call_start_time, calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@@ -1022,6 +1117,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&calld->mu);
GPR_ASSERT(calld->waiting_ops_count == 0);
+ if (calld->connected_subchannel != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
+ "picked");
+ }
gpr_free(calld->waiting_ops);
gpr_free(and_free_memory);
}
@@ -1048,33 +1147,10 @@ const grpc_channel_filter grpc_client_channel_filter = {
cc_init_channel_elem,
cc_destroy_channel_elem,
cc_get_peer,
+ cc_get_channel_info,
"client-channel",
};
-void grpc_client_channel_finish_initialization(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
- grpc_resolver *resolver,
- grpc_client_channel_factory *client_channel_factory) {
- /* post construction initialization: set the transport setup pointer */
- GPR_ASSERT(client_channel_factory != NULL);
- grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
- channel_data *chand = elem->channel_data;
- gpr_mu_lock(&chand->mu);
- GPR_ASSERT(!chand->resolver);
- chand->resolver = resolver;
- GRPC_RESOLVER_REF(resolver, "channel");
- if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
- chand->exit_idle_when_lb_policy_arrives) {
- chand->started_resolving = true;
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
- }
- chand->client_channel_factory = client_channel_factory;
- grpc_client_channel_factory_ref(client_channel_factory);
- gpr_mu_unlock(&chand->mu);
-}
-
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
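[Editor's note: the new method_parameters_create_from_json() above accepts service-config timeouts written as decimal strings with a trailing 's' (for example "1.500s") and requires exactly 3, 6, or 9 fractional digits. Below is a minimal, hedged sketch of that parsing rule using plain C stand-ins rather than the real gpr_timespec and gpr_parse_nonnegative_int; it is an illustration, not the gRPC implementation.]

/* Hedged sketch: parse a duration string such as "1.500s" into whole
 * seconds plus nanoseconds, mirroring the 3/6/9-fractional-digit rule
 * used by method_parameters_create_from_json() above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_duration(const char *s, long *sec, long *nsec) {
  size_t len = strlen(s);
  if (len < 2 || s[len - 1] != 's') return 0; /* must end in 's' */
  char *buf = malloc(len);
  memcpy(buf, s, len - 1);                    /* drop trailing 's' */
  buf[len - 1] = '\0';
  *sec = 0;
  *nsec = 0;
  char *dot = strchr(buf, '.');
  if (dot != NULL) {
    *dot = '\0';
    size_t digits = strlen(dot + 1);
    long mult;                                /* only 3, 6 or 9 digits allowed */
    if (digits == 9) mult = 1;
    else if (digits == 6) mult = 1000;
    else if (digits == 3) mult = 1000000;
    else { free(buf); return 0; }
    *nsec = atol(dot + 1) * mult;
  }
  *sec = atol(buf);
  free(buf);
  return 1;
}

int main(void) {
  long sec, nsec;
  if (parse_duration("1.500s", &sec, &nsec))
    printf("%ld s + %ld ns\n", sec, nsec);    /* prints: 1 s + 500000000 ns */
  return 0;
}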
diff --git a/src/core/ext/client_channel/client_channel.h b/src/core/ext/client_channel/client_channel.h
index ab5a84fdfb..f02587d0c1 100644
--- a/src/core/ext/client_channel/client_channel.h
+++ b/src/core/ext/client_channel/client_channel.h
@@ -38,6 +38,9 @@
#include "src/core/ext/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
+// Channel arg key for server URI string.
+#define GRPC_ARG_SERVER_URI "grpc.server_uri"
+
/* A client channel is a channel that begins disconnected, and can connect
to some endpoint on demand. If that endpoint disconnects, it will be
connected to again later.
@@ -47,13 +50,6 @@
extern const grpc_channel_filter grpc_client_channel_filter;
-/* Post-construction initializer to give the client channel its resolver
- and factory. */
-void grpc_client_channel_finish_initialization(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
- grpc_resolver *resolver,
- grpc_client_channel_factory *client_channel_factory);
-
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
diff --git a/src/core/ext/client_channel/client_channel_factory.c b/src/core/ext/client_channel/client_channel_factory.c
index 4900832d57..4eb35dfcf7 100644
--- a/src/core/ext/client_channel/client_channel_factory.c
+++ b/src/core/ext/client_channel/client_channel_factory.c
@@ -55,3 +55,35 @@ grpc_channel* grpc_client_channel_factory_create_channel(
return factory->vtable->create_client_channel(exec_ctx, factory, target, type,
args);
}
+
+static void* factory_arg_copy(void* factory) {
+ grpc_client_channel_factory_ref(factory);
+ return factory;
+}
+
+static void factory_arg_destroy(void* factory) {
+ // TODO(roth): Remove local exec_ctx when
+ // https://github.com/grpc/grpc/pull/8705 is merged.
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_client_channel_factory_unref(&exec_ctx, factory);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static int factory_arg_cmp(void* factory1, void* factory2) {
+ if (factory1 < factory2) return -1;
+ if (factory1 > factory2) return 1;
+ return 0;
+}
+
+static const grpc_arg_pointer_vtable factory_arg_vtable = {
+ factory_arg_copy, factory_arg_destroy, factory_arg_cmp};
+
+grpc_arg grpc_client_channel_factory_create_channel_arg(
+ grpc_client_channel_factory* factory) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_CLIENT_CHANNEL_FACTORY;
+ arg.value.pointer.p = factory;
+ arg.value.pointer.vtable = &factory_arg_vtable;
+ return arg;
+}
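[Editor's note: grpc_client_channel_factory_create_channel_arg() above packages the factory as a GRPC_ARG_POINTER whose vtable takes a ref on copy, drops a ref on destroy, and compares by address. The following is a self-contained sketch of that pointer-vtable pattern; the types are illustrative stand-ins, not gRPC's grpc_arg_pointer_vtable.]

/* Hedged sketch: a refcounted object carried behind an opaque void*
 * with copy/destroy/cmp hooks, as the channel-arg vtable above does. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  void *(*copy)(void *p);
  void (*destroy)(void *p);
  int (*cmp)(void *a, void *b);
} pointer_vtable;

typedef struct { int refcount; } fake_factory;

static void *fake_factory_copy(void *p) {
  ((fake_factory *)p)->refcount++;            /* "copy" just takes another ref */
  return p;
}
static void fake_factory_destroy(void *p) {
  fake_factory *f = p;
  if (--f->refcount == 0) free(f);            /* last unref frees the object */
}
static int fake_factory_cmp(void *a, void *b) {
  return (a > b) - (a < b);                   /* compare by identity */
}

static const pointer_vtable fake_factory_vtable = {
    fake_factory_copy, fake_factory_destroy, fake_factory_cmp};

int main(void) {
  fake_factory *f = malloc(sizeof(*f));
  f->refcount = 1;
  void *dup = fake_factory_vtable.copy(f);    /* e.g. channel args being copied */
  printf("refcount after copy: %d\n", f->refcount); /* 2 */
  fake_factory_vtable.destroy(dup);
  fake_factory_vtable.destroy(f);             /* refcount hits 0, freed */
  return 0;
}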
diff --git a/src/core/ext/client_channel/client_channel_factory.h b/src/core/ext/client_channel/client_channel_factory.h
index 2b8fc577b3..bf2764b537 100644
--- a/src/core/ext/client_channel/client_channel_factory.h
+++ b/src/core/ext/client_channel/client_channel_factory.h
@@ -39,6 +39,9 @@
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_stack.h"
+// Channel arg key for client channel factory.
+#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
+
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
typedef struct grpc_client_channel_factory_vtable
grpc_client_channel_factory_vtable;
@@ -83,4 +86,7 @@ grpc_channel *grpc_client_channel_factory_create_channel(
const char *target, grpc_client_channel_type type,
const grpc_channel_args *args);
+grpc_arg grpc_client_channel_factory_create_channel_arg(
+ grpc_client_channel_factory *factory);
+
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
diff --git a/src/core/ext/client_channel/connector.h b/src/core/ext/client_channel/connector.h
index ed7d5450de..3de061620e 100644
--- a/src/core/ext/client_channel/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -52,7 +52,7 @@ typedef struct {
const grpc_resolved_address *addr;
size_t addr_len;
/** initial connect string to send */
- gpr_slice initial_connect_string;
+ grpc_slice initial_connect_string;
/** deadline for connection */
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */
diff --git a/src/core/ext/client_channel/default_initial_connect_string.c b/src/core/ext/client_channel/default_initial_connect_string.c
index 0b251372fd..6db82d84ef 100644
--- a/src/core/ext/client_channel/default_initial_connect_string.c
+++ b/src/core/ext/client_channel/default_initial_connect_string.c
@@ -31,8 +31,8 @@
*
*/
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/lib/iomgr/resolve_address.h"
void grpc_set_default_initial_connect_string(grpc_resolved_address **addr,
- gpr_slice *initial_str) {}
+ grpc_slice *initial_str) {}
diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index ea2cbbdd97..76c78ee853 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -35,15 +35,17 @@
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/client_channel/uri_parser.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
-#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/env.h"
typedef struct http_connect_handshaker {
@@ -51,60 +53,105 @@ typedef struct http_connect_handshaker {
grpc_handshaker base;
char* proxy_server;
- char* server_name;
+
+ gpr_refcount refcount;
+ gpr_mu mu;
+
+ bool shutdown;
+ // Endpoint and read buffer to destroy after a shutdown.
+ grpc_endpoint* endpoint_to_destroy;
+ grpc_slice_buffer* read_buffer_to_destroy;
// State saved while performing the handshake.
- grpc_endpoint* endpoint;
- grpc_channel_args* args;
- grpc_handshaker_done_cb cb;
- void* user_data;
+ grpc_handshaker_args* args;
+ grpc_closure* on_handshake_done;
// Objects for processing the HTTP CONNECT request and response.
- gpr_slice_buffer write_buffer;
- gpr_slice_buffer* read_buffer; // Ownership passes through this object.
+ grpc_slice_buffer write_buffer;
grpc_closure request_done_closure;
grpc_closure response_read_closure;
grpc_http_parser http_parser;
grpc_http_response http_response;
- grpc_timer timeout_timer;
-
- gpr_refcount refcount;
} http_connect_handshaker;
// Unref and clean up handshaker.
-static void http_connect_handshaker_unref(http_connect_handshaker* handshaker) {
+static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx,
+ http_connect_handshaker* handshaker) {
if (gpr_unref(&handshaker->refcount)) {
+ gpr_mu_destroy(&handshaker->mu);
+ if (handshaker->endpoint_to_destroy != NULL) {
+ grpc_endpoint_destroy(exec_ctx, handshaker->endpoint_to_destroy);
+ }
+ if (handshaker->read_buffer_to_destroy != NULL) {
+ grpc_slice_buffer_destroy(handshaker->read_buffer_to_destroy);
+ gpr_free(handshaker->read_buffer_to_destroy);
+ }
gpr_free(handshaker->proxy_server);
- gpr_free(handshaker->server_name);
- gpr_slice_buffer_destroy(&handshaker->write_buffer);
+ grpc_slice_buffer_destroy(&handshaker->write_buffer);
grpc_http_parser_destroy(&handshaker->http_parser);
grpc_http_response_destroy(&handshaker->http_response);
gpr_free(handshaker);
}
}
-// Callback invoked when deadline is exceeded.
-static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
- if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
- grpc_endpoint_shutdown(exec_ctx, handshaker->endpoint);
+// Set args fields to NULL, saving the endpoint and read buffer for
+// later destruction.
+static void cleanup_args_for_failure_locked(
+ http_connect_handshaker* handshaker) {
+ handshaker->endpoint_to_destroy = handshaker->args->endpoint;
+ handshaker->args->endpoint = NULL;
+ handshaker->read_buffer_to_destroy = handshaker->args->read_buffer;
+ handshaker->args->read_buffer = NULL;
+ grpc_channel_args_destroy(handshaker->args->args);
+ handshaker->args->args = NULL;
+}
+
+// If the handshake failed or we're shutting down, clean up and invoke the
+// callback with the error.
+static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
+ http_connect_handshaker* handshaker,
+ grpc_error* error) {
+ if (error == GRPC_ERROR_NONE) {
+ // If we were shut down after an endpoint operation succeeded but
+ // before the endpoint callback was invoked, we need to generate our
+ // own error.
+ error = GRPC_ERROR_CREATE("Handshaker shutdown");
+ }
+ if (!handshaker->shutdown) {
+ // TODO(ctiller): It is currently necessary to shutdown endpoints
+ // before destroying them, even if we know that there are no
+ // pending read/write callbacks. This should be fixed, at which
+ // point this can be removed.
+ grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint);
+ // Not shutting down, so the handshake failed. Clean up before
+ // invoking the callback.
+ cleanup_args_for_failure_locked(handshaker);
+ // Set shutdown to true so that subsequent calls to
+ // http_connect_handshaker_shutdown() do nothing.
+ handshaker->shutdown = true;
}
- http_connect_handshaker_unref(handshaker);
+ // Invoke callback.
+ grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
}
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
http_connect_handshaker* handshaker = arg;
- if (error != GRPC_ERROR_NONE) {
- // If the write failed, invoke the callback immediately with the error.
- handshaker->cb(exec_ctx, handshaker->endpoint, handshaker->args,
- handshaker->read_buffer, handshaker->user_data,
- GRPC_ERROR_REF(error));
+ gpr_mu_lock(&handshaker->mu);
+ if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
+ // If the write failed or we're shutting down, clean up and invoke the
+ // callback with the error.
+ handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
+ gpr_mu_unlock(&handshaker->mu);
+ http_connect_handshaker_unref(exec_ctx, handshaker);
} else {
// Otherwise, read the response.
- grpc_endpoint_read(exec_ctx, handshaker->endpoint, handshaker->read_buffer,
+ // The read callback inherits our ref to the handshaker.
+ grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
+ handshaker->args->read_buffer,
&handshaker->response_read_closure);
+ gpr_mu_unlock(&handshaker->mu);
}
}
@@ -112,37 +159,41 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
http_connect_handshaker* handshaker = arg;
- if (error != GRPC_ERROR_NONE) {
- GRPC_ERROR_REF(error); // Take ref to pass to the handshake-done callback.
+ gpr_mu_lock(&handshaker->mu);
+ if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
+ // If the read failed or we're shutting down, clean up and invoke the
+ // callback with the error.
+ handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
goto done;
}
// Add buffer to parser.
- for (size_t i = 0; i < handshaker->read_buffer->count; ++i) {
- if (GPR_SLICE_LENGTH(handshaker->read_buffer->slices[i]) > 0) {
+ for (size_t i = 0; i < handshaker->args->read_buffer->count; ++i) {
+ if (GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i]) > 0) {
size_t body_start_offset = 0;
error = grpc_http_parser_parse(&handshaker->http_parser,
- handshaker->read_buffer->slices[i],
+ handshaker->args->read_buffer->slices[i],
&body_start_offset);
- if (error != GRPC_ERROR_NONE) goto done;
+ if (error != GRPC_ERROR_NONE) {
+ handshake_failed_locked(exec_ctx, handshaker, error);
+ goto done;
+ }
if (handshaker->http_parser.state == GRPC_HTTP_BODY) {
- // We've gotten back a successul response, so stop the timeout timer.
- grpc_timer_cancel(exec_ctx, &handshaker->timeout_timer);
// Remove the data we've already read from the read buffer,
// leaving only the leftover bytes (if any).
- gpr_slice_buffer tmp_buffer;
- gpr_slice_buffer_init(&tmp_buffer);
+ grpc_slice_buffer tmp_buffer;
+ grpc_slice_buffer_init(&tmp_buffer);
if (body_start_offset <
- GPR_SLICE_LENGTH(handshaker->read_buffer->slices[i])) {
- gpr_slice_buffer_add(
+ GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i])) {
+ grpc_slice_buffer_add(
&tmp_buffer,
- gpr_slice_split_tail(&handshaker->read_buffer->slices[i],
- body_start_offset));
+ grpc_slice_split_tail(&handshaker->args->read_buffer->slices[i],
+ body_start_offset));
}
- gpr_slice_buffer_addn(&tmp_buffer,
- &handshaker->read_buffer->slices[i + 1],
- handshaker->read_buffer->count - i - 1);
- gpr_slice_buffer_swap(handshaker->read_buffer, &tmp_buffer);
- gpr_slice_buffer_destroy(&tmp_buffer);
+ grpc_slice_buffer_addn(&tmp_buffer,
+ &handshaker->args->read_buffer->slices[i + 1],
+ handshaker->args->read_buffer->count - i - 1);
+ grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer);
+ grpc_slice_buffer_destroy(&tmp_buffer);
break;
}
}
@@ -159,9 +210,11 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
// complete (e.g., handling chunked transfer encoding or looking
// at the Content-Length: header).
if (handshaker->http_parser.state != GRPC_HTTP_BODY) {
- gpr_slice_buffer_reset_and_unref(handshaker->read_buffer);
- grpc_endpoint_read(exec_ctx, handshaker->endpoint, handshaker->read_buffer,
+ grpc_slice_buffer_reset_and_unref(handshaker->args->read_buffer);
+ grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
+ handshaker->args->read_buffer,
&handshaker->response_read_closure);
+ gpr_mu_unlock(&handshaker->mu);
return;
}
// Make sure we got a 2xx response.
@@ -172,11 +225,17 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
handshaker->http_response.status);
error = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
+ handshake_failed_locked(exec_ctx, handshaker, error);
+ goto done;
}
+ // Success. Invoke handshake-done callback.
+ grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
done:
- // Invoke handshake-done callback.
- handshaker->cb(exec_ctx, handshaker->endpoint, handshaker->args,
- handshaker->read_buffer, handshaker->user_data, error);
+ // Set shutdown to true so that subsequent calls to
+ // http_connect_handshaker_shutdown() do nothing.
+ handshaker->shutdown = true;
+ gpr_mu_unlock(&handshaker->mu);
+ http_connect_handshaker_unref(exec_ctx, handshaker);
}
//
@@ -186,67 +245,79 @@ done:
static void http_connect_handshaker_destroy(grpc_exec_ctx* exec_ctx,
grpc_handshaker* handshaker_in) {
http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
- http_connect_handshaker_unref(handshaker);
+ http_connect_handshaker_unref(exec_ctx, handshaker);
}
static void http_connect_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
- grpc_handshaker* handshaker) {}
+ grpc_handshaker* handshaker_in) {
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ gpr_mu_lock(&handshaker->mu);
+ if (!handshaker->shutdown) {
+ handshaker->shutdown = true;
+ grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint);
+ cleanup_args_for_failure_locked(handshaker);
+ }
+ gpr_mu_unlock(&handshaker->mu);
+}
static void http_connect_handshaker_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker_in,
- grpc_endpoint* endpoint, grpc_channel_args* args,
- gpr_slice_buffer* read_buffer, gpr_timespec deadline,
- grpc_tcp_server_acceptor* acceptor, grpc_handshaker_done_cb cb,
- void* user_data) {
+ grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args) {
http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ // Get server name from channel args.
+ const grpc_arg* arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+ char* canonical_uri =
+ grpc_resolver_factory_add_default_prefix_if_needed(arg->value.string);
+ grpc_uri* uri = grpc_uri_parse(canonical_uri, 1);
+ char* server_name = uri->path;
+ if (server_name[0] == '/') ++server_name;
// Save state in the handshaker object.
- handshaker->endpoint = endpoint;
+ gpr_mu_lock(&handshaker->mu);
handshaker->args = args;
- handshaker->cb = cb;
- handshaker->user_data = user_data;
- handshaker->read_buffer = read_buffer;
+ handshaker->on_handshake_done = on_handshake_done;
// Send HTTP CONNECT request.
- gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s",
- handshaker->server_name, handshaker->proxy_server);
+ gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", server_name,
+ handshaker->proxy_server);
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
- request.host = handshaker->proxy_server;
+ request.host = server_name;
request.http.method = "CONNECT";
- request.http.path = handshaker->server_name;
+ request.http.path = server_name;
request.handshaker = &grpc_httpcli_plaintext;
- gpr_slice request_slice = grpc_httpcli_format_connect_request(&request);
- gpr_slice_buffer_add(&handshaker->write_buffer, request_slice);
- grpc_endpoint_write(exec_ctx, endpoint, &handshaker->write_buffer,
- &handshaker->request_done_closure);
- // Set timeout timer. The timer gets a reference to the handshaker.
+ grpc_slice request_slice = grpc_httpcli_format_connect_request(&request);
+ grpc_slice_buffer_add(&handshaker->write_buffer, request_slice);
+ // Take a new ref to be held by the write callback.
gpr_ref(&handshaker->refcount);
- grpc_timer_init(exec_ctx, &handshaker->timeout_timer,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- on_timeout, handshaker, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer,
+ &handshaker->request_done_closure);
+ gpr_mu_unlock(&handshaker->mu);
+ // Clean up.
+ gpr_free(canonical_uri);
+ grpc_uri_destroy(uri);
}
-static const struct grpc_handshaker_vtable http_connect_handshaker_vtable = {
+static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_destroy, http_connect_handshaker_shutdown,
http_connect_handshaker_do_handshake};
-grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server,
- const char* server_name) {
+grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server) {
GPR_ASSERT(proxy_server != NULL);
- GPR_ASSERT(server_name != NULL);
- http_connect_handshaker* handshaker =
- gpr_malloc(sizeof(http_connect_handshaker));
+ http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
+ gpr_mu_init(&handshaker->mu);
+ gpr_ref_init(&handshaker->refcount, 1);
handshaker->proxy_server = gpr_strdup(proxy_server);
- handshaker->server_name = gpr_strdup(server_name);
- gpr_slice_buffer_init(&handshaker->write_buffer);
+ grpc_slice_buffer_init(&handshaker->write_buffer);
grpc_closure_init(&handshaker->request_done_closure, on_write_done,
handshaker);
grpc_closure_init(&handshaker->response_read_closure, on_read_done,
handshaker);
grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
&handshaker->http_response);
- gpr_ref_init(&handshaker->refcount, 1);
return &handshaker->base;
}
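[Editor's note: the rewritten handshaker above writes an HTTP CONNECT request to the proxy, then reads until grpc_http_parser reports a complete response and checks for a 2xx status. The sketch below shows the general wire shape of such a CONNECT request; the host/port is a made-up example and the exact headers emitted by grpc_httpcli_format_connect_request() may differ.]

/* Hedged sketch: the general shape of an HTTP CONNECT request like the
 * one the handshaker writes before waiting for the proxy's 2xx response.
 * "backend.example.com:443" is hypothetical, not taken from the diff. */
#include <stdio.h>

int main(void) {
  const char *server = "backend.example.com:443";
  printf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", server, server);
  return 0;
}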
diff --git a/src/core/ext/client_channel/http_connect_handshaker.h b/src/core/ext/client_channel/http_connect_handshaker.h
index c689df2b2b..ea293852e6 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.h
+++ b/src/core/ext/client_channel/http_connect_handshaker.h
@@ -36,9 +36,8 @@
#include "src/core/lib/channel/handshaker.h"
-/// Does NOT take ownership of \a proxy_server or \a server_name.
-grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server,
- const char* server_name);
+/// Does NOT take ownership of \a proxy_server.
+grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server);
/// Returns the name of the proxy to use, or NULL if no proxy is configured.
/// Caller takes ownership of result.
diff --git a/src/core/ext/client_channel/initial_connect_string.c b/src/core/ext/client_channel/initial_connect_string.c
index fb1493d77d..8ebd06c458 100644
--- a/src/core/ext/client_channel/initial_connect_string.c
+++ b/src/core/ext/client_channel/initial_connect_string.c
@@ -36,7 +36,7 @@
#include <stddef.h>
extern void grpc_set_default_initial_connect_string(
- grpc_resolved_address **addr, gpr_slice *initial_str);
+ grpc_resolved_address **addr, grpc_slice *initial_str);
static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
grpc_set_default_initial_connect_string;
@@ -47,6 +47,6 @@ void grpc_test_set_initial_connect_string_function(
}
void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- gpr_slice *initial_str) {
+ grpc_slice *initial_str) {
g_set_initial_connect_string_func(addr, initial_str);
}
diff --git a/src/core/ext/client_channel/initial_connect_string.h b/src/core/ext/client_channel/initial_connect_string.h
index 68adb0373c..876abea40e 100644
--- a/src/core/ext/client_channel/initial_connect_string.h
+++ b/src/core/ext/client_channel/initial_connect_string.h
@@ -34,17 +34,17 @@
#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
#define GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
-#include <grpc/support/slice.h>
-
+#include <grpc/slice.h>
#include "src/core/lib/iomgr/resolve_address.h"
typedef void (*grpc_set_initial_connect_string_func)(
- grpc_resolved_address **addr, gpr_slice *initial_str);
+ grpc_resolved_address **addr, grpc_slice *initial_str);
+
void grpc_test_set_initial_connect_string_function(
grpc_set_initial_connect_string_func func);
/** Set a string to be sent once connected. Optionally reset addr. */
void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- gpr_slice *connect_string);
+ grpc_slice *connect_string);
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H */
diff --git a/src/core/ext/client_channel/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 54ad779792..120c641edc 100644
--- a/src/core/ext/client_channel/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -109,10 +109,16 @@ struct grpc_lb_policy_vtable {
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+
+/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+
+/* Weak references: they don't prevent the shutdown of the LB policy. When no
+ * strong references are left but there are still weak ones, shutdown is called.
+ * Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
diff --git a/src/core/ext/client_channel/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h
index e2b8080a32..79b3dee259 100644
--- a/src/core/ext/client_channel/lb_policy_factory.h
+++ b/src/core/ext/client_channel/lb_policy_factory.h
@@ -40,6 +40,9 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolve_address.h"
+// Channel arg key for grpc_lb_addresses.
+#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
+
typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
diff --git a/src/core/ext/client_channel/lb_policy_registry.c b/src/core/ext/client_channel/lb_policy_registry.c
index f46a721f9d..90c149d947 100644
--- a/src/core/ext/client_channel/lb_policy_registry.c
+++ b/src/core/ext/client_channel/lb_policy_registry.c
@@ -35,6 +35,8 @@
#include <string.h>
+#include "src/core/lib/support/string.h"
+
#define MAX_POLICIES 10
static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES];
@@ -52,8 +54,8 @@ void grpc_lb_policy_registry_shutdown(void) {
void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
int i;
for (i = 0; i < g_number_of_lb_policies; i++) {
- GPR_ASSERT(0 != strcmp(factory->vtable->name,
- g_all_of_the_lb_policies[i]->vtable->name));
+ GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name,
+ g_all_of_the_lb_policies[i]->vtable->name));
}
GPR_ASSERT(g_number_of_lb_policies != MAX_POLICIES);
grpc_lb_policy_factory_ref(factory);
@@ -66,7 +68,7 @@ static grpc_lb_policy_factory *lookup_factory(const char *name) {
if (name == NULL) return NULL;
for (i = 0; i < g_number_of_lb_policies; i++) {
- if (0 == strcmp(name, g_all_of_the_lb_policies[i]->vtable->name)) {
+ if (0 == gpr_stricmp(name, g_all_of_the_lb_policies[i]->vtable->name)) {
return g_all_of_the_lb_policies[i];
}
}
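[Editor's note: the registry lookups above now use gpr_stricmp instead of strcmp, so LB policy names match case-insensitively. Below is a plain-C sketch of a comparison with that contract (returns 0 when the strings are equal ignoring case); it is an illustration, not gRPC's gpr_stricmp.]

/* Hedged sketch: case-insensitive string compare, same contract as the
 * gpr_stricmp call introduced in lb_policy_registry.c above. */
#include <ctype.h>
#include <stdio.h>

static int stricmp_sketch(const char *a, const char *b) {
  while (*a && *b) {
    int ca = tolower((unsigned char)*a++);
    int cb = tolower((unsigned char)*b++);
    if (ca != cb) return ca < cb ? -1 : 1;
  }
  return (*a == *b) ? 0 : (*a ? 1 : -1);
}

int main(void) {
  /* "round_robin" now matches regardless of how the policy name is cased. */
  printf("%d\n", stricmp_sketch("Round_Robin", "round_robin")); /* prints 0 */
  return 0;
}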
diff --git a/src/core/ext/client_channel/resolver_factory.c b/src/core/ext/client_channel/resolver_factory.c
index 7c3d644257..00bbb92dd0 100644
--- a/src/core/ext/client_channel/resolver_factory.c
+++ b/src/core/ext/client_channel/resolver_factory.c
@@ -43,9 +43,10 @@ void grpc_resolver_factory_unref(grpc_resolver_factory* factory) {
/** Create a resolver instance for a name */
grpc_resolver* grpc_resolver_factory_create_resolver(
- grpc_resolver_factory* factory, grpc_resolver_args* args) {
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args) {
if (factory == NULL) return NULL;
- return factory->vtable->create_resolver(factory, args);
+ return factory->vtable->create_resolver(exec_ctx, factory, args);
}
char* grpc_resolver_factory_get_default_authority(
diff --git a/src/core/ext/client_channel/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h
index 4da42e84d2..3792ddca18 100644
--- a/src/core/ext/client_channel/resolver_factory.h
+++ b/src/core/ext/client_channel/resolver_factory.h
@@ -37,6 +37,7 @@
#include "src/core/ext/client_channel/client_channel_factory.h"
#include "src/core/ext/client_channel/resolver.h"
#include "src/core/ext/client_channel/uri_parser.h"
+#include "src/core/lib/iomgr/pollset_set.h"
typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
@@ -48,6 +49,7 @@ struct grpc_resolver_factory {
typedef struct grpc_resolver_args {
grpc_uri *uri;
const grpc_channel_args *args;
+ grpc_pollset_set *pollset_set;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
@@ -55,7 +57,8 @@ struct grpc_resolver_factory_vtable {
void (*unref)(grpc_resolver_factory *factory);
/** Implementation of grpc_resolver_factory_create_resolver */
- grpc_resolver *(*create_resolver)(grpc_resolver_factory *factory,
+ grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx,
+ grpc_resolver_factory *factory,
grpc_resolver_args *args);
/** Implementation of grpc_resolver_factory_get_default_authority */
@@ -70,7 +73,8 @@ void grpc_resolver_factory_unref(grpc_resolver_factory *resolver);
/** Create a resolver instance for a name */
grpc_resolver *grpc_resolver_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_resolver_args *args);
+ grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
+ grpc_resolver_args *args);
/** Return a (freshly allocated with gpr_malloc) string representing
the default authority to use for this scheme. */
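Illustrative sketch (not from the patch): a resolver factory implementation adapted to the new vtable shape, for a hypothetical "example" scheme. The exec_ctx parameter and the pollset_set field are the additions made above.

#include <stddef.h>
#include "src/core/ext/client_channel/resolver_factory.h"

/* Hypothetical create_resolver matching the new vtable signature. */
static grpc_resolver *example_create_resolver(grpc_exec_ctx *exec_ctx,
                                              grpc_resolver_factory *factory,
                                              grpc_resolver_args *args) {
  /* args->pollset_set lets the resolver poll as part of the channel's
   * interested parties instead of owning its own pollset_set. */
  (void)exec_ctx;
  (void)factory;
  (void)args;
  return NULL; /* a real implementation would allocate and return a resolver */
}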
diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index d0f0fc3f33..5110a7cad9 100644
--- a/src/core/ext/client_channel/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -109,8 +109,8 @@ static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) {
}
static grpc_resolver_factory *resolve_factory(const char *target,
- grpc_uri **uri) {
- char *tmp;
+ grpc_uri **uri,
+ char **canonical_target) {
grpc_resolver_factory *factory = NULL;
GPR_ASSERT(uri != NULL);
@@ -118,37 +118,54 @@ static grpc_resolver_factory *resolve_factory(const char *target,
factory = lookup_factory_by_uri(*uri);
if (factory == NULL) {
grpc_uri_destroy(*uri);
- gpr_asprintf(&tmp, "%s%s", g_default_resolver_prefix, target);
- *uri = grpc_uri_parse(tmp, 1);
+ gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target);
+ *uri = grpc_uri_parse(*canonical_target, 1);
factory = lookup_factory_by_uri(*uri);
if (factory == NULL) {
grpc_uri_destroy(grpc_uri_parse(target, 0));
- grpc_uri_destroy(grpc_uri_parse(tmp, 0));
- gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, tmp);
+ grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0));
+ gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target,
+ *canonical_target);
}
- gpr_free(tmp);
}
return factory;
}
-grpc_resolver *grpc_resolver_create(const char *target,
- const grpc_channel_args *args) {
+grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_pollset_set *pollset_set) {
grpc_uri *uri = NULL;
- grpc_resolver_factory *factory = resolve_factory(target, &uri);
+ char *canonical_target = NULL;
+ grpc_resolver_factory *factory =
+ resolve_factory(target, &uri, &canonical_target);
grpc_resolver *resolver;
grpc_resolver_args resolver_args;
memset(&resolver_args, 0, sizeof(resolver_args));
resolver_args.uri = uri;
resolver_args.args = args;
- resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args);
+ resolver_args.pollset_set = pollset_set;
+ resolver =
+ grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args);
grpc_uri_destroy(uri);
+ gpr_free(canonical_target);
return resolver;
}
char *grpc_get_default_authority(const char *target) {
grpc_uri *uri = NULL;
- grpc_resolver_factory *factory = resolve_factory(target, &uri);
+ char *canonical_target = NULL;
+ grpc_resolver_factory *factory =
+ resolve_factory(target, &uri, &canonical_target);
char *authority = grpc_resolver_factory_get_default_authority(factory, uri);
grpc_uri_destroy(uri);
+ gpr_free(canonical_target);
return authority;
}
+
+char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target) {
+ grpc_uri *uri = NULL;
+ char *canonical_target = NULL;
+ resolve_factory(target, &uri, &canonical_target);
+ grpc_uri_destroy(uri);
+ return canonical_target == NULL ? gpr_strdup(target) : canonical_target;
+}
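Illustrative sketch (not from the patch): a call site updated for the new grpc_resolver_create signature, which now threads an exec_ctx and the pollset_set the resolver should poll on. The target string here is a placeholder.

#include "src/core/ext/client_channel/resolver_registry.h"

static grpc_resolver *resolve_target(grpc_exec_ctx *exec_ctx,
                                     const grpc_channel_args *args,
                                     grpc_pollset_set *interested_parties) {
  return grpc_resolver_create(exec_ctx, "dns:///example.com:443", args,
                              interested_parties);
}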
diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index 2a95a669f0..4fb16131db 100644
--- a/src/core/ext/client_channel/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -35,6 +35,7 @@
#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
#include "src/core/ext/client_channel/resolver_factory.h"
+#include "src/core/lib/iomgr/pollset_set.h"
void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
@@ -60,8 +61,9 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
If a resolver factory was not found, return NULL.
\a args is a set of channel arguments to be included in the result
(typically the set of arguments passed in from the client API). */
-grpc_resolver *grpc_resolver_create(const char *target,
- const grpc_channel_args *args);
+grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_pollset_set *pollset_set);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
@@ -71,4 +73,8 @@ grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name);
representing the default authority to pass from a client. */
char *grpc_get_default_authority(const char *target);
+/** Returns a newly allocated string containing \a target, adding the
+ default prefix if needed. */
+char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target);
+
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
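Illustrative sketch (not from the patch): using the new helper declared above. Assuming the registry's default prefix is "dns:///", a bare "localhost:50051" comes back as "dns:///localhost:50051"; a target whose scheme is already registered is returned as a fresh copy. Either way the caller owns the string.

#include <grpc/support/alloc.h>
#include "src/core/ext/client_channel/resolver_registry.h"

static void canonicalize_target_example(void) {
  char *canonical =
      grpc_resolver_factory_add_default_prefix_if_needed("localhost:50051");
  /* ... use the canonical form for logging, map keys, etc. ... */
  gpr_free(canonical);
}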
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index ab6aede23a..f294e69392 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -100,7 +100,7 @@ struct grpc_subchannel {
grpc_subchannel_key *key;
/** initial string to send to peer */
- gpr_slice initial_connect_string;
+ grpc_slice initial_connect_string;
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -119,9 +119,9 @@ struct grpc_subchannel {
gpr_mu mu;
/** have we seen a disconnection? */
- int disconnected;
+ bool disconnected;
/** are we connecting */
- int connecting;
+ bool connecting;
/** connectivity state tracking */
grpc_connectivity_state_tracker state_tracker;
@@ -132,7 +132,9 @@ struct grpc_subchannel {
/** backoff state */
gpr_backoff backoff_state;
/** do we have an active alarm? */
- int have_alarm;
+ bool have_alarm;
+ /** have we started the backoff loop */
+ bool backoff_begun;
/** our alarm */
grpc_timer alarm;
};
@@ -183,9 +185,10 @@ static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
+ return c;
}
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
@@ -205,7 +208,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free((void *)c->filters);
grpc_channel_args_destroy(c->args);
gpr_free(c->addr);
- gpr_slice_unref(c->initial_connect_string);
+ grpc_slice_unref(c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
grpc_pollset_set_destroy(c->pollset_set);
@@ -263,7 +266,7 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_subchannel_index_unregister(exec_ctx, c->key, c);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
- c->disconnected = 1;
+ c->disconnected = true;
grpc_connector_shutdown(exec_ctx, c->connector);
con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
if (con != NULL) {
@@ -333,16 +336,18 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
int initial_backoff_ms =
GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
int max_backoff_ms = GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
+ int min_backoff_ms = GRPC_SUBCHANNEL_MIN_CONNECT_TIMEOUT_SECONDS * 1000;
bool fixed_reconnect_backoff = false;
if (c->args) {
for (size_t i = 0; i < c->args->num_args; i++) {
if (0 == strcmp(c->args->args[i].key,
- "grpc.testing.fixed_reconnect_backoff")) {
+ "grpc.testing.fixed_reconnect_backoff_ms")) {
GPR_ASSERT(c->args->args[i].type == GRPC_ARG_INTEGER);
fixed_reconnect_backoff = true;
- initial_backoff_ms = max_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
+ initial_backoff_ms = min_backoff_ms = max_backoff_ms =
+ grpc_channel_arg_get_integer(
+ &c->args->args[i],
+ (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
@@ -359,17 +364,18 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
}
}
gpr_backoff_init(
- &c->backoff_state,
+ &c->backoff_state, initial_backoff_ms,
fixed_reconnect_backoff ? 1.0
: GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
fixed_reconnect_backoff ? 0.0 : GRPC_SUBCHANNEL_RECONNECT_JITTER,
- initial_backoff_ms, max_backoff_ms);
+ min_backoff_ms, max_backoff_ms);
gpr_mu_init(&c->mu);
return grpc_subchannel_index_register(exec_ctx, key, c);
}
-static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
+static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
@@ -385,12 +391,6 @@ static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
&c->connected);
}
-static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
- c->next_attempt =
- gpr_backoff_begin(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
- continue_connect(exec_ctx, c);
-}
-
grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
grpc_error **error) {
grpc_connectivity_state state;
@@ -417,6 +417,73 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
follow_up->cb(exec_ctx, follow_up->cb_arg, error);
}
+static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_subchannel *c = arg;
+ gpr_mu_lock(&c->mu);
+ c->have_alarm = false;
+ if (c->disconnected) {
+ error = GRPC_ERROR_CREATE_REFERENCING("Disconnected", &error, 1);
+ } else {
+ GRPC_ERROR_REF(error);
+ }
+ if (error == GRPC_ERROR_NONE) {
+ gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
+ c->next_attempt =
+ gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ continue_connect_locked(exec_ctx, c);
+ gpr_mu_unlock(&c->mu);
+ } else {
+ gpr_mu_unlock(&c->mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c) {
+ if (c->disconnected) {
+ /* Don't try to connect if we're already disconnected */
+ return;
+ }
+
+ if (c->connecting) {
+ /* Already connecting: don't restart */
+ return;
+ }
+
+ if (GET_CONNECTED_SUBCHANNEL(c, no_barrier) != NULL) {
+ /* Already connected: don't restart */
+ return;
+ }
+
+ if (!grpc_connectivity_state_has_watchers(&c->state_tracker)) {
+ /* Nobody is interested in connecting, so don't start just yet */
+ return;
+ }
+
+ c->connecting = true;
+ GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
+
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ if (!c->backoff_begun) {
+ c->backoff_begun = true;
+ c->next_attempt = gpr_backoff_begin(&c->backoff_state, now);
+ continue_connect_locked(exec_ctx, c);
+ } else {
+ GPR_ASSERT(!c->have_alarm);
+ c->have_alarm = true;
+ gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
+ if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
+ 0) {
+ gpr_log(GPR_INFO, "Retry immediately");
+ } else {
+ gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
+ time_til_next.tv_sec, time_til_next.tv_nsec);
+ }
+ grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
+ }
+}
+
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
@@ -448,13 +515,9 @@ void grpc_subchannel_notify_on_state_change(
w->next = &c->root_external_state_watcher;
w->prev = w->next->prev;
w->next->prev = w->prev->next = w;
- if (grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &c->state_tracker, state, &w->closure)) {
- c->connecting = 1;
- /* released by connection */
- GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
- start_connect(exec_ctx, c);
- }
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &c->state_tracker,
+ state, &w->closure);
+ maybe_start_connecting_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
}
}
@@ -541,14 +604,20 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder_set_transport(builder,
c->connecting_result.transport);
- if (grpc_channel_init_create_stack(exec_ctx, builder,
- GRPC_CLIENT_SUBCHANNEL)) {
- con = grpc_channel_stack_builder_finish(exec_ctx, builder, 0, 1,
- connection_destroy, NULL);
- } else {
+ if (!grpc_channel_init_create_stack(exec_ctx, builder,
+ GRPC_CLIENT_SUBCHANNEL)) {
grpc_channel_stack_builder_destroy(builder);
abort(); /* TODO(ctiller): what to do here (previously we just crashed) */
}
+ grpc_error *error = grpc_channel_stack_builder_finish(
+ exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con);
+ if (error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", msg);
+ grpc_error_free_string(msg);
+ GRPC_ERROR_UNREF(error);
+ abort(); /* TODO(ctiller): what to do here? */
+ }
stk = CHANNEL_STACK_FROM_CONNECTION(con);
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
@@ -574,12 +643,9 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
Re-evaluate if we really need this. */
gpr_atm_full_barrier();
GPR_ASSERT(gpr_atm_rel_cas(&c->connected_subchannel, 0, (gpr_atm)con));
- c->connecting = 0;
/* setup subchannel watching connected subchannel for changes; subchannel
- ref
- for connecting is donated
- to the state watcher */
+ ref for connecting is donated to the state watcher */
GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
grpc_connected_subchannel_notify_on_state_change(
@@ -591,28 +657,6 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE, "connected");
}
-static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = arg;
- gpr_mu_lock(&c->mu);
- c->have_alarm = 0;
- if (c->disconnected) {
- error = GRPC_ERROR_CREATE_REFERENCING("Disconnected", &error, 1);
- } else {
- GRPC_ERROR_REF(error);
- }
- if (error == GRPC_ERROR_NONE) {
- gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
- c->next_attempt =
- gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
- continue_connect(exec_ctx, c);
- gpr_mu_unlock(&c->mu);
- } else {
- gpr_mu_unlock(&c->mu);
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
- }
- GRPC_ERROR_UNREF(error);
-}
-
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_subchannel *c = arg;
@@ -620,35 +664,28 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
gpr_mu_lock(&c->mu);
+ c->connecting = false;
if (c->connecting_result.transport != NULL) {
publish_transport_locked(exec_ctx, c);
} else if (c->disconnected) {
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
} else {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- GPR_ASSERT(!c->have_alarm);
- c->have_alarm = 1;
grpc_connectivity_state_set(
exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_int(
GRPC_ERROR_CREATE_REFERENCING("Connect Failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
- gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
+
const char *errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
- if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
- 0) {
- gpr_log(GPR_INFO, "Retry immediately");
- } else {
- gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
- time_til_next.tv_sec, time_til_next.tv_nsec);
- }
- grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
grpc_error_free_string(errmsg);
+
+ maybe_start_connecting_locked(exec_ctx, c);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
}
gpr_mu_unlock(&c->mu);
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connected");
grpc_channel_args_destroy(delete_channel_args);
}
@@ -701,15 +738,15 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
- grpc_subchannel_call **call) {
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
+ gpr_timespec deadline, grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
- NULL, NULL, path, deadline, callstk);
+ NULL, NULL, path, start_time, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
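Illustrative sketch (not from the patch): the gpr_backoff call sequence the subchannel now follows, using the reordered gpr_backoff_init arguments (initial, multiplier, jitter, min, max) shown above. The constants here are examples, not the production values.

#include <grpc/support/time.h>
#include "src/core/lib/support/backoff.h"

static void backoff_demo(void) {
  gpr_backoff b;
  /* 1s initial backoff, 1.6x growth, 20% jitter, clamped to [1s, 120s] */
  gpr_backoff_init(&b, 1000, 1.6, 0.2, 1000, 120 * 1000);
  /* first attempt: begin() returns the deadline for the next try */
  gpr_timespec next_attempt =
      gpr_backoff_begin(&b, gpr_now(GPR_CLOCK_MONOTONIC));
  /* each failure: step() pushes the next deadline further out */
  next_attempt = gpr_backoff_step(&b, gpr_now(GPR_CLOCK_MONOTONIC));
  (void)next_attempt;
}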
diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 5652074074..24aa9f73dc 100644
--- a/src/core/ext/client_channel/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -97,7 +97,7 @@ grpc_subchannel *grpc_subchannel_weak_ref(
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel
@@ -111,8 +111,8 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
- grpc_subchannel_call **subchannel_call);
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
+ gpr_timespec deadline, grpc_subchannel_call **subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
@@ -164,8 +164,6 @@ struct grpc_subchannel_args {
size_t filter_count;
/** Channel arguments to be supplied to the newly created channel */
const grpc_channel_args *args;
- /** Server name */
- const char *server_name;
/** Address to connect to */
grpc_resolved_address *addr;
};
diff --git a/src/core/ext/client_channel/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c
index 227013a7d7..a1ba5e945c 100644
--- a/src/core/ext/client_channel/subchannel_index.c
+++ b/src/core/ext/client_channel/subchannel_index.c
@@ -86,7 +86,6 @@ static grpc_subchannel_key *create_key(
} else {
k->args.filters = NULL;
}
- k->args.server_name = gpr_strdup(args->server_name);
k->args.addr = gpr_malloc(sizeof(grpc_resolved_address));
k->args.addr->len = args->addr->len;
if (k->args.addr->len > 0) {
@@ -113,8 +112,6 @@ static int subchannel_key_compare(grpc_subchannel_key *a,
if (c != 0) return c;
c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
- c = strcmp(a->args.server_name, b->args.server_name);
- if (c != 0) return c;
if (a->args.addr->len) {
c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len);
if (c != 0) return c;
@@ -132,7 +129,6 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
grpc_connector_unref(exec_ctx, k->connector);
gpr_free((grpc_channel_args *)k->args.filters);
grpc_channel_args_destroy((grpc_channel_args *)k->args.args);
- gpr_free((void *)k->args.server_name);
gpr_free(k->args.addr);
gpr_free(k);
}
diff --git a/src/core/ext/client_channel/uri_parser.c b/src/core/ext/client_channel/uri_parser.c
index bcb6a1dee4..0fbc542ef8 100644
--- a/src/core/ext/client_channel/uri_parser.c
+++ b/src/core/ext/client_channel/uri_parser.c
@@ -35,13 +35,14 @@
#include <string.h>
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
/** a size_t default value... maps to all 1's */
@@ -148,38 +149,38 @@ static void parse_query_parts(grpc_uri *uri) {
uri->num_query_parts = 0;
return;
}
- gpr_slice query_slice =
- gpr_slice_new(uri->query, strlen(uri->query), do_nothing);
- gpr_slice_buffer query_parts; /* the &-separated elements of the query */
- gpr_slice_buffer query_param_parts; /* the =-separated subelements */
+ grpc_slice query_slice =
+ grpc_slice_new(uri->query, strlen(uri->query), do_nothing);
+ grpc_slice_buffer query_parts; /* the &-separated elements of the query */
+ grpc_slice_buffer query_param_parts; /* the =-separated subelements */
- gpr_slice_buffer_init(&query_parts);
- gpr_slice_buffer_init(&query_param_parts);
+ grpc_slice_buffer_init(&query_parts);
+ grpc_slice_buffer_init(&query_param_parts);
- gpr_slice_split(query_slice, QUERY_PARTS_SEPARATOR, &query_parts);
+ grpc_slice_split(query_slice, QUERY_PARTS_SEPARATOR, &query_parts);
uri->query_parts = gpr_malloc(query_parts.count * sizeof(char *));
uri->query_parts_values = gpr_malloc(query_parts.count * sizeof(char *));
uri->num_query_parts = query_parts.count;
for (size_t i = 0; i < query_parts.count; i++) {
- gpr_slice_split(query_parts.slices[i], QUERY_PARTS_VALUE_SEPARATOR,
- &query_param_parts);
+ grpc_slice_split(query_parts.slices[i], QUERY_PARTS_VALUE_SEPARATOR,
+ &query_param_parts);
GPR_ASSERT(query_param_parts.count > 0);
uri->query_parts[i] =
- gpr_dump_slice(query_param_parts.slices[0], GPR_DUMP_ASCII);
+ grpc_dump_slice(query_param_parts.slices[0], GPR_DUMP_ASCII);
if (query_param_parts.count > 1) {
/* TODO(dgq): only the first value after the separator is considered.
* Perhaps all chars after the first separator for the query part should
* be included, even if they include the separator. */
uri->query_parts_values[i] =
- gpr_dump_slice(query_param_parts.slices[1], GPR_DUMP_ASCII);
+ grpc_dump_slice(query_param_parts.slices[1], GPR_DUMP_ASCII);
} else {
uri->query_parts_values[i] = NULL;
}
- gpr_slice_buffer_reset_and_unref(&query_param_parts);
+ grpc_slice_buffer_reset_and_unref(&query_param_parts);
}
- gpr_slice_buffer_destroy(&query_parts);
- gpr_slice_buffer_destroy(&query_param_parts);
- gpr_slice_unref(query_slice);
+ grpc_slice_buffer_destroy(&query_parts);
+ grpc_slice_buffer_destroy(&query_param_parts);
+ grpc_slice_unref(query_slice);
}
grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors) {
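Illustrative sketch (not from the patch): the renamed slice API used by parse_query_parts() above, splitting a query string first on '&' and then on '='.

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include "src/core/lib/slice/slice_string_helpers.h"

static void split_query_demo(void) {
  grpc_slice query = grpc_slice_from_copied_string("a=1&b=2");
  grpc_slice_buffer parts;
  grpc_slice_buffer_init(&parts);
  grpc_slice_split(query, "&", &parts); /* parts.count == 2: "a=1", "b=2" */
  grpc_slice_buffer_destroy(&parts);    /* unrefs the contained slices */
  grpc_slice_unref(query);
}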
diff --git a/src/core/ext/client_config/message_size_filter.c b/src/core/ext/client_config/message_size_filter.c
index eaae75ae81..770526db84 100644
--- a/src/core/ext/client_config/message_size_filter.c
+++ b/src/core/ext/client_config/message_size_filter.c
@@ -34,16 +34,14 @@
#include <limits.h>
#include <string.h>
+#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/transport/method_config.h"
-
-#define DEFAULT_MAX_SEND_MESSAGE_LENGTH -1 // Unlimited.
-// The protobuf library will (by default) start warning at 100 megs.
-#define DEFAULT_MAX_RECV_MESSAGE_LENGTH (4 * 1024 * 1024)
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/service_config.h"
typedef struct message_size_limits {
int max_send_size;
@@ -56,30 +54,29 @@ static void* message_size_limits_copy(void* value) {
return new_value;
}
-static int message_size_limits_cmp(void* value1, void* value2) {
- const message_size_limits* v1 = value1;
- const message_size_limits* v2 = value2;
- if (v1->max_send_size > v2->max_send_size) return 1;
- if (v1->max_send_size < v2->max_send_size) return -1;
- if (v1->max_recv_size > v2->max_recv_size) return 1;
- if (v1->max_recv_size < v2->max_recv_size) return -1;
- return 0;
-}
-
static const grpc_mdstr_hash_table_vtable message_size_limits_vtable = {
- gpr_free, message_size_limits_copy, message_size_limits_cmp};
+ gpr_free, message_size_limits_copy};
-static void* method_config_convert_value(
- const grpc_method_config* method_config) {
+static void* message_size_limits_create_from_json(const grpc_json* json) {
+ int max_request_message_bytes = -1;
+ int max_response_message_bytes = -1;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) continue;
+ if (strcmp(field->key, "maxRequestMessageBytes") == 0) {
+ if (max_request_message_bytes >= 0) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_STRING) return NULL;
+ max_request_message_bytes = gpr_parse_nonnegative_int(field->value);
+ if (max_request_message_bytes == -1) return NULL;
+ } else if (strcmp(field->key, "maxResponseMessageBytes") == 0) {
+ if (max_response_message_bytes >= 0) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_STRING) return NULL;
+ max_response_message_bytes = gpr_parse_nonnegative_int(field->value);
+ if (max_response_message_bytes == -1) return NULL;
+ }
+ }
message_size_limits* value = gpr_malloc(sizeof(message_size_limits));
- const int32_t* max_request_message_bytes =
- grpc_method_config_get_max_request_message_bytes(method_config);
- value->max_send_size =
- max_request_message_bytes != NULL ? *max_request_message_bytes : -1;
- const int32_t* max_response_message_bytes =
- grpc_method_config_get_max_response_message_bytes(method_config);
- value->max_recv_size =
- max_response_message_bytes != NULL ? *max_response_message_bytes : -1;
+ value->max_send_size = max_request_message_bytes;
+ value->max_recv_size = max_response_message_bytes;
return value;
}
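Illustrative sketch (not from the patch): the per-method JSON fragment the parser above accepts. Both limits must be JSON strings (note the GRPC_JSON_STRING checks), and an absent field leaves the corresponding limit at -1, i.e. unset. Such a fragment would normally arrive inside a service config passed via the GRPC_ARG_SERVICE_CONFIG channel arg.

/* Example method config accepted by message_size_limits_create_from_json(). */
static const char *example_method_config_json =
    "{"
    "  \"maxRequestMessageBytes\": \"1024\","
    "  \"maxResponseMessageBytes\": \"4194304\""
    "}";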
@@ -141,7 +138,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->send_message->length, calld->max_send_size);
- gpr_slice message = gpr_slice_from_copied_string(message_string);
+ grpc_slice message = grpc_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(
exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT, &message);
@@ -195,26 +192,26 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void* ignored) {}
// Constructor for channel_data.
-static void init_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem,
- grpc_channel_element_args* args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
channel_data* chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
- chand->max_send_size = DEFAULT_MAX_SEND_MESSAGE_LENGTH;
- chand->max_recv_size = DEFAULT_MAX_RECV_MESSAGE_LENGTH;
+ chand->max_send_size = GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH;
+ chand->max_recv_size = GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH;
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_SEND_MESSAGE_LENGTH) == 0) {
- const grpc_integer_options options = {DEFAULT_MAX_SEND_MESSAGE_LENGTH, 0,
- INT_MAX};
+ const grpc_integer_options options = {
+ GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH, 0, INT_MAX};
chand->max_send_size =
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
if (strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH) == 0) {
- const grpc_integer_options options = {DEFAULT_MAX_RECV_MESSAGE_LENGTH, 0,
- INT_MAX};
+ const grpc_integer_options options = {
+ GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH, 0, INT_MAX};
chand->max_recv_size =
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
@@ -223,11 +220,18 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx,
const grpc_arg* channel_arg =
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
- GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
- chand->method_limit_table = grpc_method_config_table_convert(
- (grpc_method_config_table*)channel_arg->value.pointer.p,
- method_config_convert_value, &message_size_limits_vtable);
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ grpc_service_config* service_config =
+ grpc_service_config_create(channel_arg->value.string);
+ if (service_config != NULL) {
+ chand->method_limit_table =
+ grpc_service_config_create_method_config_table(
+ service_config, message_size_limits_create_from_json,
+ &message_size_limits_vtable);
+ grpc_service_config_destroy(service_config);
+ }
}
+ return GRPC_ERROR_NONE;
}
// Destructor for channel_data.
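Illustrative sketch (not from the patch): how an application would set the public channel args that init_channel_elem above reads. The target address is a placeholder.

#include <grpc/grpc.h>

static grpc_channel *create_capped_channel(void) {
  grpc_arg args[2];
  args[0].type = GRPC_ARG_INTEGER;
  args[0].key = GRPC_ARG_MAX_SEND_MESSAGE_LENGTH;
  args[0].value.integer = 1 * 1024 * 1024; /* 1 MiB outgoing cap */
  args[1].type = GRPC_ARG_INTEGER;
  args[1].key = GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH;
  args[1].value.integer = 4 * 1024 * 1024; /* 4 MiB incoming cap */
  grpc_channel_args channel_args = {2, args};
  return grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);
}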
@@ -248,4 +252,5 @@ const grpc_channel_filter grpc_message_size_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"message_size"};
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index 6da4febf26..bed5e6c901 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -43,30 +43,23 @@
* policy to select from this list of LB server backends.
*
* The first time the policy gets a request for a pick, a ping, or to exit the
- * idle state, \a query_for_backends() is called. It creates an instance of \a
- * lb_client_data, an internal struct meant to contain the data associated with
- * the internal communication with the LB server. This instance is created via
- * \a lb_client_data_create(). There, the call over lb_channel to pick-first
- * from {a1..an} is created, the \a LoadBalancingRequest message is assembled
- * and all necessary callbacks for the progress of the internal call configured.
+ * idle state, \a query_for_backends_locked() is called. This function sets up
+ * and initiates the internal communication with the LB server. In particular,
+ * it's responsible for instantiating the internal *streaming* call to the LB
+ * server (whichever address from {a1..an} pick-first chose). This call is
+ * serviced by two callbacks, \a lb_on_server_status_received and \a
+ * lb_on_response_received. The former will be called when the call to the LB
+ * server completes. This can happen if the LB server closes the connection or
+ * if this policy itself cancels the call (for example because it's shutting
+ * down). If the internal call times out, the usual behavior of pick-first
+ * applies, continuing to pick from the list {a1..an}.
*
- * Back in \a query_for_backends(), the internal *streaming* call to the LB
- * server (whichever address from {a1..an} pick-first chose) is kicked off.
- * It'll progress over the callbacks configured in \a lb_client_data_create()
- * (see the field docstrings of \a lb_client_data for more details).
- *
- * If the call fails with UNIMPLEMENTED, the original call will also fail.
- * There's a misconfiguration somewhere: at least one of {a1..an} isn't a LB
- * server, which contradicts the LB bit being set. If the internal call times
- * out, the usual behavior of pick-first applies, continuing to pick from the
- * list {a1..an}.
- *
- * Upon sucesss, a \a LoadBalancingResponse is expected in \a res_recv_cb. An
- * invalid one results in the termination of the streaming call. A new streaming
- * call should be created if possible, failing the original call otherwise.
- * For a valid \a LoadBalancingResponse, the server list of actual backends is
- * extracted. A Round Robin policy will be created from this list. There are two
- * possible scenarios:
+ * Upon success, the incoming \a LoadBalancingResponse is processed by \a
+ * lb_on_response_received. An invalid one terminates the streaming call. A
+ * new streaming call should be created if possible, failing the original call
+ * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
+ * backends is extracted. A Round Robin policy will be created from this list.
+ * There are two possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
* the Round Robin policy. \a rr_handover_locked() will instantiate the RR
@@ -84,10 +77,10 @@
* Once a RR policy instance is in place (and getting updated as described),
* calls to for a pick, a ping or a cancellation will be serviced right away by
* forwarding them to the RR instance. Any time there's no RR policy available
- * (ie, right after the creation of the gRPCLB policy, if an empty serverlist
- * is received, etc), pick/ping requests are added to a list of pending
- * picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
- * moment the RR policy instance becomes available.
+ * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
+ * received, etc), pick/ping requests are added to a list of pending picks/pings
+ * to be flushed and serviced as part of \a rr_handover_locked() the moment the
+ * RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
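Illustrative skeleton (not from the patch): how the two callbacks named above would be wired to the closure fields added to glb_lb_policy later in this diff, using the same three-argument grpc_closure_init seen elsewhere in the change. The stub bodies and the helper name are hypothetical.

static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
                                         grpc_error *error) {
  /* end of the LB call: retry with backoff, or clean up if shutting down */
  (void)exec_ctx; (void)arg; (void)error;
}

static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
  /* parse the LoadBalancingResponse and hand the serverlist to round_robin */
  (void)exec_ctx; (void)arg; (void)error;
}

static void init_lb_call_closures(glb_lb_policy *glb_policy) {
  grpc_closure_init(&glb_policy->lb_on_server_status_received,
                    lb_on_server_status_received, glb_policy);
  grpc_closure_init(&glb_policy->lb_on_response_received,
                    lb_on_response_received, glb_policy);
}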
@@ -113,6 +106,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/ext/client_channel/client_channel.h"
#include "src/core/ext/client_channel/client_channel_factory.h"
#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/ext/client_channel/lb_policy_registry.h"
@@ -120,12 +114,22 @@
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/static_metadata.h"
+#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
+#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
+
int grpc_lb_glb_trace = 0;
/* add lb_token of selected subchannel (address) to the call's initial
@@ -174,25 +178,33 @@ typedef struct wrapped_rr_closure_arg {
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_rr_closure_arg *wc_arg = arg;
- if (wc_arg->rr_policy != NULL) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
- /* if target is NULL, no pick has been made by the RR policy (eg, all
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
+ NULL);
+
+ if (wc_arg->rr_policy != NULL) {
+ /* if *target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
- if (wc_arg->target != NULL) {
- initial_metadata_add_lb_token(wc_arg->initial_metadata,
- wc_arg->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
+ if (*wc_arg->target != NULL) {
+ if (wc_arg->lb_token != NULL) {
+ initial_metadata_add_lb_token(wc_arg->initial_metadata,
+ wc_arg->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(wc_arg->lb_token));
+ } else {
+ gpr_log(GPR_ERROR,
+ "No LB token for connected subchannel pick %p (from RR "
+ "instance %p).",
+ (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
+ abort();
+ }
}
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
- NULL);
GPR_ASSERT(wc_arg->free_when_done != NULL);
gpr_free(wc_arg->free_when_done);
}
@@ -264,7 +276,6 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
-struct lb_client_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
/** base policy: must be first */
@@ -296,20 +307,47 @@ typedef struct glb_lb_policy {
* response has arrived. */
grpc_grpclb_serverlist *serverlist;
- /** addresses from \a serverlist */
- grpc_lb_addresses *addresses;
-
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
pending_ping *pending_pings;
- /** client data associated with the LB server communication */
- struct lb_client_data *lb_client;
+ bool shutting_down;
+
+ /************************************************************/
+ /* client data associated with the LB server communication */
+ /************************************************************/
+ /* Status from the LB server has been received. This signals the end of the LB
+ * call. */
+ grpc_closure lb_on_server_status_received;
+
+ /* A response from the LB server has been received. Process it */
+ grpc_closure lb_on_response_received;
+
+ grpc_call *lb_call; /* streaming call to the LB server */
+
+ grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
+ grpc_metadata_array
+ lb_trailing_metadata_recv; /* trailing MD from LB server */
- /** for tracking of the RR connectivity */
- rr_connectivity_data *rr_connectivity;
+ /* what's being sent to the LB server. Note that its value may vary if the LB
+ * server indicates a redirect. */
+ grpc_byte_buffer *lb_request_payload;
+
+ /* response from the LB server, if any. Processed in lb_on_response_received() */
+ grpc_byte_buffer *lb_response_payload;
+
+ /* call status code and details, set in lb_on_server_status_received() */
+ grpc_status_code lb_call_status;
+ char *lb_call_status_details;
+ size_t lb_call_status_details_capacity;
+
+ /** LB call retry backoff state */
+ gpr_backoff lb_call_backoff_state;
+
+ /** LB call retry timer */
+ grpc_timer lb_call_retry_timer;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -358,8 +396,30 @@ static int lb_token_cmp(void *token1, void *token2) {
static const grpc_lb_user_data_vtable lb_token_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
+static void parse_server(const grpc_grpclb_server *server,
+ grpc_resolved_address *addr) {
+ const uint16_t netorder_port = htons((uint16_t)server->port);
+ /* the addresses are given in binary format (an in(6)_addr struct) in
+ * server->ip_address.bytes. */
+ const grpc_grpclb_ip_address *ip = &server->ip_address;
+ memset(addr, 0, sizeof(*addr));
+ if (ip->size == 4) {
+ addr->len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+ addr4->sin_family = AF_INET;
+ memcpy(&addr4->sin_addr, ip->bytes, ip->size);
+ addr4->sin_port = netorder_port;
+ } else if (ip->size == 16) {
+ addr->len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+ addr6->sin6_family = AF_INET6;
+ memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
+ addr6->sin6_port = netorder_port;
+ }
+}
+
/* Returns addresses extracted from \a serverlist. */
-static grpc_lb_addresses *process_serverlist(
+static grpc_lb_addresses *process_serverlist_locked(
const grpc_grpclb_serverlist *serverlist) {
size_t num_valid = 0;
/* first pass: count how many are valid in order to allocate the necessary
@@ -384,40 +444,27 @@ static grpc_lb_addresses *process_serverlist(
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
/* address processing */
- const uint16_t netorder_port = htons((uint16_t)server->port);
- /* the addresses are given in binary format (a in(6)_addr struct) in
- * server->ip_address.bytes. */
- const grpc_grpclb_ip_address *ip = &server->ip_address;
grpc_resolved_address addr;
- memset(&addr, 0, sizeof(addr));
- if (ip->size == 4) {
- addr.len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr.addr;
- addr4->sin_family = AF_INET;
- memcpy(&addr4->sin_addr, ip->bytes, ip->size);
- addr4->sin_port = netorder_port;
- } else if (ip->size == 16) {
- addr.len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr.addr;
- addr6->sin6_family = AF_INET;
- memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
- addr6->sin6_port = netorder_port;
- }
+ parse_server(server, &addr);
/* lb token processing */
void *user_data;
if (server->has_load_balance_token) {
- const size_t lb_token_size =
- GPR_ARRAY_SIZE(server->load_balance_token) - 1;
+ const size_t lb_token_max_length =
+ GPR_ARRAY_SIZE(server->load_balance_token);
+ const size_t lb_token_length =
+ strnlen(server->load_balance_token, lb_token_max_length);
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
- (uint8_t *)server->load_balance_token, lb_token_size);
+ (uint8_t *)server->load_balance_token, lb_token_length);
user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
lb_token_mdstr);
} else {
- gpr_log(GPR_ERROR,
+ char *uri = grpc_sockaddr_to_uri(&addr);
+ gpr_log(GPR_INFO,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
- grpc_sockaddr_to_uri(&addr));
+ uri);
+ gpr_free(uri);
user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
}
@@ -427,10 +474,71 @@ static grpc_lb_addresses *process_serverlist(
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
-
return lb_addresses;
}
+/* returns true if the new RR policy should replace the current one, if any */
+static bool update_lb_connectivity_status_locked(
+ grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
+ grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) {
+ grpc_error *curr_state_error;
+ const grpc_connectivity_state curr_glb_state = grpc_connectivity_state_check(
+ &glb_policy->state_tracker, &curr_state_error);
+
+ /* The new connectivity status is a function of the previous one and the new
+ * input coming from the status of the RR policy.
+ *
+ * current state (grpclb's)
+ * |
+ * v || I | C | R | TF | SD | <- new state (RR's)
+ * ===++====+=====+=====+======+======+
+ * I || I | C | R | [I] | [I] |
+ * ---++----+-----+-----+------+------+
+ * C || I | C | R | [C] | [C] |
+ * ---++----+-----+-----+------+------+
+ * R || I | C | R | [R] | [R] |
+ * ---++----+-----+-----+------+------+
+ * TF || I | C | R | [TF] | [TF] |
+ * ---++----+-----+-----+------+------+
+ * SD || NA | NA | NA | NA | NA | (*)
+ * ---++----+-----+-----+------+------+
+ *
+ * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
+ * is the current state of grpclb, which is left untouched.
+ *
+ * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
+ * the previous RR instance.
+ *
+ * Note that the status is never updated to SHUTDOWN as a result of calling
+ * this function. Only glb_shutdown() has the power to set that state.
+ *
+ * (*) This function mustn't be called while shutting down. */
+ GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
+
+ switch (new_rr_state) {
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ case GRPC_CHANNEL_SHUTDOWN:
+ GPR_ASSERT(new_rr_state_error != GRPC_ERROR_NONE);
+ return false; /* don't replace the RR policy */
+ case GRPC_CHANNEL_INIT:
+ case GRPC_CHANNEL_IDLE:
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_READY:
+ GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
+ }
+
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO,
+ "Setting grpclb's state to %s from new RR policy %p state.",
+ grpc_connectivity_state_name(new_rr_state),
+ (void *)glb_policy->rr_policy);
+ }
+ grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
+ new_rr_state, GRPC_ERROR_REF(new_rr_state_error),
+ "update_lb_connectivity_status_locked");
+ return true;
+}
+
/* perform a pick over \a rr_policy. Given that a pick can return immediately
* (ignoring its completion callback) we need to perform the cleanups this
* callback would otherwise be responsible for */
@@ -448,7 +556,7 @@ static bool pick_from_internal_rr_locked(
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(pick_args->initial_metadata,
@@ -461,7 +569,6 @@ static bool pick_from_internal_rr_locked(
* pending pick list inside the RR policy (glb_policy->rr_policy).
* Eventually, wrapped_on_complete will be called, which will -among other
* things- add the LB token to the call's initial metadata */
-
return pick_done;
}
@@ -470,57 +577,108 @@ static grpc_lb_policy *create_rr_locked(
glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
- if (glb_policy->addresses != NULL) {
- /* dispose of the previous version */
- grpc_lb_addresses_destroy(glb_policy->addresses);
- }
- glb_policy->addresses = process_serverlist(serverlist);
-
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
+ grpc_lb_addresses *addresses = process_serverlist_locked(serverlist);
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
- const grpc_arg arg =
- grpc_lb_addresses_create_channel_arg(glb_policy->addresses);
+ const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
args.args = grpc_channel_args_copy_and_add_and_remove(
glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
1);
grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ GPR_ASSERT(rr != NULL);
+ grpc_lb_addresses_destroy(addresses);
grpc_channel_args_destroy(args.args);
-
return rr;
}
+static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy, grpc_error *error) {
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
- glb_policy->rr_policy =
+
+ if (glb_policy->shutting_down) return;
+
+ grpc_lb_policy *new_rr_policy =
create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
+ if (new_rr_policy == NULL) {
+ gpr_log(GPR_ERROR,
+ "Failure creating a RoundRobin policy for serverlist update with "
+ "%lu entries. The previous RR instance (%p), if any, will continue "
+ "to be used. Future updates from the LB will attempt to create new "
+ "instances.",
+ (unsigned long)glb_policy->serverlist->num_servers,
+ (void *)glb_policy->rr_policy);
+ return;
+ }
+
+ grpc_error *new_rr_state_error = NULL;
+ const grpc_connectivity_state new_rr_state =
+ grpc_lb_policy_check_connectivity(exec_ctx, new_rr_policy,
+ &new_rr_state_error);
+ /* Connectivity state is a function of the new RR policy just created */
+ const bool replace_old_rr = update_lb_connectivity_status_locked(
+ exec_ctx, glb_policy, new_rr_state, new_rr_state_error);
+
+ if (!replace_old_rr) {
+ /* dispose of the new RR policy that won't be used after all */
+ GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO,
+ "Keeping old RR policy (%p) despite new serverlist: new RR "
+ "policy was in %s connectivity state.",
+ (void *)glb_policy->rr_policy,
+ grpc_connectivity_state_name(new_rr_state));
+ }
+ return;
+ }
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
+ (void *)new_rr_policy, (void *)glb_policy->rr_policy);
}
- GPR_ASSERT(glb_policy->rr_policy != NULL);
+
+ if (glb_policy->rr_policy != NULL) {
+ /* if we are phasing out an existing RR instance, unref it. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
+ }
+
+ /* Finally update the RR policy to the newly created one */
+ glb_policy->rr_policy = new_rr_policy;
+
+ /* Add the gRPC LB's interested_parties pollset_set to that of the newly
+ * created RR policy. This will make the RR policy progress upon activity on
+ * gRPC LB, which in turn is tied to the application's call */
grpc_pollset_set_add_pollset_set(exec_ctx,
glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
- glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
- exec_ctx, glb_policy->rr_policy, &error);
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, &glb_policy->rr_connectivity->state,
- &glb_policy->rr_connectivity->on_change);
- grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- glb_policy->rr_connectivity->state,
- GRPC_ERROR_REF(error), "rr_handover");
+
+ /* Allocate the data for the tracking of the new RR policy's connectivity.
+ * It'll be deallocated in glb_rr_connectivity_changed() */
+ rr_connectivity_data *rr_connectivity =
+ gpr_malloc(sizeof(rr_connectivity_data));
+ memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
+ grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
+ rr_connectivity);
+ rr_connectivity->glb_policy = glb_policy;
+ rr_connectivity->state = new_rr_state;
+
+ /* Subscribe to changes to the connectivity of the new RR */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
- /* flush pending ops */
+ /* Flush the pending picks and pings that were waiting for an RR policy */
pending_pick *pp;
while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next;
@@ -551,47 +709,41 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- rr_connectivity_data *rr_conn_data = arg;
- glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
-
- if (rr_conn_data->state == GRPC_CHANNEL_SHUTDOWN) {
- if (glb_policy->serverlist != NULL) {
- /* a RR policy is shutting down but there's a serverlist available ->
- * perform a handover */
- gpr_mu_lock(&glb_policy->mu);
- rr_handover_locked(exec_ctx, glb_policy, error);
- gpr_mu_unlock(&glb_policy->mu);
- } else {
- /* shutting down and no new serverlist available. Bail out. */
- gpr_free(rr_conn_data);
- }
- } else {
- if (error == GRPC_ERROR_NONE) {
- gpr_mu_lock(&glb_policy->mu);
- /* RR not shutting down. Mimic the RR's policy state */
- grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- rr_conn_data->state, GRPC_ERROR_REF(error),
- "glb_rr_connectivity_changed");
- /* resubscribe */
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_conn_data->state,
- &rr_conn_data->on_change);
- gpr_mu_unlock(&glb_policy->mu);
- } else { /* error */
- gpr_free(rr_conn_data);
- }
+ rr_connectivity_data *rr_connectivity = arg;
+ glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
+
+ gpr_mu_lock(&glb_policy->mu);
+ const bool shutting_down = glb_policy->shutting_down;
+ bool unref_needed = false;
+ GRPC_ERROR_REF(error);
+
+ if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN || shutting_down) {
+ /* RR policy shutting down. Don't renew the subscription; instead, free the
+ * arg of this callback. We also need to stash away the current policy to
+ * be UNREF'd after releasing the lock. Otherwise, if the UNREF is the last
+ * one, the policy would be destroyed, alongside the lock, which would
+ * result in a use-after-free */
+ unref_needed = true;
+ gpr_free(rr_connectivity);
+ } else { /* rr state != SHUTDOWN && !shutting down: biz as usual */
+ update_lb_connectivity_status_locked(exec_ctx, glb_policy,
+ rr_connectivity->state, error);
+ /* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
}
+ gpr_mu_unlock(&glb_policy->mu);
+ if (unref_needed) {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "rr_connectivity_cb");
+ }
+ GRPC_ERROR_UNREF(error);
}
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- /* Get server name. */
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
- const char *server_name =
- arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
-
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
@@ -599,7 +751,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* time, this should be changed to allow a list with no balancer addresses,
* since the resolver might fail to return a balancer address even when
* this is the right LB policy to use. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
@@ -611,13 +764,25 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy));
memset(glb_policy, 0, sizeof(*glb_policy));
+ /* Get server name. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+ grpc_uri *uri = grpc_uri_parse(arg->value.string, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ glb_policy->server_name =
+ gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
+ glb_policy->server_name);
+ }
+ grpc_uri_destroy(uri);
+
/* All input addresses in addresses come from a resolver that claims
* they are LB services. It's the resolver's responsibility to make sure
- * this
- * policy is only instantiated and used in that case.
+ * this policy is only instantiated and used in that case.
*
* Create a client channel over them to communicate with a LB service */
- glb_policy->server_name = gpr_strdup(server_name);
glb_policy->cc_factory = args->client_channel_factory;
glb_policy->args = grpc_channel_args_copy(args->args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
@@ -661,9 +826,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* We need the LB channel to return addresses with is_balancer=false
* so that it does not wind up recursively using the grpclb LB policy,
* as per the special case logic in client_channel.c.
+ *
+ * Finally, we also strip out the channel arg for the server URI,
+ * since that will be different for the LB channel than for the parent
+ * channel. (The client channel factory will re-add this arg with
+ * the right value.)
*/
- static const char *keys_to_remove[] = {GRPC_ARG_LB_POLICY_NAME,
- GRPC_ARG_LB_ADDRESSES};
+ static const char *keys_to_remove[] = {
+ GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args = grpc_channel_args_copy_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove));
glb_policy->lb_channel = grpc_client_channel_factory_create_channel(
@@ -682,18 +852,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
- rr_connectivity_data *rr_connectivity =
- gpr_malloc(sizeof(rr_connectivity_data));
- memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity);
- rr_connectivity->glb_policy = glb_policy;
- glb_policy->rr_connectivity = rr_connectivity;
-
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
gpr_mu_init(&glb_policy->mu);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
+
return &glb_policy->base;
}
@@ -710,21 +873,38 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
- grpc_lb_addresses_destroy(glb_policy->addresses);
gpr_free(glb_policy);
}
-static void lb_client_data_destroy(struct lb_client_data *lb_client);
static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
+ glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
pending_ping *pping = glb_policy->pending_pings;
glb_policy->pending_pings = NULL;
+ if (glb_policy->rr_policy) {
+ GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
+ }
+ grpc_connectivity_state_set(
+ exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+ GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
+  /* We need a copy of the lb_call pointer because we can't cancel the call
+ * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
+ * the cancel, needs to acquire that same lock */
+ grpc_call *lb_call = glb_policy->lb_call;
gpr_mu_unlock(&glb_policy->mu);
+ /* glb_policy->lb_call and this local lb_call must be consistent at this point
+ * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
+ * of query_for_backends_locked, which can only be invoked while
+ * glb_policy->shutting_down is false. */
+ if (lb_call != NULL) {
+ grpc_call_cancel(lb_call, NULL);
+ /* lb_on_server_status_received will pick up the cancel and clean up */
+ }
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
@@ -739,21 +919,6 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_ERROR_NONE, NULL);
pping = next;
}
-
- if (glb_policy->rr_policy) {
- /* unsubscribe */
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, NULL,
- &glb_policy->rr_connectivity->on_change);
- GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
- }
-
- lb_client_data_destroy(glb_policy->lb_client);
- glb_policy->lb_client = NULL;
-
- grpc_connectivity_state_set(
- exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
}
static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@@ -780,17 +945,12 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static grpc_call *lb_client_data_get_call(struct lb_client_data *lb_client);
static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
- if (glb_policy->lb_client != NULL) {
- /* cancel the call to the load balancer service, if any */
- grpc_call_cancel(lb_client_data_get_call(glb_policy->lb_client), NULL);
- }
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -810,18 +970,20 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy);
-static void start_picking(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy);
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
glb_policy->started_picking = true;
- query_for_backends(exec_ctx, glb_policy);
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
}
@@ -847,8 +1009,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (glb_policy->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "about to PICK from 0x%" PRIxPTR "",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
+ (void *)glb_policy, (void *)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
@@ -865,11 +1027,17 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG,
+ "No RR policy in grpclb instance %p. Adding to grpclb's pending "
+ "picks",
+ (void *)(glb_policy));
+ }
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
pick_done = false;
}
@@ -898,7 +1066,7 @@ static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
}
gpr_mu_unlock(&glb_policy->mu);
@@ -916,250 +1084,190 @@ static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&glb_policy->mu);
}
-/*
- * lb_client_data
- *
- * Used internally for the client call to the LB */
-typedef struct lb_client_data {
- gpr_mu mu;
-
- /* called once initial metadata's been sent */
- grpc_closure md_sent;
-
- /* called once the LoadBalanceRequest has been sent to the LB server. See
- * src/proto/grpc/.../load_balancer.proto */
- grpc_closure req_sent;
-
- /* A response from the LB server has been received (or error). Process it */
- grpc_closure res_rcvd;
-
- /* After the client has sent a close to the LB server */
- grpc_closure close_sent;
-
- /* ... and the status from the LB server has been received */
- grpc_closure srv_status_rcvd;
-
- grpc_call *lb_call; /* streaming call to the LB server, */
- gpr_timespec deadline; /* for the streaming call to the LB server */
-
- grpc_metadata_array initial_metadata_recv; /* initial MD from LB server */
- grpc_metadata_array trailing_metadata_recv; /* trailing MD from LB server */
-
- /* what's being sent to the LB server. Note that its value may vary if the LB
- * server indicates a redirect. */
- grpc_byte_buffer *request_payload;
-
- /* response from the LB server, if any. Processed in res_recv_cb() */
- grpc_byte_buffer *response_payload;
-
- /* the call's status and status detailset in srv_status_rcvd_cb() */
- grpc_status_code status;
- char *status_details;
- size_t status_details_capacity;
-
- /* pointer back to the enclosing policy */
- glb_lb_policy *glb_policy;
-} lb_client_data;
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_call_init_locked(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
-
- lb_client_data *lb_client = gpr_malloc(sizeof(lb_client_data));
- memset(lb_client, 0, sizeof(lb_client_data));
-
- gpr_mu_init(&lb_client->mu);
- grpc_closure_init(&lb_client->md_sent, md_sent_cb, lb_client);
-
- grpc_closure_init(&lb_client->req_sent, req_sent_cb, lb_client);
- grpc_closure_init(&lb_client->res_rcvd, res_recv_cb, lb_client);
- grpc_closure_init(&lb_client->close_sent, close_sent_cb, lb_client);
- grpc_closure_init(&lb_client->srv_status_rcvd, srv_status_rcvd_cb, lb_client);
-
- lb_client->deadline = glb_policy->deadline;
+ GPR_ASSERT(!glb_policy->shutting_down);
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
- lb_client->lb_call = grpc_channel_create_pollset_set_call(
+ glb_policy->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
"/grpc.lb.v1.LoadBalancer/BalanceLoad", glb_policy->server_name,
- lb_client->deadline, NULL);
+ glb_policy->deadline, NULL);
- grpc_metadata_array_init(&lb_client->initial_metadata_recv);
- grpc_metadata_array_init(&lb_client->trailing_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
grpc_grpclb_request *request =
grpc_grpclb_request_create(glb_policy->server_name);
- gpr_slice request_payload_slice = grpc_grpclb_request_encode(request);
- lb_client->request_payload =
+ grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
+ glb_policy->lb_request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- gpr_slice_unref(request_payload_slice);
+ grpc_slice_unref(request_payload_slice);
grpc_grpclb_request_destroy(request);
- lb_client->status_details = NULL;
- lb_client->status_details_capacity = 0;
- lb_client->glb_policy = glb_policy;
- return lb_client;
+ glb_policy->lb_call_status_details = NULL;
+ glb_policy->lb_call_status_details_capacity = 0;
+
+ grpc_closure_init(&glb_policy->lb_on_server_status_received,
+ lb_on_server_status_received, glb_policy);
+ grpc_closure_init(&glb_policy->lb_on_response_received,
+ lb_on_response_received, glb_policy);
+
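+  /* Configure exponential backoff for re-establishing the LB call: the state
+   * is reset on successful responses and stepped in
+   * lb_on_server_status_received before arming the retry timer. */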
+ gpr_backoff_init(&glb_policy->lb_call_backoff_state,
+ GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_GRPCLB_RECONNECT_JITTER,
+ GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
}
-static void lb_client_data_destroy(lb_client_data *lb_client) {
- grpc_call_destroy(lb_client->lb_call);
- grpc_metadata_array_destroy(&lb_client->initial_metadata_recv);
- grpc_metadata_array_destroy(&lb_client->trailing_metadata_recv);
+static void lb_call_destroy_locked(glb_lb_policy *glb_policy) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ grpc_call_destroy(glb_policy->lb_call);
+ glb_policy->lb_call = NULL;
- grpc_byte_buffer_destroy(lb_client->request_payload);
+ grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
- gpr_free(lb_client->status_details);
- gpr_mu_destroy(&lb_client->mu);
- gpr_free(lb_client);
-}
-static grpc_call *lb_client_data_get_call(lb_client_data *lb_client) {
- return lb_client->lb_call;
+ grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
+ gpr_free(glb_policy->lb_call_status_details);
}
/*
* Auxiliary functions and LB client callbacks.
*/
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
+ if (glb_policy->shutting_down) return;
+
+ lb_call_init_locked(glb_policy);
+
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
+ (void *)glb_policy, (void *)glb_policy->lb_call);
+ }
+ GPR_ASSERT(glb_policy->lb_call != NULL);
- glb_policy->lb_client = lb_client_data_create(glb_policy);
grpc_call_error call_error;
- grpc_op ops[1];
+ grpc_op ops[4];
memset(ops, 0, sizeof(ops));
+
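+  /* This first batch sends the initial metadata and the request and asks for
+   * the call's final status, so its completion callback
+   * (lb_on_server_status_received) only runs once the LB call terminates. */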
grpc_op *op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->md_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- op = ops;
- op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
- op->data.recv_status_on_client.trailing_metadata =
- &glb_policy->lb_client->trailing_metadata_recv;
- op->data.recv_status_on_client.status = &glb_policy->lb_client->status;
- op->data.recv_status_on_client.status_details =
- &glb_policy->lb_client->status_details;
- op->data.recv_status_on_client.status_details_capacity =
- &glb_policy->lb_client->status_details_capacity;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata = &glb_policy->lb_initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->srv_status_rcvd);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
- grpc_op ops[1];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message = lb_client->request_payload;
+ op->data.send_message = glb_policy->lb_request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->req_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
- grpc_op ops[2];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
-
- op->op = GRPC_OP_RECV_INITIAL_METADATA;
- op->data.recv_initial_metadata = &lb_client->initial_metadata_recv;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata =
+ &glb_policy->lb_trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
+ op->data.recv_status_on_client.status_details =
+ &glb_policy->lb_call_status_details;
+ op->data.recv_status_on_client.status_details_capacity =
+ &glb_policy->lb_call_status_details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+ * count goes to zero) to be unref'd in lb_on_server_status_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_server_status_received);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
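+  /* Second batch: a single RECV_MESSAGE op, re-issued from
+   * lb_on_response_received after each response to form the serverlist
+   * update loop. */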
+ op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd);
+ /* take another weak ref to be unref'd in lb_on_response_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
- if (lb_client->response_payload != NULL) {
+ gpr_mu_lock(&glb_policy->mu);
+ if (glb_policy->lb_response_payload != NULL) {
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
- * lb_client->response_payload, for a serverlist. */
+ * glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lb_client->response_payload);
- gpr_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
- grpc_byte_buffer_destroy(lb_client->response_payload);
+ grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
+ grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
+ grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
- gpr_slice_unref(response_slice);
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ grpc_slice_unref(response_slice);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
+ for (size_t i = 0; i < serverlist->num_servers; ++i) {
+ grpc_resolved_address addr;
+ parse_server(serverlist->servers[i], &addr);
+ char *ipport;
+ grpc_sockaddr_to_string(&ipport, &addr, false);
+ gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+ gpr_free(ipport);
+ }
}
/* update serverlist */
if (serverlist->num_servers > 0) {
- gpr_mu_lock(&lb_client->glb_policy->mu);
- if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
- serverlist)) {
+ if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
+ grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
- if (lb_client->glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(lb_client->glb_policy->serverlist);
+ grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- /* and update the copy in the glb_lb_policy instance */
- lb_client->glb_policy->serverlist = serverlist;
- }
- if (lb_client->glb_policy->rr_policy == NULL) {
- /* initial "handover", in this case from a null RR policy, meaning
- * it'll just create the first RR policy instance */
- rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
- } else {
- /* unref the RR policy, eventually leading to its substitution with a
- * new one constructed from the received serverlist (see
- * glb_rr_connectivity_changed) */
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
- "serverlist_received");
+ /* and update the copy in the glb_lb_policy instance. This serverlist
+ * instance will be destroyed either upon the next update or in
+ * glb_destroy() */
+ glb_policy->serverlist = serverlist;
+
+ rr_handover_locked(exec_ctx, glb_policy);
}
- gpr_mu_unlock(&lb_client->glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@@ -1167,60 +1275,95 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
"response with > 0 servers is received");
}
}
+ } else { /* serverlist == NULL */
+ gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+ grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
+ grpc_slice_unref(response_slice);
+ }
+ if (!glb_policy->shutting_down) {
/* keep listening for serverlist updates */
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* reuse the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd); /* loop */
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
- return;
}
-
- GPR_ASSERT(serverlist == NULL);
- gpr_log(GPR_ERROR, "Invalid LB response received: '%s'",
- gpr_dump_slice(response_slice, GPR_DUMP_ASCII));
- gpr_slice_unref(response_slice);
-
- /* Disconnect from server returning invalid response. */
- op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
- op->flags = 0;
- op->reserved = NULL;
- op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->close_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
+ gpr_mu_unlock(&glb_policy->mu);
+ } else { /* empty payload: call cancelled. */
+ /* dispose of the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() and reused in every reception loop */
+ gpr_mu_unlock(&glb_policy->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_empty_payload");
}
- /* empty payload: call cancelled by server. Cleanups happening in
- * srv_status_rcvd_cb */
}
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "Close from LB client sent. Waiting from server status now");
+static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ if (!glb_policy->shutting_down) {
+ if (grpc_lb_glb_trace) {
+      gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
+ gpr_mu_unlock(&glb_policy->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_on_retry_timer");
}
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "status from lb server received. Status = %d, Details = '%s', "
- "Capacity "
- "= %lu",
- lb_client->status, lb_client->status_details,
- (unsigned long)lb_client->status_details_capacity);
+ gpr_log(GPR_DEBUG,
+ "Status from LB server received. Status = %d, Details = '%s', "
+ "(call: %p)",
+ glb_policy->lb_call_status, glb_policy->lb_call_status_details,
+ (void *)glb_policy->lb_call);
}
- /* TODO(dgq): deal with stream termination properly (fire up another one?
- * fail the original call?) */
+
+  /* We need to perform cleanups no matter what. */
+ lb_call_destroy_locked(glb_policy);
+
+ if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try =
+ gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
+ timeout.tv_sec, timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "... retrying immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ lb_call_on_retry_timer, glb_policy, now);
+ }
+ gpr_mu_unlock(&glb_policy->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
index a8881004a0..837e9c1113 100644
--- a/src/core/ext/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
@@ -90,18 +90,18 @@ grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
return req;
}
-gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
size_t encoded_length;
pb_ostream_t sizestream;
pb_ostream_t outputstream;
- gpr_slice slice;
+ grpc_slice slice;
memset(&sizestream, 0, sizeof(pb_ostream_t));
pb_encode(&sizestream, grpc_lb_v1_LoadBalanceRequest_fields, request);
encoded_length = sizestream.bytes_written;
- slice = gpr_slice_malloc(encoded_length);
+ slice = grpc_slice_malloc(encoded_length);
outputstream =
- pb_ostream_from_buffer(GPR_SLICE_START_PTR(slice), encoded_length);
+ pb_ostream_from_buffer(GRPC_SLICE_START_PTR(slice), encoded_length);
GPR_ASSERT(pb_encode(&outputstream, grpc_lb_v1_LoadBalanceRequest_fields,
request) != 0);
return slice;
@@ -113,10 +113,10 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
- gpr_slice encoded_grpc_grpclb_response) {
+ grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
- pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_grpc_grpclb_response),
- GPR_SLICE_LENGTH(encoded_grpc_grpclb_response));
+ pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
+ GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
if (!pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res)) {
@@ -132,12 +132,12 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
}
grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
- gpr_slice encoded_grpc_grpclb_response) {
+ grpc_slice encoded_grpc_grpclb_response) {
bool status;
decode_serverlist_arg arg;
pb_istream_t stream =
- pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_grpc_grpclb_response),
- GPR_SLICE_LENGTH(encoded_grpc_grpclb_response));
+ pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
+ GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
index 079a64a3f3..b4c967e426 100644
--- a/src/core/ext/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
#define GRPC_CORE_EXT_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
-#include <grpc/support/slice_buffer.h>
+#include <grpc/slice_buffer.h>
#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
@@ -60,7 +60,7 @@ typedef struct grpc_grpclb_serverlist {
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
/** Protocol Buffers v3-encode \a request */
-gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
/** Destroy \a request */
void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
@@ -68,11 +68,11 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
/** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
- gpr_slice encoded_grpc_grpclb_response);
+ grpc_slice encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
- gpr_slice encoded_grpc_grpclb_response);
+ grpc_slice encoded_grpc_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index 53fed22bae..e36d0966f8 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -77,7 +77,7 @@ typedef struct _grpc_lb_v1_Server {
bool has_port;
int32_t port;
bool has_load_balance_token;
- char load_balance_token[65];
+ char load_balance_token[50];
bool has_drop_request;
bool drop_request;
/* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
@@ -172,7 +172,7 @@ extern const pb_field_t grpc_lb_v1_Server_fields[5];
#define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
#define grpc_lb_v1_InitialLoadBalanceResponse_size 90
/* grpc_lb_v1_ServerList_size depends on runtime parameters */
-#define grpc_lb_v1_Server_size 98
+#define grpc_lb_v1_Server_size 83
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 5d3433df74..b9cfe6b5c0 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -209,7 +209,7 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
/* Check atomically for a selected channel */
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected != NULL) {
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
}
@@ -218,7 +218,7 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
selected = GET_SELECTED(p);
if (selected) {
gpr_mu_unlock(&p->mu);
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
} else {
if (!p->started_picking) {
@@ -292,6 +292,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
} else {
loop:
switch (p->checking_connectivity) {
+ case GRPC_CHANNEL_INIT:
+ GPR_UNREACHABLE_CODE(return );
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
@@ -310,7 +312,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = selected;
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@@ -436,15 +438,10 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- /* Get server name. */
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
- const char *server_name =
- arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
-
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
@@ -470,9 +467,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- /* server_name will be copied as part of the subchannel creation. This makes
- * the copying of server_name (a borrowed pointer) OK. */
- sc_args.server_name = server_name;
sc_args.addr = &addresses->addresses[i].address;
sc_args.args = args->args;
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index c0743b00e8..f0305473d2 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -116,15 +116,23 @@ typedef struct {
grpc_closure connectivity_changed_closure;
/** this subchannels current position in subchannel->ready_list */
ready_list *ready_list_node;
- /** last observed connectivity */
- grpc_connectivity_state connectivity_state;
+ /** last observed connectivity. Not updated by
+ * \a grpc_subchannel_notify_on_state_change. Used to determine the previous
+ * state while processing the new state in \a rr_connectivity_changed */
+ grpc_connectivity_state prev_connectivity_state;
+ /** current connectivity state. Updated by \a
+ * grpc_subchannel_notify_on_state_change */
+ grpc_connectivity_state curr_connectivity_state;
/** the subchannel's target user data */
void *user_data;
+ /** vtable to operate over \a user_data */
+ const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
+ gpr_mu mu;
/** total number of addresses received at creation time */
size_t num_addresses;
@@ -133,8 +141,11 @@ struct round_robin_lb_policy {
size_t num_subchannels;
subchannel_data **subchannels;
- /** mutex protecting remaining members */
- gpr_mu mu;
+ /** how many subchannels are in TRANSIENT_FAILURE */
+ size_t num_transient_failures;
+ /** how many subchannels are IDLE */
+ size_t num_idle;
+
/** have we started picking? */
int started_picking;
/** are we shutting down? */
@@ -186,9 +197,13 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
}
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
- (void *)p->ready_list_last_pick,
- (void *)p->ready_list_last_pick->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
+ "CSC %p)",
+ (void *)p, (void *)p->ready_list_last_pick,
+ (void *)p->ready_list_last_pick->subchannel,
+ (void *)grpc_subchannel_get_connected_subchannel(
+ p->ready_list_last_pick->subchannel));
}
}
@@ -252,12 +267,25 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
+static bool is_ready_list_empty(round_robin_lb_policy *p) {
+ return p->ready_list.prev == NULL;
+}
+
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
+ }
+
for (size_t i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(sd->user_data);
+ }
gpr_free(sd);
}
@@ -285,6 +313,9 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
gpr_mu_lock(&p->mu);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
+ }
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@@ -296,7 +327,7 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "shutdown");
+ GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -363,18 +394,18 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
- if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%" PRIuPTR, (void *)p,
- p->num_subchannels);
- }
-
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
- sd->connectivity_state = GRPC_CHANNEL_IDLE;
+ /* use some sentinel value outside of the range of grpc_connectivity_state
+ * to signal an undefined previous state. We won't be referring to this
+ * value again and it'll be overwritten after the first call to
+ * rr_connectivity_changed */
+ sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
+ sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->connectivity_state, &sd->connectivity_changed_closure);
- GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
+ &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
}
}
@@ -395,9 +426,16 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
+ }
+
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
- *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "rr_picked");
if (user_data != NULL) {
*user_data = selected->user_data;
@@ -428,123 +466,184 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
+static void update_state_counters(subchannel_data *sd) {
+ round_robin_lb_policy *p = sd->policy;
+
+ /* update p->num_transient_failures (resp. p->num_idle): if the previous
+ * state was TRANSIENT_FAILURE (resp. IDLE), decrement
+ * p->num_transient_failures (resp. p->num_idle). */
+ if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ GPR_ASSERT(p->num_transient_failures > 0);
+ --p->num_transient_failures;
+ } else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
+ GPR_ASSERT(p->num_idle > 0);
+ --p->num_idle;
+ }
+}
+
+/* sd is the subchannel_data associated with the updated subchannel.
+ * \a error will only be used upon policy transition to TRANSIENT_FAILURE
+ * or SHUTDOWN */
+static grpc_connectivity_state update_lb_connectivity_status(
+ grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
+ /* In priority order. The first rule to match terminates the search (ie, if we
+ * are on rule n, all previous rules were unfulfilled).
+ *
+ * 1) RULE: ANY subchannel is READY => policy is READY.
+ * CHECK: At least one subchannel is ready iff p->ready_list is NOT empty.
+ *
+ * 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING.
+ * CHECK: sd->curr_connectivity_state == CONNECTING.
+ *
+ * 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
+ * CHECK: p->num_subchannels = 0.
+ *
+ * 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
+ * TRANSIENT_FAILURE.
+ * CHECK: p->num_transient_failures == p->num_subchannels.
+ *
+ * 5) RULE: ALL subchannels are IDLE => policy is IDLE.
+ * CHECK: p->num_idle == p->num_subchannels.
+ */
+ round_robin_lb_policy *p = sd->policy;
+ if (!is_ready_list_empty(p)) { /* 1) READY */
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
+ GRPC_ERROR_NONE, "rr_ready");
+ return GRPC_CHANNEL_READY;
+ } else if (sd->curr_connectivity_state ==
+ GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
+ "rr_connecting");
+ return GRPC_CHANNEL_CONNECTING;
+ } else if (p->num_subchannels == 0) { /* 3) SHUTDOWN */
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+ "rr_shutdown");
+ return GRPC_CHANNEL_SHUTDOWN;
+ } else if (p->num_transient_failures ==
+ p->num_subchannels) { /* 4) TRANSIENT_FAILURE */
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error), "rr_transient_failure");
+ return GRPC_CHANNEL_TRANSIENT_FAILURE;
+ } else if (p->num_idle == p->num_subchannels) { /* 5) IDLE */
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
+ GRPC_ERROR_NONE, "rr_idle");
+ return GRPC_CHANNEL_IDLE;
+ }
+ /* no change */
+ return sd->curr_connectivity_state;
+}
+
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
- ready_list *selected;
-
- int unref = 0;
GRPC_ERROR_REF(error);
gpr_mu_lock(&p->mu);
if (p->shutdown) {
- unref = 1;
- } else {
- switch (sd->connectivity_state) {
- case GRPC_CHANNEL_READY:
- grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
- GRPC_CHANNEL_READY, GRPC_ERROR_REF(error),
- "connecting_ready");
- /* add the newly connected subchannel to the list of connected ones.
- * Note that it goes to the "end of the line". */
- sd->ready_list_node = add_connected_sc_locked(p, sd);
- /* at this point we know there's at least one suitable subchannel. Go
- * ahead and pick one and notify the pending suitors in
- * p->pending_picks. This preemtively replicates rr_pick()'s actions. */
- selected = peek_next_connected_locked(p);
- if (p->pending_picks != NULL) {
- /* if the selected subchannel is going to be used for the pending
- * picks, update the last picked pointer */
- advance_last_picked_locked(p);
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
+ switch (sd->curr_connectivity_state) {
+ case GRPC_CHANNEL_INIT:
+ GPR_UNREACHABLE_CODE(return );
+ case GRPC_CHANNEL_READY:
+ /* add the newly connected subchannel to the list of connected ones.
+ * Note that it goes to the "end of the line". */
+ sd->ready_list_node = add_connected_sc_locked(p, sd);
+ /* at this point we know there's at least one suitable subchannel. Go
+ * ahead and pick one and notify the pending suitors in
+       * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
+ ready_list *selected = peek_next_connected_locked(p);
+ GPR_ASSERT(selected != NULL);
+ if (p->pending_picks != NULL) {
+ /* if the selected subchannel is going to be used for the pending
+ * picks, update the last picked pointer */
+ advance_last_picked_locked(p);
+ }
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "rr_picked");
+ if (pp->user_data != NULL) {
+ *pp->user_data = selected->user_data;
+ }
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG,
+ "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
+ (void *)selected->subchannel, (void *)selected);
}
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ gpr_free(pp);
+ }
+ update_lb_connectivity_status(exec_ctx, sd, error);
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
+ /* renew notification: reuses the "rr_connectivity" weak ref */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_IDLE:
+ ++p->num_idle;
+ /* fallthrough */
+ case GRPC_CHANNEL_CONNECTING:
+ update_state_counters(sd);
+ update_lb_connectivity_status(exec_ctx, sd, error);
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
+ /* renew notification: reuses the "rr_connectivity" weak ref */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ ++p->num_transient_failures;
+ /* remove from ready list if still present */
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
+ update_lb_connectivity_status(exec_ctx, sd, error);
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
+ /* renew notification: reuses the "rr_connectivity" weak ref */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_SHUTDOWN:
+ update_state_counters(sd);
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
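+      /* Remove this subchannel from the policy's array: shrink the count,
+       * swap the last element into its slot and fix up the moved element's
+       * index below. */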
+ --p->num_subchannels;
+ GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
+ p->subchannels[sd->index]->index = sd->index;
+ if (update_lb_connectivity_status(exec_ctx, sd, error) ==
+ GRPC_CHANNEL_SHUTDOWN) {
+ /* the policy is shutting down. Flush all the pending picks... */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
-
- *pp->target =
- grpc_subchannel_get_connected_subchannel(selected->subchannel);
- if (pp->user_data != NULL) {
- *pp->user_data = selected->user_data;
- }
- if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG,
- "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
- (void *)selected->subchannel, (void *)selected);
- }
+ *pp->target = NULL;
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->connectivity_state, &sd->connectivity_changed_closure);
- break;
- case GRPC_CHANNEL_CONNECTING:
- case GRPC_CHANNEL_IDLE:
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, sd->connectivity_state,
- GRPC_ERROR_REF(error), "connecting_changed");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->connectivity_state, &sd->connectivity_changed_closure);
- break;
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- /* renew state notification */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->connectivity_state, &sd->connectivity_changed_closure);
-
- /* remove from ready list if still present */
- if (sd->ready_list_node != NULL) {
- remove_disconnected_sc_locked(p, sd->ready_list_node);
- sd->ready_list_node = NULL;
- }
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_REF(error), "connecting_transient_failure");
- break;
- case GRPC_CHANNEL_SHUTDOWN:
- if (sd->ready_list_node != NULL) {
- remove_disconnected_sc_locked(p, sd->ready_list_node);
- sd->ready_list_node = NULL;
- }
-
- p->num_subchannels--;
- GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
- p->subchannels[p->num_subchannels]);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
- p->subchannels[sd->index]->index = sd->index;
- gpr_free(sd);
-
- unref = 1;
- if (p->num_subchannels == 0) {
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING("Round Robin Channels Exhausted",
- &error, 1),
- "no_more_channels");
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
- NULL);
- gpr_free(pp);
- }
- } else {
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_REF(error), "subchannel_failed");
- }
- } /* switch */
- } /* !unref */
-
- gpr_mu_unlock(&p->mu);
-
- if (unref) {
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
+ }
+ gpr_free(sd);
+ /* unref the "rr_connectivity" weak ref from start_picking */
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
+ break;
}
-
+ gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
@@ -578,8 +677,11 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
- target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_sched(exec_ctx, closure,
@@ -601,15 +703,10 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- /* Get server name. */
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
- const char *server_name =
- arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
-
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
@@ -632,9 +729,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
if (addresses->addresses[i].is_balancer) continue;
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- /* server_name will be copied as part of the subchannel creation. This makes
- * the copying of server_name (a borrowed pointer) OK. */
- sc_args.server_name = server_name;
sc_args.addr = &addresses->addresses[i].address;
sc_args.args = args->args;
@@ -648,7 +742,11 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sd->policy = p;
sd->index = subchannel_idx;
sd->subchannel = subchannel;
- sd->user_data = addresses->addresses[i].user_data;
+ sd->user_data_vtable = addresses->user_data_vtable;
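+      /* Keep the policy's own copy of the user data; it is released via the
+       * vtable in rr_destroy(). */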
+ if (sd->user_data_vtable != NULL) {
+ sd->user_data =
+ sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ }
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
@@ -671,6 +769,11 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
+ (void *)p, (unsigned long)p->num_subchannels);
+ }
gpr_mu_init(&p->mu);
return &p->base;
}
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index eeae2400fb..18bb826948 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -152,9 +152,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
channel_data *chand = elem->channel_data;
@@ -171,6 +171,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
NULL,
NULL};
*/
+
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -232,4 +234,5 @@ const grpc_channel_filter grpc_load_reporting_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"load_reporting"};
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index 958b8af8b2..2675fa931f 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -46,10 +46,11 @@
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
-#define BACKOFF_MULTIPLIER 1.6
-#define BACKOFF_JITTER 0.2
-#define BACKOFF_MIN_SECONDS 1
-#define BACKOFF_MAX_SECONDS 120
+#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
+#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_DNS_RECONNECT_JITTER 0.2
typedef struct {
/** base class: must be first */
@@ -60,6 +61,8 @@ typedef struct {
char *default_port;
/** channel args. */
grpc_channel_args *channel_args;
+ /** pollset_set to drive the name resolution process */
+ grpc_pollset_set *interested_parties;
/** mutex guarding the rest of the state */
gpr_mu mu;
@@ -190,7 +193,7 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) <= 0) {
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
timeout.tv_nsec);
} else {
@@ -217,6 +220,7 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->resolving = true;
r->addresses = NULL;
grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
+ r->interested_parties,
grpc_closure_create(dns_on_resolved, r), &r->addresses);
}
@@ -239,13 +243,15 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(r->resolved_result);
}
+ grpc_pollset_set_destroy(r->interested_parties);
gpr_free(r->name_to_resolve);
gpr_free(r->default_port);
grpc_channel_args_destroy(r->channel_args);
gpr_free(r);
}
-static grpc_resolver *dns_create(grpc_resolver_args *args,
+static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
+ grpc_resolver_args *args,
const char *default_port) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based dns uri's not supported");
@@ -263,14 +269,17 @@ static grpc_resolver *dns_create(grpc_resolver_args *args,
grpc_resolver_init(&r->base, &dns_resolver_vtable);
r->name_to_resolve = proxy_name == NULL ? gpr_strdup(path) : proxy_name;
r->default_port = gpr_strdup(default_port);
- grpc_arg server_name_arg;
- server_name_arg.type = GRPC_ARG_STRING;
- server_name_arg.key = GRPC_ARG_SERVER_NAME;
- server_name_arg.value.string = (char *)path;
- r->channel_args =
- grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
- gpr_backoff_init(&r->backoff_state, BACKOFF_MULTIPLIER, BACKOFF_JITTER,
- BACKOFF_MIN_SECONDS * 1000, BACKOFF_MAX_SECONDS * 1000);
+ r->channel_args = grpc_channel_args_copy(args->args);
+ r->interested_parties = grpc_pollset_set_create();
+ if (args->pollset_set != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
+ args->pollset_set);
+ }
+ gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;
}
@@ -283,8 +292,9 @@ static void dns_factory_ref(grpc_resolver_factory *factory) {}
static void dns_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver *dns_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_resolver_args *args) {
- return dns_create(args, "https");
+ grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
+ grpc_resolver_args *args) {
+ return dns_create(exec_ctx, args, "https");
}
static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory,
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 5fec03a8e4..88808c674f 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -47,6 +47,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
typedef struct {
@@ -169,17 +170,17 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
return NULL;
}
/* Construct addresses. */
- gpr_slice path_slice =
- gpr_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
- gpr_slice_buffer path_parts;
- gpr_slice_buffer_init(&path_parts);
- gpr_slice_split(path_slice, ",", &path_parts);
+ grpc_slice path_slice =
+ grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
+ grpc_slice_buffer path_parts;
+ grpc_slice_buffer_init(&path_parts);
+ grpc_slice_split(path_slice, ",", &path_parts);
grpc_lb_addresses *addresses =
grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
- char *part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
+ char *part_str = grpc_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
ith_uri.path = part_str;
if (!parse(&ith_uri, &addresses->addresses[i].address)) {
errors_found = true; /* GPR_TRUE */
@@ -187,8 +188,8 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
gpr_free(part_str);
if (errors_found) break;
}
- gpr_slice_buffer_destroy(&path_parts);
- gpr_slice_unref(path_slice);
+ grpc_slice_buffer_destroy(&path_parts);
+ grpc_slice_unref(path_slice);
if (errors_found) {
grpc_lb_addresses_destroy(addresses);
return NULL;
@@ -197,12 +198,7 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver));
memset(r, 0, sizeof(*r));
r->addresses = addresses;
- grpc_arg server_name_arg;
- server_name_arg.type = GRPC_ARG_STRING;
- server_name_arg.key = GRPC_ARG_SERVER_NAME;
- server_name_arg.value.string = args->uri->path;
- r->channel_args =
- grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
+ r->channel_args = grpc_channel_args_copy(args->args);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
return &r->base;
@@ -218,7 +214,8 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
#define DECL_FACTORY(name) \
static grpc_resolver *name##_factory_create_resolver( \
- grpc_resolver_factory *factory, grpc_resolver_args *args) { \
+ grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \
+ grpc_resolver_args *args) { \
return sockaddr_create(args, parse_##name); \
} \
static const grpc_resolver_factory_vtable name##_factory_vtable = { \
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
new file mode 100644
index 0000000000..dfa0808def
--- /dev/null
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -0,0 +1,263 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
+
+#include <grpc/grpc.h>
+
+#include <string.h>
+
+#include <grpc/slice_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/client_channel/connector.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+
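+// State for a single chttp2 client connection attempt.  Most fields are
+// guarded by mu; refs is manipulated atomically via gpr_ref/gpr_unref.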
+typedef struct {
+ grpc_connector base;
+
+ gpr_mu mu;
+ gpr_refcount refs;
+
+ bool shutdown;
+ bool connecting;
+
+ grpc_chttp2_add_handshakers_func add_handshakers;
+ void *add_handshakers_user_data;
+
+ grpc_closure *notify;
+ grpc_connect_in_args args;
+ grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ grpc_slice_buffer initial_string_buffer;
+
+  grpc_endpoint *endpoint;  // Set by the TCP connect; reset to NULL once
+                            // ownership passes to the handshake manager.
+
+ grpc_closure connected;
+
+ grpc_handshake_manager *handshake_mgr;
+} chttp2_connector;
+
+static void chttp2_connector_ref(grpc_connector *con) {
+ chttp2_connector *c = (chttp2_connector *)con;
+ gpr_ref(&c->refs);
+}
+
+static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx,
+ grpc_connector *con) {
+ chttp2_connector *c = (chttp2_connector *)con;
+ if (gpr_unref(&c->refs)) {
+ /* c->initial_string_buffer does not need to be destroyed */
+ gpr_mu_destroy(&c->mu);
+ // If handshaking is not yet in progress, destroy the endpoint.
+ // Otherwise, the handshaker will do this for us.
+ if (c->endpoint != NULL) grpc_endpoint_destroy(exec_ctx, c->endpoint);
+ gpr_free(c);
+ }
+}
+
+static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_connector *con) {
+ chttp2_connector *c = (chttp2_connector *)con;
+ gpr_mu_lock(&c->mu);
+ c->shutdown = true;
+ if (c->handshake_mgr != NULL) {
+ grpc_handshake_manager_shutdown(exec_ctx, c->handshake_mgr);
+ }
+  // If handshaking is not yet in progress, shut down the endpoint.
+ // Otherwise, the handshaker will do this for us.
+ if (!c->connecting && c->endpoint != NULL) {
+ grpc_endpoint_shutdown(exec_ctx, c->endpoint);
+ }
+ gpr_mu_unlock(&c->mu);
+}
+
+static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_handshaker_args *args = arg;
+ chttp2_connector *c = args->user_data;
+ gpr_mu_lock(&c->mu);
+ if (error != GRPC_ERROR_NONE || c->shutdown) {
+ if (error == GRPC_ERROR_NONE) {
+ error = GRPC_ERROR_CREATE("connector shutdown");
+ // We were shut down after handshaking completed successfully, so
+ // destroy the endpoint here.
+ // TODO(ctiller): It is currently necessary to shutdown endpoints
+ // before destroying them, even if we know that there are no
+ // pending read/write callbacks. This should be fixed, at which
+ // point this can be removed.
+ grpc_endpoint_shutdown(exec_ctx, args->endpoint);
+ grpc_endpoint_destroy(exec_ctx, args->endpoint);
+ grpc_channel_args_destroy(args->args);
+ grpc_slice_buffer_destroy(args->read_buffer);
+ gpr_free(args->read_buffer);
+ } else {
+ error = GRPC_ERROR_REF(error);
+ }
+ memset(c->result, 0, sizeof(*c->result));
+ } else {
+ c->result->transport =
+ grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 1);
+ GPR_ASSERT(c->result->transport);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport,
+ args->read_buffer);
+ c->result->channel_args = args->args;
+ }
+ grpc_closure *notify = c->notify;
+ c->notify = NULL;
+ grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
+ c->handshake_mgr = NULL;
+ gpr_mu_unlock(&c->mu);
+ chttp2_connector_unref(exec_ctx, (grpc_connector *)c);
+}
+
+static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
+ chttp2_connector *c) {
+ c->handshake_mgr = grpc_handshake_manager_create();
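+  // Handshakers run in the order they are added: the HTTP CONNECT proxy
+  // handshaker (when a proxy is configured) comes first, followed by any
+  // handshakers supplied via add_handshakers (e.g. the security connector's).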
+ char *proxy_name = grpc_get_http_proxy_server();
+ if (proxy_name != NULL) {
+ grpc_handshake_manager_add(c->handshake_mgr,
+ grpc_http_connect_handshaker_create(proxy_name));
+ gpr_free(proxy_name);
+ }
+ if (c->add_handshakers != NULL) {
+ c->add_handshakers(exec_ctx, c->add_handshakers_user_data,
+ c->handshake_mgr);
+ }
+ grpc_handshake_manager_do_handshake(
+ exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args,
+ c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
+ c->endpoint = NULL; // Endpoint handed off to handshake manager.
+}
+
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ chttp2_connector *c = arg;
+ gpr_mu_lock(&c->mu);
+ if (error != GRPC_ERROR_NONE || c->shutdown) {
+ if (error == GRPC_ERROR_NONE) {
+ error = GRPC_ERROR_CREATE("connector shutdown");
+ } else {
+ error = GRPC_ERROR_REF(error);
+ }
+ memset(c->result, 0, sizeof(*c->result));
+ grpc_closure *notify = c->notify;
+ c->notify = NULL;
+ grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ gpr_mu_unlock(&c->mu);
+ chttp2_connector_unref(exec_ctx, arg);
+ } else {
+ start_handshake_locked(exec_ctx, c);
+ gpr_mu_unlock(&c->mu);
+ }
+}
+
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ chttp2_connector *c = arg;
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->connecting);
+ c->connecting = false;
+ if (error != GRPC_ERROR_NONE || c->shutdown) {
+ if (error == GRPC_ERROR_NONE) {
+ error = GRPC_ERROR_CREATE("connector shutdown");
+ } else {
+ error = GRPC_ERROR_REF(error);
+ }
+ memset(c->result, 0, sizeof(*c->result));
+ grpc_closure *notify = c->notify;
+ c->notify = NULL;
+ grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
+ gpr_mu_unlock(&c->mu);
+ chttp2_connector_unref(exec_ctx, arg);
+ } else {
+ GPR_ASSERT(c->endpoint != NULL);
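+    // Write the initial connect string, if one was supplied, before
+    // handshaking; otherwise start the handshake immediately.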
+ if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ grpc_slice_buffer_init(&c->initial_string_buffer);
+ grpc_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ grpc_endpoint_write(exec_ctx, c->endpoint, &c->initial_string_buffer,
+ &c->initial_string_sent);
+ } else {
+ start_handshake_locked(exec_ctx, c);
+ }
+ gpr_mu_unlock(&c->mu);
+ }
+}
+
+static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
+ grpc_connector *con,
+ const grpc_connect_in_args *args,
+ grpc_connect_out_args *result,
+ grpc_closure *notify) {
+ chttp2_connector *c = (chttp2_connector *)con;
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->notify == NULL);
+ c->notify = notify;
+ c->args = *args;
+ c->result = result;
+ GPR_ASSERT(c->endpoint == NULL);
+ chttp2_connector_ref(con); // Ref taken for callback.
+ grpc_closure_init(&c->connected, connected, c);
+ GPR_ASSERT(!c->connecting);
+ c->connecting = true;
+ grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
+ args->interested_parties, args->channel_args,
+ args->addr, args->deadline);
+ gpr_mu_unlock(&c->mu);
+}
+
+static const grpc_connector_vtable chttp2_connector_vtable = {
+ chttp2_connector_ref, chttp2_connector_unref, chttp2_connector_shutdown,
+ chttp2_connector_connect};
+
+grpc_connector *grpc_chttp2_connector_create(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers,
+ void *add_handshakers_user_data) {
+ chttp2_connector *c = gpr_malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ c->base.vtable = &chttp2_connector_vtable;
+ gpr_mu_init(&c->mu);
+ gpr_ref_init(&c->refs, 1);
+ c->add_handshakers = add_handshakers;
+ c->add_handshakers_user_data = add_handshakers_user_data;
+ return &c->base;
+}
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.h b/src/core/ext/transport/chttp2/client/chttp2_connector.h
new file mode 100644
index 0000000000..58eba22417
--- /dev/null
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
+
+#include "src/core/ext/client_channel/connector.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+
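+/// Callback used to add extra handshakers (e.g. the security connector's)
+/// to a new connection's handshake manager.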
+typedef void (*grpc_chttp2_add_handshakers_func)(
+ grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_handshake_manager* handshake_mgr);
+
+/// If \a add_handshakers is non-NULL, it will be called with
+/// \a add_handshakers_user_data to add handshakers.
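+///
+/// A minimal usage sketch (mirroring the insecure client channel factory
+/// elsewhere in this change):
+///
+///   grpc_connector *connector = grpc_chttp2_connector_create(
+///       exec_ctx, NULL /* add_handshakers */, NULL /* user_data */);
+///   grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args);
+///   grpc_connector_unref(exec_ctx, connector);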
+grpc_connector* grpc_chttp2_connector_create(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers,
+ void* add_handshakers_user_data);
+
+#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index 372fb7bf4c..a0d0652ce7 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -33,137 +33,17 @@
#include <grpc/grpc.h>
-#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/client_channel.h"
-#include "src/core/ext/client_channel/http_connect_handshaker.h"
-#include "src/core/ext/client_channel/resolver_registry.h"
-#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/channel/compress_filter.h"
-#include "src/core/lib/channel/handshaker.h"
-#include "src/core/lib/channel/http_client_filter.h"
-#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
-//
-// connector
-//
-
-typedef struct {
- grpc_connector base;
- gpr_refcount refs;
-
- grpc_closure *notify;
- grpc_connect_in_args args;
- grpc_connect_out_args *result;
- grpc_closure initial_string_sent;
- gpr_slice_buffer initial_string_buffer;
-
- grpc_endpoint *tcp;
-
- grpc_closure connected;
-
- grpc_handshake_manager *handshake_mgr;
-} connector;
-
-static void connector_ref(grpc_connector *con) {
- connector *c = (connector *)con;
- gpr_ref(&c->refs);
-}
-
-static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
- connector *c = (connector *)con;
- if (gpr_unref(&c->refs)) {
- /* c->initial_string_buffer does not need to be destroyed */
- grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
- gpr_free(c);
- }
-}
-
-static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- connector_unref(exec_ctx, arg);
-}
-
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
- grpc_channel_args *args,
- gpr_slice_buffer *read_buffer, void *user_data,
- grpc_error *error) {
- connector *c = user_data;
- if (error != GRPC_ERROR_NONE) {
- grpc_channel_args_destroy(args);
- gpr_free(read_buffer);
- } else {
- c->result->transport =
- grpc_create_chttp2_transport(exec_ctx, args, endpoint, 1);
- GPR_ASSERT(c->result->transport);
- grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport,
- read_buffer);
- c->result->channel_args = args;
- }
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
-}
-
-static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- connector *c = arg;
- grpc_endpoint *tcp = c->tcp;
- if (tcp != NULL) {
- if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
- grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
- c);
- gpr_slice_buffer_init(&c->initial_string_buffer);
- gpr_slice_buffer_add(&c->initial_string_buffer,
- c->args.initial_connect_string);
- connector_ref(arg);
- grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
- &c->initial_string_sent);
- } else {
- grpc_handshake_manager_do_handshake(
- exec_ctx, c->handshake_mgr, tcp, c->args.channel_args,
- c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
- }
- } else {
- memset(c->result, 0, sizeof(*c->result));
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
- }
-}
-
-static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}
-
-static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
- const grpc_connect_in_args *args,
- grpc_connect_out_args *result,
- grpc_closure *notify) {
- connector *c = (connector *)con;
- GPR_ASSERT(c->notify == NULL);
- GPR_ASSERT(notify->cb);
- c->notify = notify;
- c->args = *args;
- c->result = result;
- c->tcp = NULL;
- grpc_closure_init(&c->connected, connected, c);
- grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
- args->interested_parties, args->addr, args->deadline);
-}
-
-static const grpc_connector_vtable connector_vtable = {
- connector_ref, connector_unref, connector_shutdown, connector_connect};
-
-//
-// client_channel_factory
-//
-
static void client_channel_factory_ref(
grpc_client_channel_factory *cc_factory) {}
@@ -173,20 +53,10 @@ static void client_channel_factory_unref(
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const grpc_subchannel_args *args) {
- connector *c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
- c->base.vtable = &connector_vtable;
- gpr_ref_init(&c->refs, 1);
- c->handshake_mgr = grpc_handshake_manager_create();
- char *proxy_name = grpc_get_http_proxy_server();
- if (proxy_name != NULL) {
- grpc_handshake_manager_add(
- c->handshake_mgr,
- grpc_http_connect_handshaker_create(proxy_name, args->server_name));
- gpr_free(proxy_name);
- }
- grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
- grpc_connector_unref(exec_ctx, &c->base);
+ grpc_connector *connector = grpc_chttp2_connector_create(
+ exec_ctx, NULL /* add_handshakers */, NULL /* user_data */);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args);
+ grpc_connector_unref(exec_ctx, connector);
return s;
}
@@ -194,19 +64,15 @@ static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
const grpc_channel_args *args) {
- grpc_channel *channel =
- grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
- grpc_resolver *resolver = grpc_resolver_create(target, args);
- if (!resolver) {
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel,
- "client_channel_factory_create_channel");
- return NULL;
- }
-
- grpc_client_channel_finish_initialization(
- exec_ctx, grpc_channel_get_channel_stack(channel), resolver, cc_factory);
- GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel");
-
+ // Add channel arg containing the server URI.
+ grpc_arg arg;
+ arg.type = GRPC_ARG_STRING;
+ arg.key = GRPC_ARG_SERVER_URI;
+ arg.value.string = (char *)target;
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
+ GRPC_CLIENT_CHANNEL, NULL);
+ grpc_channel_args_destroy(new_args);
return channel;
}
@@ -229,16 +95,19 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
GRPC_API_TRACE(
"grpc_insecure_channel_create(target=%p, args=%p, reserved=%p)", 3,
(target, args, reserved));
- GPR_ASSERT(!reserved);
-
+ GPR_ASSERT(reserved == NULL);
grpc_client_channel_factory *factory =
(grpc_client_channel_factory *)&client_channel_factory;
+ // Add channel arg containing the client channel factory.
+ grpc_arg arg = grpc_client_channel_factory_create_channel_arg(factory);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ // Create channel.
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args);
-
+ &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+ // Clean up.
+ grpc_channel_args_destroy(new_args);
grpc_client_channel_factory_unref(&exec_ctx, factory);
grpc_exec_ctx_finish(&exec_ctx);
-
return channel != NULL ? channel : grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
index b2c5e5b088..1e5b1c22e3 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -44,6 +44,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
@@ -65,9 +66,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
int flags = fcntl(fd, F_GETFL, 0);
GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
- grpc_endpoint *client =
- grpc_tcp_create(grpc_fd_create(fd, "client"),
- GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "fd-client");
+ grpc_endpoint *client = grpc_tcp_client_create_from_fd(
+ &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
grpc_transport *transport =
grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index 29095af207..f35439cd44 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -33,196 +33,18 @@
#include <grpc/grpc.h>
-#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/client_channel.h"
-#include "src/core/ext/client_channel/http_connect_handshaker.h"
-#include "src/core/ext/client_channel/resolver_registry.h"
-#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/channel/handshaker.h"
-#include "src/core/lib/iomgr/tcp_client.h"
-#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h"
-#include "src/core/lib/security/transport/auth_filters.h"
+#include "src/core/lib/security/transport/security_connector.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
-#include "src/core/lib/tsi/transport_security_interface.h"
-
-//
-// connector
-//
-
-typedef struct {
- grpc_connector base;
- gpr_refcount refs;
-
- grpc_channel_security_connector *security_connector;
-
- grpc_closure *notify;
- grpc_connect_in_args args;
- grpc_connect_out_args *result;
- grpc_closure initial_string_sent;
- gpr_slice_buffer initial_string_buffer;
-
- gpr_mu mu;
- grpc_endpoint *connecting_endpoint;
- grpc_endpoint *newly_connecting_endpoint;
-
- grpc_closure connected_closure;
-
- grpc_handshake_manager *handshake_mgr;
-
- // TODO(roth): Remove once we eliminate on_secure_handshake_done().
- grpc_channel_args *tmp_args;
-} connector;
-
-static void connector_ref(grpc_connector *con) {
- connector *c = (connector *)con;
- gpr_ref(&c->refs);
-}
-
-static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
- connector *c = (connector *)con;
- if (gpr_unref(&c->refs)) {
- /* c->initial_string_buffer does not need to be destroyed */
- grpc_channel_args_destroy(c->tmp_args);
- grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
- gpr_free(c);
- }
-}
-
-static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint,
- grpc_auth_context *auth_context) {
- connector *c = arg;
- gpr_mu_lock(&c->mu);
- grpc_error *error = GRPC_ERROR_NONE;
- if (c->connecting_endpoint == NULL) {
- memset(c->result, 0, sizeof(*c->result));
- gpr_mu_unlock(&c->mu);
- } else if (status != GRPC_SECURITY_OK) {
- error = grpc_error_set_int(GRPC_ERROR_CREATE("Secure handshake failed"),
- GRPC_ERROR_INT_SECURITY_STATUS, status);
- memset(c->result, 0, sizeof(*c->result));
- c->connecting_endpoint = NULL;
- gpr_mu_unlock(&c->mu);
- } else {
- grpc_arg auth_context_arg;
- c->connecting_endpoint = NULL;
- gpr_mu_unlock(&c->mu);
- c->result->transport = grpc_create_chttp2_transport(
- exec_ctx, c->args.channel_args, secure_endpoint, 1);
- grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL);
- auth_context_arg = grpc_auth_context_to_arg(auth_context);
- c->result->channel_args =
- grpc_channel_args_copy_and_add(c->tmp_args, &auth_context_arg, 1);
- }
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
-}
-
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
- grpc_channel_args *args,
- gpr_slice_buffer *read_buffer, void *user_data,
- grpc_error *error) {
- connector *c = user_data;
- c->tmp_args = args;
- if (error != GRPC_ERROR_NONE) {
- gpr_free(read_buffer);
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
- } else {
- // TODO(roth, jboeuf): Convert security connector handshaking to use new
- // handshake API, and then move the code from on_secure_handshake_done()
- // into this function.
- grpc_channel_security_connector_do_handshake(
- exec_ctx, c->security_connector, endpoint, read_buffer,
- c->args.deadline, on_secure_handshake_done, c);
- }
-}
-
-static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- connector *c = arg;
- grpc_handshake_manager_do_handshake(
- exec_ctx, c->handshake_mgr, c->connecting_endpoint, c->args.channel_args,
- c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
-}
-
-static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- connector *c = arg;
- grpc_endpoint *tcp = c->newly_connecting_endpoint;
- if (tcp != NULL) {
- gpr_mu_lock(&c->mu);
- GPR_ASSERT(c->connecting_endpoint == NULL);
- c->connecting_endpoint = tcp;
- gpr_mu_unlock(&c->mu);
- if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
- grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
- c);
- gpr_slice_buffer_init(&c->initial_string_buffer);
- gpr_slice_buffer_add(&c->initial_string_buffer,
- c->args.initial_connect_string);
- grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
- &c->initial_string_sent);
- } else {
- grpc_handshake_manager_do_handshake(
- exec_ctx, c->handshake_mgr, tcp, c->args.channel_args,
- c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
- }
- } else {
- memset(c->result, 0, sizeof(*c->result));
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
- }
-}
-
-static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
- connector *c = (connector *)con;
- grpc_endpoint *ep;
- gpr_mu_lock(&c->mu);
- ep = c->connecting_endpoint;
- c->connecting_endpoint = NULL;
- gpr_mu_unlock(&c->mu);
- if (ep) {
- grpc_endpoint_shutdown(exec_ctx, ep);
- }
-}
-
-static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
- const grpc_connect_in_args *args,
- grpc_connect_out_args *result,
- grpc_closure *notify) {
- connector *c = (connector *)con;
- GPR_ASSERT(c->notify == NULL);
- c->notify = notify;
- c->args = *args;
- c->result = result;
- gpr_mu_lock(&c->mu);
- GPR_ASSERT(c->connecting_endpoint == NULL);
- gpr_mu_unlock(&c->mu);
- grpc_closure_init(&c->connected_closure, connected, c);
- grpc_tcp_client_connect(exec_ctx, &c->connected_closure,
- &c->newly_connecting_endpoint,
- args->interested_parties, args->addr, args->deadline);
-}
-
-static const grpc_connector_vtable connector_vtable = {
- connector_ref, connector_unref, connector_shutdown, connector_connect};
-
-//
-// client_channel_factory
-//
typedef struct {
grpc_client_channel_factory base;
@@ -246,26 +68,20 @@ static void client_channel_factory_unref(
}
}
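+// Callback passed to grpc_chttp2_connector_create(); it adds the security
+// connector's handshakers to each new connection.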
+static void add_handshakers(grpc_exec_ctx *exec_ctx, void *security_connector,
+ grpc_handshake_manager *handshake_mgr) {
+ grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector,
+ handshake_mgr);
+}
+
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const grpc_subchannel_args *args) {
client_channel_factory *f = (client_channel_factory *)cc_factory;
- connector *c = gpr_malloc(sizeof(*c));
- memset(c, 0, sizeof(*c));
- c->base.vtable = &connector_vtable;
- c->security_connector = f->security_connector;
- c->handshake_mgr = grpc_handshake_manager_create();
- char *proxy_name = grpc_get_http_proxy_server();
- if (proxy_name != NULL) {
- grpc_handshake_manager_add(
- c->handshake_mgr,
- grpc_http_connect_handshaker_create(proxy_name, args->server_name));
- gpr_free(proxy_name);
- }
- gpr_mu_init(&c->mu);
- gpr_ref_init(&c->refs, 1);
- grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
- grpc_connector_unref(exec_ctx, &c->base);
+ grpc_connector *connector = grpc_chttp2_connector_create(
+ exec_ctx, add_handshakers, f->security_connector);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args);
+ grpc_connector_unref(exec_ctx, connector);
return s;
}
@@ -273,19 +89,15 @@ static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
const grpc_channel_args *args) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- grpc_channel *channel =
- grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
- grpc_resolver *resolver = grpc_resolver_create(target, args);
- if (resolver != NULL) {
- grpc_client_channel_finish_initialization(
- exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base);
- GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create");
- } else {
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel,
- "client_channel_factory_create_channel");
- channel = NULL;
- }
+ // Add channel arg containing the server URI.
+ grpc_arg arg;
+ arg.type = GRPC_ARG_STRING;
+ arg.key = GRPC_ARG_SERVER_URI;
+ arg.value.string = (char *)target;
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
+ GRPC_CLIENT_CHANNEL, NULL);
+ grpc_channel_args_destroy(new_args);
return channel;
}
@@ -326,14 +138,6 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
return grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL, "Failed to create security connector.");
}
- grpc_arg connector_arg =
- grpc_security_connector_to_arg(&security_connector->base);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(
- new_args_from_connector != NULL ? new_args_from_connector : args,
- &connector_arg, 1);
- if (new_args_from_connector != NULL) {
- grpc_channel_args_destroy(new_args_from_connector);
- }
// Create client channel factory.
client_channel_factory *f = gpr_malloc(sizeof(*f));
memset(f, 0, sizeof(*f));
@@ -342,13 +146,24 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
GRPC_SECURITY_CONNECTOR_REF(&security_connector->base,
"grpc_secure_channel_create");
f->security_connector = security_connector;
+ // Add channel args containing the client channel factory and security
+ // connector.
+ grpc_arg new_args[2];
+ new_args[0] = grpc_client_channel_factory_create_channel_arg(&f->base);
+ new_args[1] = grpc_security_connector_to_arg(&security_connector->base);
+ grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
+ new_args_from_connector != NULL ? new_args_from_connector : args,
+ new_args, GPR_ARRAY_SIZE(new_args));
+ if (new_args_from_connector != NULL) {
+ grpc_channel_args_destroy(new_args_from_connector);
+ }
// Create channel.
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+ &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args_copy);
// Clean up.
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
- "client_channel_factory_create_channel");
- grpc_channel_args_destroy(new_args);
+ "secure_client_channel_factory_create_channel");
+ grpc_channel_args_destroy(args_copy);
grpc_client_channel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);
return channel; /* may be NULL */
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
new file mode 100644
index 0000000000..f0857714fc
--- /dev/null
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -0,0 +1,362 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/transport/chttp2/server/chttp2_server.h"
+
+#include <grpc/grpc.h>
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/channel/http_server_filter.h"
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/server.h"
+
+void grpc_chttp2_server_handshaker_factory_add_handshakers(
+ grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory,
+ grpc_handshake_manager *handshake_mgr) {
+ if (handshaker_factory != NULL) {
+ handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory,
+ handshake_mgr);
+ }
+}
+
+void grpc_chttp2_server_handshaker_factory_destroy(
+ grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory) {
+ if (handshaker_factory != NULL) {
+ handshaker_factory->vtable->destroy(exec_ctx, handshaker_factory);
+ }
+}
+
+typedef struct pending_handshake_manager_node {
+ grpc_handshake_manager *handshake_mgr;
+ struct pending_handshake_manager_node *next;
+} pending_handshake_manager_node;
+
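+// State shared by every connection accepted on the ports added by a single
+// call to grpc_chttp2_server_add_port().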
+typedef struct {
+ grpc_server *server;
+ grpc_tcp_server *tcp_server;
+ grpc_channel_args *args;
+ grpc_chttp2_server_handshaker_factory *handshaker_factory;
+ gpr_mu mu;
+ bool shutdown;
+ grpc_closure tcp_server_shutdown_complete;
+ grpc_closure *server_destroy_listener_done;
+ pending_handshake_manager_node *pending_handshake_mgrs;
+} server_state;
+
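+// Per-connection state, alive while that connection's handshake is in
+// progress.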
+typedef struct {
+ server_state *server_state;
+ grpc_pollset *accepting_pollset;
+ grpc_tcp_server_acceptor *acceptor;
+ grpc_handshake_manager *handshake_mgr;
+} server_connection_state;
+
+static void pending_handshake_manager_add_locked(
+ server_state *state, grpc_handshake_manager *handshake_mgr) {
+ pending_handshake_manager_node *node = gpr_malloc(sizeof(*node));
+ node->handshake_mgr = handshake_mgr;
+ node->next = state->pending_handshake_mgrs;
+ state->pending_handshake_mgrs = node;
+}
+
+static void pending_handshake_manager_remove_locked(
+ server_state *state, grpc_handshake_manager *handshake_mgr) {
+ pending_handshake_manager_node **prev_node = &state->pending_handshake_mgrs;
+ for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
+ node != NULL; node = node->next) {
+ if (node->handshake_mgr == handshake_mgr) {
+ *prev_node = node->next;
+ gpr_free(node);
+ break;
+ }
+ prev_node = &node->next;
+ }
+}
+
+static void pending_handshake_manager_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ server_state *state) {
+ pending_handshake_manager_node *prev_node = NULL;
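+  // Shut down every pending handshake manager and free the list.  Each node
+  // is freed one iteration behind so that node->next remains valid.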
+ for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
+ node != NULL; node = node->next) {
+ grpc_handshake_manager_shutdown(exec_ctx, node->handshake_mgr);
+ gpr_free(prev_node);
+ prev_node = node;
+ }
+ gpr_free(prev_node);
+ state->pending_handshake_mgrs = NULL;
+}
+
+static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_handshaker_args *args = arg;
+ server_connection_state *connection_state = args->user_data;
+ gpr_mu_lock(&connection_state->server_state->mu);
+ if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
+ const char *error_str = grpc_error_string(error);
+ gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
+ grpc_error_free_string(error_str);
+ if (error == GRPC_ERROR_NONE && args->endpoint != NULL) {
+ // We were shut down after handshaking completed successfully, so
+ // destroy the endpoint here.
+ // TODO(ctiller): It is currently necessary to shutdown endpoints
+ // before destroying them, even if we know that there are no
+ // pending read/write callbacks. This should be fixed, at which
+ // point this can be removed.
+ grpc_endpoint_shutdown(exec_ctx, args->endpoint);
+ grpc_endpoint_destroy(exec_ctx, args->endpoint);
+ grpc_channel_args_destroy(args->args);
+ grpc_slice_buffer_destroy(args->read_buffer);
+ gpr_free(args->read_buffer);
+ }
+ } else {
+ // If the handshaking succeeded but there is no endpoint, then the
+ // handshaker may have handed off the connection to some external
+ // code, so we can just clean up here without creating a transport.
+ if (args->endpoint != NULL) {
+ grpc_transport *transport =
+ grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
+ grpc_server_setup_transport(
+ exec_ctx, connection_state->server_state->server, transport,
+ connection_state->accepting_pollset, args->args);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport,
+ args->read_buffer);
+ grpc_channel_args_destroy(args->args);
+ }
+ }
+ pending_handshake_manager_remove_locked(connection_state->server_state,
+ connection_state->handshake_mgr);
+ gpr_mu_unlock(&connection_state->server_state->mu);
+ grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+ grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
+ gpr_free(connection_state->acceptor);
+ gpr_free(connection_state);
+}
+
+static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
+ grpc_pollset *accepting_pollset,
+ grpc_tcp_server_acceptor *acceptor) {
+ server_state *state = arg;
+ gpr_mu_lock(&state->mu);
+ if (state->shutdown) {
+ gpr_mu_unlock(&state->mu);
+ grpc_endpoint_destroy(exec_ctx, tcp);
+ gpr_free(acceptor);
+ return;
+ }
+ grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create();
+ pending_handshake_manager_add_locked(state, handshake_mgr);
+ gpr_mu_unlock(&state->mu);
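+  // Hold a tcp_server ref for the duration of the handshake; it is released
+  // in on_handshake_done().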
+ grpc_tcp_server_ref(state->tcp_server);
+ server_connection_state *connection_state =
+ gpr_malloc(sizeof(*connection_state));
+ connection_state->server_state = state;
+ connection_state->accepting_pollset = accepting_pollset;
+ connection_state->acceptor = acceptor;
+ connection_state->handshake_mgr = handshake_mgr;
+ grpc_chttp2_server_handshaker_factory_add_handshakers(
+ exec_ctx, state->handshaker_factory, connection_state->handshake_mgr);
+ // TODO(roth): We should really get this timeout value from channel
+ // args instead of hard-coding it.
+ const gpr_timespec deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
+ grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr,
+ tcp, state->args, deadline, acceptor,
+ on_handshake_done, connection_state);
+}
+
+/* Server callback: start listening on our ports */
+static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ void *arg, grpc_pollset **pollsets,
+ size_t pollset_count) {
+ server_state *state = arg;
+ gpr_mu_lock(&state->mu);
+ state->shutdown = false;
+ gpr_mu_unlock(&state->mu);
+ grpc_tcp_server_start(exec_ctx, state->tcp_server, pollsets, pollset_count,
+ on_accept, state);
+}
+
+static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ server_state *state = arg;
+ /* ensure all threads have unlocked */
+ gpr_mu_lock(&state->mu);
+ grpc_closure *destroy_done = state->server_destroy_listener_done;
+ GPR_ASSERT(state->shutdown);
+ pending_handshake_manager_shutdown_locked(exec_ctx, state);
+ gpr_mu_unlock(&state->mu);
+ // Flush queued work before destroying handshaker factory, since that
+ // may do a synchronous unref.
+ grpc_exec_ctx_flush(exec_ctx);
+ grpc_chttp2_server_handshaker_factory_destroy(exec_ctx,
+ state->handshaker_factory);
+ if (destroy_done != NULL) {
+ destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
+ grpc_exec_ctx_flush(exec_ctx);
+ }
+ grpc_channel_args_destroy(state->args);
+ gpr_mu_destroy(&state->mu);
+ gpr_free(state);
+}
+
+/* Server callback: destroy the tcp listener (so we don't generate further
+ callbacks) */
+static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
+ grpc_server *server, void *arg,
+ grpc_closure *destroy_done) {
+ server_state *state = arg;
+ gpr_mu_lock(&state->mu);
+ state->shutdown = true;
+ state->server_destroy_listener_done = destroy_done;
+ grpc_tcp_server *tcp_server = state->tcp_server;
+ gpr_mu_unlock(&state->mu);
+ grpc_tcp_server_shutdown_listeners(exec_ctx, tcp_server);
+ grpc_tcp_server_unref(exec_ctx, tcp_server);
+}
+
+grpc_error *grpc_chttp2_server_add_port(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, const char *addr,
+ grpc_channel_args *args,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory, int *port_num) {
+ grpc_resolved_addresses *resolved = NULL;
+ grpc_tcp_server *tcp_server = NULL;
+ size_t i;
+ size_t count = 0;
+ int port_temp;
+ grpc_error *err = GRPC_ERROR_NONE;
+ server_state *state = NULL;
+ grpc_error **errors = NULL;
+
+ *port_num = -1;
+
+ /* resolve address */
+ err = grpc_blocking_resolve_address(addr, "https", &resolved);
+ if (err != GRPC_ERROR_NONE) {
+ goto error;
+ }
+ state = gpr_malloc(sizeof(*state));
+ memset(state, 0, sizeof(*state));
+ grpc_closure_init(&state->tcp_server_shutdown_complete,
+ tcp_server_shutdown_complete, state);
+ err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
+ args, &tcp_server);
+ if (err != GRPC_ERROR_NONE) {
+ goto error;
+ }
+
+ state->server = server;
+ state->tcp_server = tcp_server;
+ state->args = args;
+ state->handshaker_factory = handshaker_factory;
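+  // Stay in the shutdown state until server_start_listener() is invoked.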
+ state->shutdown = true;
+ gpr_mu_init(&state->mu);
+
+ const size_t naddrs = resolved->naddrs;
+ errors = gpr_malloc(sizeof(*errors) * naddrs);
+ for (i = 0; i < naddrs; i++) {
+ errors[i] =
+ grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);
+ if (errors[i] == GRPC_ERROR_NONE) {
+ if (*port_num == -1) {
+ *port_num = port_temp;
+ } else {
+ GPR_ASSERT(*port_num == port_temp);
+ }
+ count++;
+ }
+ }
+ if (count == 0) {
+ char *msg;
+ gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
+ naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ gpr_free(msg);
+ goto error;
+ } else if (count != naddrs) {
+ char *msg;
+ gpr_asprintf(&msg, "Only %" PRIuPTR
+ " addresses added out of total %" PRIuPTR " resolved",
+ count, naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ gpr_free(msg);
+
+ const char *warning_message = grpc_error_string(err);
+ gpr_log(GPR_INFO, "WARNING: %s", warning_message);
+ grpc_error_free_string(warning_message);
+ /* we managed to bind some addresses: continue */
+ }
+ grpc_resolved_addresses_destroy(resolved);
+
+ /* Register with the server only upon success */
+ grpc_server_add_listener(exec_ctx, server, state, server_start_listener,
+ server_destroy_listener);
+ goto done;
+
+/* Error path: cleanup and return */
+error:
+ GPR_ASSERT(err != GRPC_ERROR_NONE);
+ if (resolved) {
+ grpc_resolved_addresses_destroy(resolved);
+ }
+ if (tcp_server) {
+ grpc_tcp_server_unref(exec_ctx, tcp_server);
+ } else {
+ grpc_channel_args_destroy(args);
+ grpc_chttp2_server_handshaker_factory_destroy(exec_ctx, handshaker_factory);
+ gpr_free(state);
+ }
+ *port_num = 0;
+
+done:
+ if (errors != NULL) {
+ for (i = 0; i < naddrs; i++) {
+ GRPC_ERROR_UNREF(errors[i]);
+ }
+ gpr_free(errors);
+ }
+ return err;
+}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h
new file mode 100644
index 0000000000..aa364b565d
--- /dev/null
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.h
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H
+
+#include <grpc/impl/codegen/grpc_types.h>
+
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+/// A server handshaker factory is used to create handshakers for server
+/// connections.
+typedef struct grpc_chttp2_server_handshaker_factory
+ grpc_chttp2_server_handshaker_factory;
+
+typedef struct {
+ void (*add_handshakers)(
+ grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory,
+ grpc_handshake_manager *handshake_mgr);
+ void (*destroy)(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory);
+} grpc_chttp2_server_handshaker_factory_vtable;
+
+struct grpc_chttp2_server_handshaker_factory {
+ const grpc_chttp2_server_handshaker_factory_vtable *vtable;
+};
+
+void grpc_chttp2_server_handshaker_factory_add_handshakers(
+ grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory,
+ grpc_handshake_manager *handshake_mgr);
+
+void grpc_chttp2_server_handshaker_factory_destroy(
+ grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory);
+
+/// Adds a port to \a server. Sets \a port_num to the port number.
+/// If \a handshaker_factory is not NULL, it will be used to create
+/// handshakers for the port.
+/// Takes ownership of \a args and \a handshaker_factory.
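+///
+/// A minimal usage sketch (mirroring grpc_server_add_insecure_http2_port()
+/// in this change; error handling omitted):
+///
+///   int port_num = 0;
+///   grpc_error *err = grpc_chttp2_server_add_port(
+///       exec_ctx, server, addr,
+///       grpc_channel_args_copy(grpc_server_get_channel_args(server)),
+///       NULL /* handshaker_factory */, &port_num);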
+grpc_error *grpc_chttp2_server_add_port(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, const char *addr,
+ grpc_channel_args *args,
+ grpc_chttp2_server_handshaker_factory *handshaker_factory, int *port_num);
+
+#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index e4967c2f08..7e286d4e46 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -33,180 +33,28 @@
#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/useful.h>
-#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/ext/transport/chttp2/server/chttp2_server.h"
#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/channel/handshaker.h"
-#include "src/core/lib/channel/http_server_filter.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
-typedef struct server_connect_state {
- grpc_server *server;
- grpc_pollset *accepting_pollset;
- grpc_tcp_server_acceptor *acceptor;
- grpc_handshake_manager *handshake_mgr;
-} server_connect_state;
-
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
- grpc_channel_args *args,
- gpr_slice_buffer *read_buffer, void *user_data,
- grpc_error *error) {
- server_connect_state *state = user_data;
- if (error != GRPC_ERROR_NONE) {
- const char *error_str = grpc_error_string(error);
- gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
- grpc_error_free_string(error_str);
- GRPC_ERROR_UNREF(error);
- grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr);
- gpr_free(read_buffer);
- } else {
- // Beware that the call to grpc_create_chttp2_transport() has to happen
- // before grpc_tcp_server_destroy(). This is fine here, but similar code
- // asynchronously doing a handshake instead of calling
- // grpc_tcp_server_start() (as in server_secure_chttp2.c) needs to add
- // synchronization to avoid this case.
- grpc_transport *transport =
- grpc_create_chttp2_transport(exec_ctx, args, endpoint, 0);
- grpc_server_setup_transport(exec_ctx, state->server, transport,
- state->accepting_pollset,
- grpc_server_get_channel_args(state->server));
- grpc_chttp2_transport_start_reading(exec_ctx, transport, read_buffer);
- }
- // Clean up.
- grpc_channel_args_destroy(args);
- grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
- gpr_free(state);
-}
-
-static void on_accept(grpc_exec_ctx *exec_ctx, void *server, grpc_endpoint *tcp,
- grpc_pollset *accepting_pollset,
- grpc_tcp_server_acceptor *acceptor) {
- server_connect_state *state = gpr_malloc(sizeof(server_connect_state));
- state->server = server;
- state->accepting_pollset = accepting_pollset;
- state->acceptor = acceptor;
- state->handshake_mgr = grpc_handshake_manager_create();
- // TODO(roth): We should really get this timeout value from channel
- // args instead of hard-coding it.
- const gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
- grpc_handshake_manager_do_handshake(
- exec_ctx, state->handshake_mgr, tcp, grpc_server_get_channel_args(server),
- deadline, acceptor, on_handshake_done, state);
-}
-
-/* Server callback: start listening on our ports */
-static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
- grpc_pollset **pollsets, size_t pollset_count) {
- grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, on_accept,
- server);
-}
-
-/* Server callback: destroy the tcp listener (so we don't generate further
- callbacks) */
-static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
- grpc_closure *destroy_done) {
- grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
- grpc_tcp_server_unref(exec_ctx, tcp);
- grpc_exec_ctx_sched(exec_ctx, destroy_done, GRPC_ERROR_NONE, NULL);
-}
-
int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
- grpc_resolved_addresses *resolved = NULL;
- grpc_tcp_server *tcp = NULL;
- size_t i;
- size_t count = 0;
- int port_num = -1;
- int port_temp;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *err = GRPC_ERROR_NONE;
-
+ int port_num = 0;
GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2,
(server, addr));
-
- grpc_error **errors = NULL;
- err = grpc_blocking_resolve_address(addr, "https", &resolved);
- if (err != GRPC_ERROR_NONE) {
- goto error;
- }
-
- err =
- grpc_tcp_server_create(NULL, grpc_server_get_channel_args(server), &tcp);
+ grpc_error *err = grpc_chttp2_server_add_port(
+ &exec_ctx, server, addr,
+ grpc_channel_args_copy(grpc_server_get_channel_args(server)),
+ NULL /* handshaker_factory */, &port_num);
if (err != GRPC_ERROR_NONE) {
- goto error;
- }
-
- const size_t naddrs = resolved->naddrs;
- errors = gpr_malloc(sizeof(*errors) * naddrs);
- for (i = 0; i < naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
- if (errors[i] == GRPC_ERROR_NONE) {
- if (port_num == -1) {
- port_num = port_temp;
- } else {
- GPR_ASSERT(port_num == port_temp);
- }
- count++;
- }
+ const char *msg = grpc_error_string(err);
+ gpr_log(GPR_ERROR, "%s", msg);
+ grpc_error_free_string(msg);
+ GRPC_ERROR_UNREF(err);
}
- if (count == 0) {
- char *msg;
- gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
- naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
- gpr_free(msg);
- goto error;
- } else if (count != naddrs) {
- char *msg;
- gpr_asprintf(&msg, "Only %" PRIuPTR
- " addresses added out of total %" PRIuPTR " resolved",
- count, naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
- gpr_free(msg);
-
- const char *warning_message = grpc_error_string(err);
- gpr_log(GPR_INFO, "WARNING: %s", warning_message);
- grpc_error_free_string(warning_message);
- /* we managed to bind some addresses: continue */
- }
- grpc_resolved_addresses_destroy(resolved);
-
- /* Register with the server only upon success */
- grpc_server_add_listener(&exec_ctx, server, tcp, start, destroy);
- goto done;
-
-/* Error path: cleanup and return */
-error:
- GPR_ASSERT(err != GRPC_ERROR_NONE);
- if (resolved) {
- grpc_resolved_addresses_destroy(resolved);
- }
- if (tcp) {
- grpc_tcp_server_unref(&exec_ctx, tcp);
- }
- port_num = 0;
-
- const char *msg = grpc_error_string(err);
- gpr_log(GPR_ERROR, "%s", msg);
- grpc_error_free_string(msg);
- GRPC_ERROR_UNREF(err);
-
-done:
grpc_exec_ctx_finish(&exec_ctx);
- if (errors != NULL) {
- for (i = 0; i < naddrs; i++) {
- GRPC_ERROR_UNREF(errors[i]);
- }
- }
- gpr_free(errors);
return port_num;
}
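Note: the rewritten grpc_server_add_insecure_http2_port() above now delegates the resolve/bind/listen sequence to grpc_chttp2_server_add_port() and only keeps the logging and the return value. As a reference point, a minimal caller-side sketch; the address is illustrative and completion-queue setup is elided:

#include <grpc/grpc.h>

static void add_plaintext_port(void) {
  grpc_server *server = grpc_server_create(NULL /* channel args */, NULL);
  /* Returns the bound port on success, 0 on failure (already logged above). */
  int port = grpc_server_add_insecure_http2_port(server, "localhost:50051");
  if (port == 0) {
    grpc_server_destroy(server);
    return;
  }
  /* Completion-queue registration and grpc_server_start() elided. */
}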
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
index 9af17fb5ae..aa2ecf5743 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
@@ -57,8 +57,12 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
char *name;
gpr_asprintf(&name, "fd:%d", fd);
- grpc_endpoint *server_endpoint = grpc_tcp_create(
- grpc_fd_create(fd, name), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota *resource_quota = grpc_resource_quota_from_channel_args(
+ grpc_server_get_channel_args(server));
+ grpc_endpoint *server_endpoint =
+ grpc_tcp_create(grpc_fd_create(fd, name), resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_free(name);
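Note: this hunk threads a resource quota, taken from the server's channel args, into the grpc_tcp_create() call that wraps the raw file descriptor, and drops the temporary quota reference right afterwards. The public entry point is unchanged; a hedged usage sketch, assuming the connected socket comes from elsewhere:

#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>

/* Hand an already-connected socket to a server; the endpoint created for it
   picks up the server's resource quota as shown in the hunk above. */
static void serve_connected_fd(grpc_server *server, int connected_fd) {
  grpc_server_add_insecure_channel_from_fd(server, NULL /* reserved */,
                                           connected_fd);
}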
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index 0fab888541..a33a7a3f7d 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -38,218 +38,63 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
+
+#include "src/core/ext/transport/chttp2/server/chttp2_server.h"
+
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
-#include "src/core/lib/channel/http_server_filter.h"
-#include "src/core/lib/iomgr/endpoint.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h"
-#include "src/core/lib/security/transport/auth_filters.h"
-#include "src/core/lib/security/transport/security_connector.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
-typedef struct server_secure_state {
- grpc_server *server;
- grpc_tcp_server *tcp;
- grpc_server_security_connector *sc;
- grpc_server_credentials *creds;
- bool is_shutdown;
- gpr_mu mu;
- grpc_closure tcp_server_shutdown_complete;
- grpc_closure *server_destroy_listener_done;
-} server_secure_state;
-
-typedef struct server_secure_connect {
- server_secure_state *server_state;
- grpc_pollset *accepting_pollset;
- grpc_tcp_server_acceptor *acceptor;
- grpc_handshake_manager *handshake_mgr;
- // TODO(roth): Remove the following two fields when we eliminate
- // grpc_server_security_connector_do_handshake().
- gpr_timespec deadline;
- grpc_channel_args *args;
-} server_secure_connect;
-
-static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint,
- grpc_auth_context *auth_context) {
- server_secure_connect *connection_state = statep;
- if (status == GRPC_SECURITY_OK) {
- if (secure_endpoint) {
- gpr_mu_lock(&connection_state->server_state->mu);
- if (!connection_state->server_state->is_shutdown) {
- grpc_transport *transport = grpc_create_chttp2_transport(
- exec_ctx, grpc_server_get_channel_args(
- connection_state->server_state->server),
- secure_endpoint, 0);
- grpc_arg args_to_add[2];
- args_to_add[0] = grpc_server_credentials_to_arg(
- connection_state->server_state->creds);
- args_to_add[1] = grpc_auth_context_to_arg(auth_context);
- grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
- connection_state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
- grpc_server_setup_transport(
- exec_ctx, connection_state->server_state->server, transport,
- connection_state->accepting_pollset, args_copy);
- grpc_channel_args_destroy(args_copy);
- grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
- } else {
- /* We need to consume this here, because the server may already have
- * gone away. */
- grpc_endpoint_destroy(exec_ctx, secure_endpoint);
- }
- gpr_mu_unlock(&connection_state->server_state->mu);
- }
- } else {
- gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
- }
- grpc_channel_args_destroy(connection_state->args);
- grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
- gpr_free(connection_state);
-}
-
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
- grpc_channel_args *args,
- gpr_slice_buffer *read_buffer, void *user_data,
- grpc_error *error) {
- server_secure_connect *connection_state = user_data;
- if (error != GRPC_ERROR_NONE) {
- const char *error_str = grpc_error_string(error);
- gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
- grpc_error_free_string(error_str);
- GRPC_ERROR_UNREF(error);
- grpc_channel_args_destroy(args);
- gpr_free(read_buffer);
- grpc_handshake_manager_shutdown(exec_ctx, connection_state->handshake_mgr);
- grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
- grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
- gpr_free(connection_state);
- return;
- }
- grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
- connection_state->handshake_mgr = NULL;
- // TODO(roth, jboeuf): Convert security connector handshaking to use new
- // handshake API, and then move the code from on_secure_handshake_done()
- // into this function.
- connection_state->args = args;
- grpc_server_security_connector_do_handshake(
- exec_ctx, connection_state->server_state->sc, connection_state->acceptor,
- endpoint, read_buffer, connection_state->deadline,
- on_secure_handshake_done, connection_state);
-}
-
-static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
- grpc_pollset *accepting_pollset,
- grpc_tcp_server_acceptor *acceptor) {
- server_secure_state *server_state = statep;
- server_secure_connect *connection_state = NULL;
- gpr_mu_lock(&server_state->mu);
- if (server_state->is_shutdown) {
- gpr_mu_unlock(&server_state->mu);
- grpc_endpoint_destroy(exec_ctx, tcp);
- return;
- }
- gpr_mu_unlock(&server_state->mu);
- grpc_tcp_server_ref(server_state->tcp);
- connection_state = gpr_malloc(sizeof(*connection_state));
- connection_state->server_state = server_state;
- connection_state->accepting_pollset = accepting_pollset;
- connection_state->acceptor = acceptor;
- connection_state->handshake_mgr = grpc_handshake_manager_create();
- // TODO(roth): We should really get this timeout value from channel
- // args instead of hard-coding it.
- connection_state->deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
- grpc_handshake_manager_do_handshake(
- exec_ctx, connection_state->handshake_mgr, tcp,
- grpc_server_get_channel_args(connection_state->server_state->server),
- connection_state->deadline, acceptor, on_handshake_done,
- connection_state);
+typedef struct {
+ grpc_chttp2_server_handshaker_factory base;
+ grpc_server_security_connector *security_connector;
+} server_security_handshaker_factory;
+
+static void server_security_handshaker_factory_add_handshakers(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *hf,
+ grpc_handshake_manager *handshake_mgr) {
+ server_security_handshaker_factory *handshaker_factory =
+ (server_security_handshaker_factory *)hf;
+ grpc_server_security_connector_add_handshakers(
+ exec_ctx, handshaker_factory->security_connector, handshake_mgr);
}
-/* Server callback: start listening on our ports */
-static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
- void *statep, grpc_pollset **pollsets,
- size_t pollset_count) {
- server_secure_state *server_state = statep;
- gpr_mu_lock(&server_state->mu);
- server_state->is_shutdown = false;
- gpr_mu_unlock(&server_state->mu);
- grpc_tcp_server_start(exec_ctx, server_state->tcp, pollsets, pollset_count,
- on_accept, server_state);
+static void server_security_handshaker_factory_destroy(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *hf) {
+ server_security_handshaker_factory *handshaker_factory =
+ (server_security_handshaker_factory *)hf;
+ GRPC_SECURITY_CONNECTOR_UNREF(&handshaker_factory->security_connector->base,
+ "server");
+ gpr_free(hf);
}
-static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *statep,
- grpc_error *error) {
- server_secure_state *server_state = statep;
- /* ensure all threads have unlocked */
- gpr_mu_lock(&server_state->mu);
- grpc_closure *destroy_done = server_state->server_destroy_listener_done;
- GPR_ASSERT(server_state->is_shutdown);
- gpr_mu_unlock(&server_state->mu);
- /* clean up */
- grpc_server_security_connector_shutdown(exec_ctx, server_state->sc);
-
- /* Flush queued work before a synchronous unref. */
- grpc_exec_ctx_flush(exec_ctx);
- GRPC_SECURITY_CONNECTOR_UNREF(&server_state->sc->base, "server");
- grpc_server_credentials_unref(server_state->creds);
-
- if (destroy_done != NULL) {
- destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
- grpc_exec_ctx_flush(exec_ctx);
- }
- gpr_free(server_state);
-}
-
-static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
- grpc_server *server, void *statep,
- grpc_closure *callback) {
- server_secure_state *server_state = statep;
- grpc_tcp_server *tcp;
- gpr_mu_lock(&server_state->mu);
- server_state->is_shutdown = true;
- server_state->server_destroy_listener_done = callback;
- tcp = server_state->tcp;
- gpr_mu_unlock(&server_state->mu);
- grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
- grpc_tcp_server_unref(exec_ctx, server_state->tcp);
-}
+static const grpc_chttp2_server_handshaker_factory_vtable
+ server_security_handshaker_factory_vtable = {
+ server_security_handshaker_factory_add_handshakers,
+ server_security_handshaker_factory_destroy};
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_server_credentials *creds) {
- grpc_resolved_addresses *resolved = NULL;
- grpc_tcp_server *tcp = NULL;
- server_secure_state *server_state = NULL;
- size_t i;
- size_t count = 0;
- int port_num = -1;
- int port_temp;
- grpc_security_status status = GRPC_SECURITY_ERROR;
- grpc_server_security_connector *sc = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error *err = GRPC_ERROR_NONE;
- grpc_error **errors = NULL;
-
+ grpc_server_security_connector *sc = NULL;
+ int port_num = 0;
GRPC_API_TRACE(
"grpc_server_add_secure_http2_port("
"server=%p, addr=%s, creds=%p)",
3, (server, addr, creds));
-
- /* create security context */
+ // Create security context.
if (creds == NULL) {
err = GRPC_ERROR_CREATE(
"No credentials specified for secure server port (creds==NULL)");
- goto error;
+ goto done;
}
- status = grpc_server_credentials_create_security_connector(creds, &sc);
+ grpc_security_status status =
+ grpc_server_credentials_create_security_connector(creds, &sc);
if (status != GRPC_SECURITY_OK) {
char *msg;
gpr_asprintf(&msg,
@@ -258,106 +103,28 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
err = grpc_error_set_int(GRPC_ERROR_CREATE(msg),
GRPC_ERROR_INT_SECURITY_STATUS, status);
gpr_free(msg);
- goto error;
+ goto done;
}
- sc->channel_args = grpc_server_get_channel_args(server);
-
- /* resolve address */
- err = grpc_blocking_resolve_address(addr, "https", &resolved);
- if (err != GRPC_ERROR_NONE) {
- goto error;
- }
- server_state = gpr_malloc(sizeof(*server_state));
- memset(server_state, 0, sizeof(*server_state));
- grpc_closure_init(&server_state->tcp_server_shutdown_complete,
- tcp_server_shutdown_complete, server_state);
- err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
- grpc_server_get_channel_args(server), &tcp);
+ // Create handshaker factory.
+ server_security_handshaker_factory *handshaker_factory =
+ gpr_malloc(sizeof(*handshaker_factory));
+ memset(handshaker_factory, 0, sizeof(*handshaker_factory));
+ handshaker_factory->base.vtable = &server_security_handshaker_factory_vtable;
+ handshaker_factory->security_connector = sc;
+ // Create channel args.
+ grpc_arg channel_arg = grpc_server_credentials_to_arg(creds);
+ grpc_channel_args *args = grpc_channel_args_copy_and_add(
+ grpc_server_get_channel_args(server), &channel_arg, 1);
+ // Add server port.
+ err = grpc_chttp2_server_add_port(&exec_ctx, server, addr, args,
+ &handshaker_factory->base, &port_num);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
if (err != GRPC_ERROR_NONE) {
- goto error;
+ const char *msg = grpc_error_string(err);
+ gpr_log(GPR_ERROR, "%s", msg);
+ grpc_error_free_string(msg);
+ GRPC_ERROR_UNREF(err);
}
-
- server_state->server = server;
- server_state->tcp = tcp;
- server_state->sc = sc;
- server_state->creds = grpc_server_credentials_ref(creds);
- server_state->is_shutdown = true;
- gpr_mu_init(&server_state->mu);
-
- errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
- for (i = 0; i < resolved->naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
- if (errors[i] == GRPC_ERROR_NONE) {
- if (port_num == -1) {
- port_num = port_temp;
- } else {
- GPR_ASSERT(port_num == port_temp);
- }
- count++;
- }
- }
- if (count == 0) {
- char *msg;
- gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
- resolved->naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, resolved->naddrs);
- gpr_free(msg);
- goto error;
- } else if (count != resolved->naddrs) {
- char *msg;
- gpr_asprintf(&msg, "Only %" PRIuPTR
- " addresses added out of total %" PRIuPTR " resolved",
- count, resolved->naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, resolved->naddrs);
- gpr_free(msg);
-
- const char *warning_message = grpc_error_string(err);
- gpr_log(GPR_INFO, "WARNING: %s", warning_message);
- grpc_error_free_string(warning_message);
- /* we managed to bind some addresses: continue */
- } else {
- for (i = 0; i < resolved->naddrs; i++) {
- GRPC_ERROR_UNREF(errors[i]);
- }
- }
- gpr_free(errors);
- errors = NULL;
- grpc_resolved_addresses_destroy(resolved);
-
- /* Register with the server only upon success */
- grpc_server_add_listener(&exec_ctx, server, server_state,
- server_start_listener, server_destroy_listener);
-
- grpc_exec_ctx_finish(&exec_ctx);
return port_num;
-
-/* Error path: cleanup and return */
-error:
- GPR_ASSERT(err != GRPC_ERROR_NONE);
- if (errors != NULL) {
- for (i = 0; i < resolved->naddrs; i++) {
- GRPC_ERROR_UNREF(errors[i]);
- }
- gpr_free(errors);
- }
- if (resolved) {
- grpc_resolved_addresses_destroy(resolved);
- }
- if (tcp) {
- grpc_tcp_server_unref(&exec_ctx, tcp);
- } else {
- if (sc) {
- grpc_exec_ctx_flush(&exec_ctx);
- GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
- }
- if (server_state) {
- gpr_free(server_state);
- }
- }
- grpc_exec_ctx_finish(&exec_ctx);
- const char *msg = grpc_error_string(err);
- GRPC_ERROR_UNREF(err);
- gpr_log(GPR_ERROR, "%s", msg);
- grpc_error_free_string(msg);
- return 0;
}
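Note: the secure listener now reaches the shared chttp2_server.c code through the two-entry vtable defined above: add_handshakers() contributes security handshakers to each accepted connection, and destroy() releases the factory (the insecure path simply passes NULL as its factory). A hedged sketch of a hypothetical no-op factory built against the same vtable; the types are assumed to be declared in the new chttp2_server.h:

#include <grpc/support/alloc.h>
#include "src/core/ext/transport/chttp2/server/chttp2_server.h"

typedef struct {
  grpc_chttp2_server_handshaker_factory base;
} noop_handshaker_factory;

static void noop_add_handshakers(grpc_exec_ctx *exec_ctx,
                                 grpc_chttp2_server_handshaker_factory *hf,
                                 grpc_handshake_manager *handshake_mgr) {
  /* Intentionally empty: no handshakers beyond the defaults. */
}

static void noop_destroy(grpc_exec_ctx *exec_ctx,
                         grpc_chttp2_server_handshaker_factory *hf) {
  gpr_free(hf);
}

static const grpc_chttp2_server_handshaker_factory_vtable noop_vtable = {
    noop_add_handshakers, noop_destroy};

static grpc_chttp2_server_handshaker_factory *noop_factory_create(void) {
  noop_handshaker_factory *f = gpr_malloc(sizeof(*f));
  f->base.vtable = &noop_vtable;
  return &f->base;
}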
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.c b/src/core/ext/transport/chttp2/transport/bin_decoder.c
index 2d90b01cd8..3eef80b557 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.c
@@ -34,6 +34,7 @@
#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
static uint8_t decode_table[] = {
@@ -142,11 +143,11 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) {
return true;
}
-gpr_slice grpc_chttp2_base64_decode(gpr_slice input) {
- size_t input_length = GPR_SLICE_LENGTH(input);
+grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
+ size_t input_length = GRPC_SLICE_LENGTH(input);
size_t output_length = input_length / 4 * 3;
struct grpc_base64_decode_context ctx;
- gpr_slice output;
+ grpc_slice output;
if (input_length % 4 != 0) {
gpr_log(GPR_ERROR,
@@ -158,7 +159,7 @@ gpr_slice grpc_chttp2_base64_decode(gpr_slice input) {
}
if (input_length > 0) {
- uint8_t *input_end = GPR_SLICE_END_PTR(input);
+ uint8_t *input_end = GRPC_SLICE_END_PTR(input);
if (*(--input_end) == '=') {
output_length--;
if (*(--input_end) == '=') {
@@ -166,30 +167,30 @@ gpr_slice grpc_chttp2_base64_decode(gpr_slice input) {
}
}
}
- output = gpr_slice_malloc(output_length);
+ output = grpc_slice_malloc(output_length);
- ctx.input_cur = GPR_SLICE_START_PTR(input);
- ctx.input_end = GPR_SLICE_END_PTR(input);
- ctx.output_cur = GPR_SLICE_START_PTR(output);
- ctx.output_end = GPR_SLICE_END_PTR(output);
+ ctx.input_cur = GRPC_SLICE_START_PTR(input);
+ ctx.input_end = GRPC_SLICE_END_PTR(input);
+ ctx.output_cur = GRPC_SLICE_START_PTR(output);
+ ctx.output_end = GRPC_SLICE_END_PTR(output);
ctx.contains_tail = false;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = gpr_dump_slice(input, GPR_DUMP_ASCII);
+ char *s = grpc_dump_slice(input, GPR_DUMP_ASCII);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
- gpr_slice_unref(output);
+ grpc_slice_unref(output);
return gpr_empty_slice();
}
- GPR_ASSERT(ctx.output_cur == GPR_SLICE_END_PTR(output));
- GPR_ASSERT(ctx.input_cur == GPR_SLICE_END_PTR(input));
+ GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
+ GPR_ASSERT(ctx.input_cur == GRPC_SLICE_END_PTR(input));
return output;
}
-gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
- size_t output_length) {
- size_t input_length = GPR_SLICE_LENGTH(input);
- gpr_slice output = gpr_slice_malloc(output_length);
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
+ size_t output_length) {
+ size_t input_length = GRPC_SLICE_LENGTH(input);
+ grpc_slice output = grpc_slice_malloc(output_length);
struct grpc_base64_decode_context ctx;
// The length of a base64 string cannot be 4 * n + 1
@@ -199,7 +200,7 @@ gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
"grpc_chttp2_base64_decode_with_length has a length of %d, which "
"has a tail of 1 byte.\n",
(int)input_length);
- gpr_slice_unref(output);
+ grpc_slice_unref(output);
return gpr_empty_slice();
}
@@ -209,24 +210,24 @@ gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
"than the max possible output length %d.\n",
(int)output_length,
(int)(input_length / 4 * 3 + tail_xtra[input_length % 4]));
- gpr_slice_unref(output);
+ grpc_slice_unref(output);
return gpr_empty_slice();
}
- ctx.input_cur = GPR_SLICE_START_PTR(input);
- ctx.input_end = GPR_SLICE_END_PTR(input);
- ctx.output_cur = GPR_SLICE_START_PTR(output);
- ctx.output_end = GPR_SLICE_END_PTR(output);
+ ctx.input_cur = GRPC_SLICE_START_PTR(input);
+ ctx.input_end = GRPC_SLICE_END_PTR(input);
+ ctx.output_cur = GRPC_SLICE_START_PTR(output);
+ ctx.output_end = GRPC_SLICE_END_PTR(output);
ctx.contains_tail = true;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = gpr_dump_slice(input, GPR_DUMP_ASCII);
+ char *s = grpc_dump_slice(input, GPR_DUMP_ASCII);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
- gpr_slice_unref(output);
+ grpc_slice_unref(output);
return gpr_empty_slice();
}
- GPR_ASSERT(ctx.output_cur == GPR_SLICE_END_PTR(output));
- GPR_ASSERT(ctx.input_cur <= GPR_SLICE_END_PTR(input));
+ GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
+ GPR_ASSERT(ctx.input_cur <= GRPC_SLICE_END_PTR(input));
return output;
}
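Note: beyond the mechanical gpr_slice -> grpc_slice rename, the decoder's contract is unchanged: it never takes ownership of its input and returns an empty slice on malformed input. A hedged usage sketch; the literal is illustrative:

#include <grpc/slice.h>
#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"

/* Decode a padded base64 string into a newly allocated slice. */
static grpc_slice decode_example(void) {
  grpc_slice in = grpc_slice_from_copied_string("aGVsbG8=");  /* "hello" */
  grpc_slice out = grpc_chttp2_base64_decode(in);             /* new slice */
  grpc_slice_unref(in);  /* the decoder does not consume its input */
  return out;            /* empty slice if decoding failed; caller unrefs */
}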
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h
index b9d40c9b74..83a90be519 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include <stdbool.h>
struct grpc_base64_decode_context {
@@ -55,12 +55,12 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx);
 /* base64 decode a slice with pad chars. Returns a new slice, does not take
    ownership of the input. Returns an empty slice if decoding fails. */
-gpr_slice grpc_chttp2_base64_decode(gpr_slice input);
+grpc_slice grpc_chttp2_base64_decode(grpc_slice input);
 /* base64 decode a slice without pad chars; the data length is required. Returns
    a new slice, does not take ownership of the input. Returns an empty slice if
    decoding fails. */
-gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
- size_t output_length);
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
+ size_t output_length);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.c b/src/core/ext/transport/chttp2/transport/bin_encoder.c
index 1b43c28be1..af25a4352a 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.c
@@ -61,14 +61,14 @@ static const b64_huff_sym huff_alphabet[64] = {
static const uint8_t tail_xtra[3] = {0, 2, 3};
-gpr_slice grpc_chttp2_base64_encode(gpr_slice input) {
- size_t input_length = GPR_SLICE_LENGTH(input);
+grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
+ size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
size_t output_length = input_triplets * 4 + tail_xtra[tail_case];
- gpr_slice output = gpr_slice_malloc(output_length);
- uint8_t *in = GPR_SLICE_START_PTR(input);
- char *out = (char *)GPR_SLICE_START_PTR(output);
+ grpc_slice output = grpc_slice_malloc(output_length);
+ uint8_t *in = GRPC_SLICE_START_PTR(input);
+ char *out = (char *)GRPC_SLICE_START_PTR(output);
size_t i;
/* encode full triplets */
@@ -100,27 +100,29 @@ gpr_slice grpc_chttp2_base64_encode(gpr_slice input) {
break;
}
- GPR_ASSERT(out == (char *)GPR_SLICE_END_PTR(output));
- GPR_ASSERT(in == GPR_SLICE_END_PTR(input));
+ GPR_ASSERT(out == (char *)GRPC_SLICE_END_PTR(output));
+ GPR_ASSERT(in == GRPC_SLICE_END_PTR(input));
return output;
}
-gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
+grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) {
size_t nbits;
uint8_t *in;
uint8_t *out;
- gpr_slice output;
+ grpc_slice output;
uint32_t temp = 0;
uint32_t temp_length = 0;
nbits = 0;
- for (in = GPR_SLICE_START_PTR(input); in != GPR_SLICE_END_PTR(input); ++in) {
+ for (in = GRPC_SLICE_START_PTR(input); in != GRPC_SLICE_END_PTR(input);
+ ++in) {
nbits += grpc_chttp2_huffsyms[*in].length;
}
- output = gpr_slice_malloc(nbits / 8 + (nbits % 8 != 0));
- out = GPR_SLICE_START_PTR(output);
- for (in = GPR_SLICE_START_PTR(input); in != GPR_SLICE_END_PTR(input); ++in) {
+ output = grpc_slice_malloc(nbits / 8 + (nbits % 8 != 0));
+ out = GRPC_SLICE_START_PTR(output);
+ for (in = GRPC_SLICE_START_PTR(input); in != GRPC_SLICE_END_PTR(input);
+ ++in) {
int sym = *in;
temp <<= grpc_chttp2_huffsyms[sym].length;
temp |= grpc_chttp2_huffsyms[sym].bits;
@@ -141,7 +143,7 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
(uint8_t)(0xffu >> temp_length));
}
- GPR_ASSERT(out == GPR_SLICE_END_PTR(output));
+ GPR_ASSERT(out == GRPC_SLICE_END_PTR(output));
return output;
}
@@ -175,16 +177,17 @@ static void enc_add1(huff_out *out, uint8_t a) {
enc_flush_some(out);
}
-gpr_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(gpr_slice input) {
- size_t input_length = GPR_SLICE_LENGTH(input);
+grpc_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(
+ grpc_slice input) {
+ size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
size_t output_syms = input_triplets * 4 + tail_xtra[tail_case];
size_t max_output_bits = 11 * output_syms;
size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0);
- gpr_slice output = gpr_slice_malloc(max_output_length);
- uint8_t *in = GPR_SLICE_START_PTR(input);
- uint8_t *start_out = GPR_SLICE_START_PTR(output);
+ grpc_slice output = grpc_slice_malloc(max_output_length);
+ uint8_t *in = GRPC_SLICE_START_PTR(input);
+ uint8_t *start_out = GRPC_SLICE_START_PTR(output);
huff_out out;
size_t i;
@@ -231,9 +234,9 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(gpr_slice input) {
(uint8_t)(0xffu >> out.temp_length));
}
- GPR_ASSERT(out.out <= GPR_SLICE_END_PTR(output));
- GPR_SLICE_SET_LENGTH(output, out.out - start_out);
+ GPR_ASSERT(out.out <= GRPC_SLICE_END_PTR(output));
+ GRPC_SLICE_SET_LENGTH(output, out.out - start_out);
- GPR_ASSERT(in == GPR_SLICE_END_PTR(input));
+ GPR_ASSERT(in == GRPC_SLICE_END_PTR(input));
return output;
}
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.h b/src/core/ext/transport/chttp2/transport/bin_encoder.h
index 61ebbafa9a..9e143b46e2 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.h
@@ -34,21 +34,22 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
/* base64 encode a slice. Returns a new slice, does not take ownership of the
input */
-gpr_slice grpc_chttp2_base64_encode(gpr_slice input);
+grpc_slice grpc_chttp2_base64_encode(grpc_slice input);
/* Compress a slice with the static huffman encoder detailed in the hpack
standard. Returns a new slice, does not take ownership of the input */
-gpr_slice grpc_chttp2_huffman_compress(gpr_slice input);
+grpc_slice grpc_chttp2_huffman_compress(grpc_slice input);
/* equivalent to:
- gpr_slice x = grpc_chttp2_base64_encode(input);
- gpr_slice y = grpc_chttp2_huffman_compress(x);
- gpr_slice_unref(x);
+ grpc_slice x = grpc_chttp2_base64_encode(input);
+ grpc_slice y = grpc_chttp2_huffman_compress(x);
+ grpc_slice_unref(x);
return y; */
-gpr_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(gpr_slice input);
+grpc_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(
+ grpc_slice input);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
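Note: the encoder helpers follow the same ownership rules, each call returns a new slice and leaves its input untouched. The two-step form spelled out in the comment above can be written directly, as in this hedged sketch:

#include <grpc/slice.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"

static grpc_slice encode_binary_metadata(grpc_slice raw_value) {
  grpc_slice b64 = grpc_chttp2_base64_encode(raw_value);  /* new slice */
  grpc_slice packed = grpc_chttp2_huffman_compress(b64);  /* new slice */
  grpc_slice_unref(b64);
  return packed;  /* same result as the _impl helper declared above */
}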
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index ecf3aea870..6bc054866b 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -38,9 +38,9 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -51,6 +51,7 @@
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
@@ -110,9 +111,20 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored);
-static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_error *error);
+
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_error *error);
@@ -129,12 +141,12 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
grpc_endpoint_destroy(exec_ctx, t->ep);
- gpr_slice_buffer_destroy(&t->qbuf);
+ grpc_slice_buffer_destroy(&t->qbuf);
- gpr_slice_buffer_destroy(&t->outbuf);
+ grpc_slice_buffer_destroy(&t->outbuf);
grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
- gpr_slice_buffer_destroy(&t->read_buffer);
+ grpc_slice_buffer_destroy(&t->read_buffer);
grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
@@ -229,9 +241,9 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
- gpr_slice_buffer_init(&t->qbuf);
+ grpc_slice_buffer_init(&t->qbuf);
- gpr_slice_buffer_init(&t->outbuf);
+ grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
@@ -241,11 +253,16 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
grpc_closure_init(&t->read_action_begin, read_action_begin, t);
grpc_closure_init(&t->read_action_locked, read_action_locked, t);
+ grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
+ grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
+ grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+ grpc_closure_init(&t->destructive_reclaimer_locked,
+ destructive_reclaimer_locked, t);
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(&t->hpack_parser);
- gpr_slice_buffer_init(&t->read_buffer);
+ grpc_slice_buffer_init(&t->read_buffer);
/* 8 is a random stab in the dark as to a good initial size: it's small enough
that it shouldn't waste memory for infrequently used connections, yet
@@ -267,8 +284,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->sent_local_settings = 0;
if (is_client) {
- gpr_slice_buffer_add(&t->outbuf, gpr_slice_from_copied_string(
- GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+ grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING));
grpc_chttp2_initiate_write(exec_ctx, t, false, "initial_write");
}
@@ -362,6 +379,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
+ post_benign_reclaimer(exec_ctx, t);
}
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
@@ -451,7 +469,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]);
grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]);
grpc_chttp2_data_parser_init(&s->data_parser);
- gpr_slice_buffer_init(&s->flow_controlled_buffer);
+ grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_closure_init(&s->complete_fetch, complete_fetch, s);
grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
@@ -467,6 +485,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
}
GPR_TIMER_END("init_stream", 0);
@@ -510,7 +529,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_chttp2_data_parser_destroy(exec_ctx, &s->data_parser);
grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
- gpr_slice_buffer_destroy(&s->flow_controlled_buffer);
+ grpc_slice_buffer_destroy(&s->flow_controlled_buffer);
GRPC_ERROR_UNREF(s->read_closed_error);
GRPC_ERROR_UNREF(s->write_closed_error);
@@ -580,11 +599,13 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
write_state_name(t->write_state),
write_state_name(st), reason));
t->write_state = st;
- if (st == GRPC_CHTTP2_WRITE_STATE_IDLE &&
- t->close_transport_on_writes_finished != NULL) {
- grpc_error *err = t->close_transport_on_writes_finished;
- t->close_transport_on_writes_finished = NULL;
- close_transport_locked(exec_ctx, t, err);
+ if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &t->run_after_write, NULL);
+ if (t->close_transport_on_writes_finished != NULL) {
+ grpc_error *err = t->close_transport_on_writes_finished;
+ t->close_transport_on_writes_finished = NULL;
+ close_transport_locked(exec_ctx, t, err);
+ }
}
}
@@ -675,7 +696,12 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
}
- grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+ }
+ }
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
@@ -705,6 +731,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
break;
}
+ grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));
+
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
GPR_TIMER_END("terminate_writing_with_lock", 0);
}
@@ -728,11 +756,11 @@ static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
- gpr_slice goaway_text) {
- char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ grpc_slice goaway_text) {
+ char *msg = grpc_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
- gpr_slice_unref(goaway_text);
+ grpc_slice_unref(goaway_text);
t->seen_goaway = 1;
/* lie: use transient failure from the transport to indicate goaway has been
* received */
@@ -780,6 +808,7 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
s->max_recv_bytes = GPR_MAX(stream_incoming_window, s->max_recv_bytes);
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
@@ -793,7 +822,14 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
}
}
+/* Flag that this closure barrier wants stats to be updated before finishing */
#define CLOSURE_BARRIER_STATS_BIT (1 << 0)
+/* Flag that this closure barrier may be covering a write in a pollset, and so
+ we should not complete this closure until we can prove that the write got
+ scheduled */
+#define CLOSURE_BARRIER_MAY_COVER_WRITE (1 << 1)
+/* First bit of the reference count, stored in the high order bits (with the low
+ bits being used for flags defined above) */
#define CLOSURE_BARRIER_FIRST_REF_BIT (1 << 16)
static grpc_closure *add_closure_barrier(grpc_closure *closure) {
@@ -820,6 +856,16 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
+ if (grpc_http_trace) {
+ const char *errstr = grpc_error_string(error);
+ gpr_log(GPR_DEBUG,
+ "complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
+ closure,
+ (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
+ (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
+ desc, errstr);
+ grpc_error_free_string(errstr);
+ }
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
closure->error_data.error =
@@ -836,7 +882,13 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_transport_move_stats(&s->stats, s->collecting_stats);
s->collecting_stats = NULL;
}
- grpc_closure_run(exec_ctx, closure, closure->error_data.error);
+ if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
+ !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) {
+ grpc_closure_run(exec_ctx, closure, closure->error_data.error);
+ } else {
+ grpc_closure_list_append(&t->run_after_write, closure,
+ closure->error_data.error);
+ }
}
}
@@ -855,8 +907,8 @@ static void add_fetched_slice_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
s->fetched_send_message_length +=
- (uint32_t)GPR_SLICE_LENGTH(s->fetching_slice);
- gpr_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
+ (uint32_t)GRPC_SLICE_LENGTH(s->fetching_slice);
+ grpc_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
if (s->id != 0) {
grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
@@ -924,6 +976,16 @@ static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
+ bool is_client, bool is_initial) {
+ for (grpc_linked_mdelem *md = md_batch->list.head; md != md_batch->list.tail;
+ md = md->next) {
+ gpr_log(GPR_INFO, "HTTP:%d:%s:%s: %s: %s", id, is_initial ? "HDR" : "TRL",
+ is_client ? "CLI" : "SVR", grpc_mdstr_as_c_string(md->md->key),
+ grpc_mdstr_as_c_string(md->md->value));
+ }
+}
+
static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
@@ -937,6 +999,12 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete);
gpr_free(str);
+ if (op->send_initial_metadata) {
+ log_metadata(op->send_initial_metadata, s->id, t->is_client, true);
+ }
+ if (op->send_trailing_metadata) {
+ log_metadata(op->send_trailing_metadata, s->id, t->is_client, false);
+ }
}
grpc_closure *on_complete = op->on_complete;
@@ -965,6 +1033,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (op->send_initial_metadata != NULL) {
GPR_ASSERT(s->send_initial_metadata_finished == NULL);
+ on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
s->send_initial_metadata = op->send_initial_metadata;
const size_t metadata_size =
@@ -993,16 +1062,21 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (!s->write_closed) {
if (t->is_client) {
- GPR_ASSERT(s->id == 0);
- grpc_chttp2_list_add_waiting_for_concurrency(t, s);
- maybe_start_some_streams(exec_ctx, t);
+ if (!t->closed) {
+ GPR_ASSERT(s->id == 0);
+ grpc_chttp2_list_add_waiting_for_concurrency(t, s);
+ maybe_start_some_streams(exec_ctx, t);
+ } else {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s,
+ GRPC_ERROR_CREATE("Transport closed"));
+ }
} else {
GPR_ASSERT(s->id != 0);
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"op.send_initial_metadata");
}
} else {
- s->send_trailing_metadata = NULL;
+ s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_CREATE(
@@ -1013,6 +1087,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->send_message != NULL) {
+ on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) {
grpc_chttp2_complete_closure_step(
@@ -1022,7 +1097,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
uint8_t *frame_hdr =
- gpr_slice_buffer_tiny_add(&s->flow_controlled_buffer, 5);
+ grpc_slice_buffer_tiny_add(&s->flow_controlled_buffer, 5);
uint32_t flags = op->send_message->flags;
frame_hdr[0] = (flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
size_t len = op->send_message->length;
@@ -1050,6 +1125,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (op->send_trailing_metadata != NULL) {
GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
+ on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
s->send_trailing_metadata = op->send_trailing_metadata;
const size_t metadata_size =
@@ -1162,7 +1238,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
p->id[7] = (uint8_t)(t->ping_counter & 0xff);
t->ping_counter++;
p->on_recv = on_recv;
- gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_ping_create(0, p->id));
+ grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_ping_create(0, p->id));
grpc_chttp2_initiate_write(exec_ctx, t, true, "send_ping");
}
@@ -1185,6 +1261,14 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_free(msg);
}
+static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_error_code error, grpc_slice data) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
+ grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)error, data,
+ &t->qbuf);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+}
+
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
@@ -1199,15 +1283,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_goaway) {
- t->sent_goaway = 1;
- grpc_chttp2_goaway_append(
- t->last_new_stream_id,
- (uint32_t)grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
- gpr_slice_ref(*op->goaway_message), &t->qbuf);
- close_transport = grpc_chttp2_stream_map_size(&t->stream_map) == 0
- ? GRPC_ERROR_CREATE("GOAWAY sent")
- : GRPC_ERROR_NONE;
- grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+ send_goaway(exec_ctx, t,
+ grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
+ grpc_slice_ref(*op->goaway_message));
}
if (op->set_accept_stream) {
@@ -1341,10 +1419,14 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
s->data_parser.parsing_frame = NULL;
}
- if (grpc_chttp2_stream_map_size(&t->stream_map) == 0 && t->sent_goaway) {
- close_transport_locked(
- exec_ctx, t, GRPC_ERROR_CREATE_REFERENCING(
- "Last stream closed after sending GOAWAY", &error, 1));
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ post_benign_reclaimer(exec_ctx, t);
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
+ close_transport_locked(
+ exec_ctx, t,
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Last stream closed after sending GOAWAY", &error, 1));
+ }
}
if (grpc_chttp2_list_remove_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream");
@@ -1392,7 +1474,7 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
&grpc_status);
if (s->id != 0) {
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
@@ -1405,7 +1487,7 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
free_msg = true;
msg = grpc_error_string(due_to_error);
}
- gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
+ grpc_slice msg_slice = grpc_slice_from_copied_string(msg);
grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
}
@@ -1418,7 +1500,7 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_status_code status,
- gpr_slice *slice) {
+ grpc_slice *slice) {
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
}
@@ -1441,13 +1523,13 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&s->metadata_buffer[1],
grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_MESSAGE,
- grpc_mdstr_from_slice(gpr_slice_ref(*slice))));
+ grpc_mdstr_from_slice(grpc_slice_ref(*slice))));
}
s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
if (slice) {
- gpr_slice_unref(*slice);
+ grpc_slice_unref(*slice);
}
}
@@ -1477,18 +1559,22 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
-static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_error *error) {
+void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error) {
error =
removal_error(error, s, "Pending writes failed due to stream closure");
- s->fetching_send_message = NULL;
+ s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error),
"send_initial_metadata_finished");
+
+ s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
+
+ s->fetching_send_message = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
@@ -1529,13 +1615,16 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
if (close_writes && !s->write_closed) {
s->write_closed_error = GRPC_ERROR_REF(error);
s->write_closed = true;
- fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
+ grpc_chttp2_fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
if (s->read_closed && s->write_closed) {
if (s->id != 0) {
remove_stream(exec_ctx, t, s->id,
removal_error(GRPC_ERROR_REF(error), s, "Stream removed"));
+ } else {
+        /* Purge streams that are still waiting for concurrency (i.e., for id
+           assignment) */
+ grpc_chttp2_list_remove_waiting_for_concurrency(t, s);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2");
}
@@ -1544,9 +1633,9 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
- gpr_slice hdr;
- gpr_slice status_hdr;
- gpr_slice message_pfx;
+ grpc_slice hdr;
+ grpc_slice status_hdr;
+ grpc_slice message_pfx;
uint8_t *p;
uint32_t len = 0;
grpc_status_code grpc_status;
@@ -1565,8 +1654,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
time we got around to sending this, so instead we ignore HPACK
compression
and just write the uncompressed bytes onto the wire. */
- status_hdr = gpr_slice_malloc(15 + (grpc_status >= 10));
- p = GPR_SLICE_START_PTR(status_hdr);
+ status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
+ p = GRPC_SLICE_START_PTR(status_hdr);
*p++ = 0x40; /* literal header */
*p++ = 11; /* len(grpc-status) */
*p++ = 'g';
@@ -1588,8 +1677,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
*p++ = (uint8_t)('0' + (grpc_status / 10));
*p++ = (uint8_t)('0' + (grpc_status % 10));
}
- GPR_ASSERT(p == GPR_SLICE_END_PTR(status_hdr));
- len += (uint32_t)GPR_SLICE_LENGTH(status_hdr);
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr));
+ len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr);
const char *optional_message =
grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
@@ -1597,8 +1686,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (optional_message != NULL) {
size_t msg_len = strlen(optional_message);
GPR_ASSERT(msg_len < 127);
- message_pfx = gpr_slice_malloc(15);
- p = GPR_SLICE_START_PTR(message_pfx);
+ message_pfx = grpc_slice_malloc(15);
+ p = GRPC_SLICE_START_PTR(message_pfx);
*p++ = 0x40;
*p++ = 12; /* len(grpc-message) */
*p++ = 'g';
@@ -1614,13 +1703,13 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
*p++ = 'g';
*p++ = 'e';
*p++ = (uint8_t)msg_len;
- GPR_ASSERT(p == GPR_SLICE_END_PTR(message_pfx));
- len += (uint32_t)GPR_SLICE_LENGTH(message_pfx);
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
len += (uint32_t)msg_len;
}
- hdr = gpr_slice_malloc(9);
- p = GPR_SLICE_START_PTR(hdr);
+ hdr = grpc_slice_malloc(9);
+ p = GRPC_SLICE_START_PTR(hdr);
*p++ = (uint8_t)(len >> 16);
*p++ = (uint8_t)(len >> 8);
*p++ = (uint8_t)(len);
@@ -1630,16 +1719,16 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
*p++ = (uint8_t)(s->id >> 16);
*p++ = (uint8_t)(s->id >> 8);
*p++ = (uint8_t)(s->id);
- GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
- gpr_slice_buffer_add(&t->qbuf, hdr);
- gpr_slice_buffer_add(&t->qbuf, status_hdr);
+ grpc_slice_buffer_add(&t->qbuf, hdr);
+ grpc_slice_buffer_add(&t->qbuf, status_hdr);
if (optional_message) {
- gpr_slice_buffer_add(&t->qbuf, message_pfx);
- gpr_slice_buffer_add(&t->qbuf,
- gpr_slice_from_copied_string(optional_message));
+ grpc_slice_buffer_add(&t->qbuf, message_pfx);
+ grpc_slice_buffer_add(&t->qbuf,
+ grpc_slice_from_copied_string(optional_message));
}
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
&s->stats.outgoing));
}
@@ -1650,7 +1739,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
free_msg = true;
msg = grpc_error_string(error);
}
- gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
+ grpc_slice msg_slice = grpc_slice_from_copied_string(msg);
grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
@@ -1821,7 +1910,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
keep_reading = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading");
}
- gpr_slice_buffer_reset_and_unref(&t->read_buffer);
+ grpc_slice_buffer_reset_and_unref(&t->read_buffer);
if (keep_reading) {
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
@@ -1875,7 +1964,7 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs) {
if (gpr_unref(&bs->refs)) {
GRPC_ERROR_UNREF(bs->error);
- gpr_slice_buffer_destroy(&bs->slices);
+ grpc_slice_buffer_destroy(&bs->slices);
gpr_mu_destroy(&bs->slice_mu);
gpr_free(bs);
}
@@ -1937,7 +2026,7 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
}
gpr_mu_lock(&bs->slice_mu);
if (bs->slices.count > 0) {
- *bs->next_action.slice = gpr_slice_buffer_take_first(&bs->slices);
+ *bs->next_action.slice = grpc_slice_buffer_take_first(&bs->slices);
grpc_closure_run(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
} else if (bs->error != GRPC_ERROR_NONE) {
grpc_closure_run(exec_ctx, bs->next_action.on_complete,
@@ -1952,7 +2041,7 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream,
- gpr_slice *slice, size_t max_size_hint,
+ grpc_slice *slice, size_t max_size_hint,
grpc_closure *on_complete) {
GPR_TIMER_BEGIN("incoming_byte_stream_next", 0);
grpc_chttp2_incoming_byte_stream *bs =
@@ -2005,19 +2094,19 @@ static void incoming_byte_stream_publish_error(
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
- gpr_slice slice) {
+ grpc_slice slice) {
gpr_mu_lock(&bs->slice_mu);
- if (bs->remaining_bytes < GPR_SLICE_LENGTH(slice)) {
+ if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) {
incoming_byte_stream_publish_error(
exec_ctx, bs, GRPC_ERROR_CREATE("Too many bytes in stream"));
} else {
- bs->remaining_bytes -= (uint32_t)GPR_SLICE_LENGTH(slice);
+ bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
if (bs->on_next != NULL) {
*bs->next = slice;
grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
bs->on_next = NULL;
} else {
- gpr_slice_buffer_add(&bs->slices, slice);
+ grpc_slice_buffer_add(&bs->slices, slice);
}
}
gpr_mu_unlock(&bs->slice_mu);
@@ -2055,7 +2144,7 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
incoming_byte_stream->transport = t;
incoming_byte_stream->stream = s;
gpr_ref(&incoming_byte_stream->stream->active_streams);
- gpr_slice_buffer_init(&incoming_byte_stream->slices);
+ grpc_slice_buffer_init(&incoming_byte_stream->slices);
incoming_byte_stream->on_next = NULL;
incoming_byte_stream->is_tail = 1;
incoming_byte_stream->error = GRPC_ERROR_NONE;
@@ -2072,6 +2161,103 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
}
/*******************************************************************************
+ * RESOURCE QUOTAS
+ */
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->benign_reclaimer_registered) {
+ t->benign_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ false, &t->benign_reclaimer);
+ }
+}
+
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->destructive_reclaimer_registered) {
+ t->destructive_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ true, &t->destructive_reclaimer);
+ }
+}
+
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ if (error == GRPC_ERROR_NONE &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ /* Channel with no active streams: send a goaway to try and make it
+ * disconnect cleanly */
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+ t->peer_string);
+ }
+ send_goaway(exec_ctx, t, GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+ grpc_slice_from_static_string("Buffers full"));
+ } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG,
+ "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
+ " streams",
+ t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
+ }
+ t->benign_reclaimer_registered = false;
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
+}
+
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
+ t->destructive_reclaimer_registered = false;
+ if (error == GRPC_ERROR_NONE && n > 0) {
+ grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+ s->id);
+ }
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_CHTTP2_ENHANCE_YOUR_CALM));
+ if (n > 1) {
+ /* Since we cancel one stream per destructive reclamation, if
+ there are more streams left, we can immediately post a new
+ reclaimer in case the resource quota needs to free more
+ memory */
+ post_destructive_reclaimer(exec_ctx, t);
+ }
+ }
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
+}
+
+/*******************************************************************************
* TRACING
*/
@@ -2156,6 +2342,14 @@ static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
}
+/*******************************************************************************
+ * MONITORING
+ */
+static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_transport *t) {
+ return ((grpc_chttp2_transport *)t)->ep;
+}
+
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
"chttp2",
init_stream,
@@ -2165,7 +2359,8 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
- chttp2_get_peer};
+ chttp2_get_peer,
+ chttp2_get_endpoint};
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
@@ -2177,12 +2372,12 @@ grpc_transport *grpc_create_chttp2_transport(
void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
- gpr_slice_buffer *read_buffer) {
+ grpc_slice_buffer *read_buffer) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
GRPC_CHTTP2_REF_TRANSPORT(
t, "reading_action"); /* matches unref inside reading_action */
if (read_buffer != NULL) {
- gpr_slice_buffer_move_into(read_buffer, &t->read_buffer);
+ grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
read_action_begin(exec_ctx, t, GRPC_ERROR_NONE);
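The reclaimer pair added above is a two-phase response to memory pressure: the benign pass only sends a GOAWAY when the connection has no active streams, while the destructive pass cancels one stream per run and re-posts itself while streams remain. A minimal standalone sketch of that escalation policy, using toy names rather than the real grpc_resource_user callbacks:

#include <stdio.h>

/* Hypothetical toy connection: caches can be dropped cheaply, streams cannot. */
typedef struct {
  int cached_buffers; /* reclaimable without breaking anything */
  int active_streams; /* reclaiming these cancels user work */
} toy_conn;

/* Benign pass: free only what nobody will miss. Returns units freed. */
static int benign_reclaim(toy_conn *c) {
  int freed = c->cached_buffers;
  c->cached_buffers = 0;
  return freed;
}

/* Destructive pass: cancel one stream per invocation, mirroring the
   one-stream-per-reclamation policy in destructive_reclaimer_locked above. */
static int destructive_reclaim(toy_conn *c) {
  if (c->active_streams == 0) return 0;
  c->active_streams--;
  return 1;
}

/* Quota loop: prefer benign reclamation, escalate only while still short. */
static void reclaim_until(toy_conn *c, int needed) {
  needed -= benign_reclaim(c);
  while (needed > 0 && destructive_reclaim(c) > 0) {
    needed--; /* each cancelled stream counts as one unit here */
  }
  printf("done: %d streams left, deficit %d\n", c->active_streams,
         needed > 0 ? needed : 0);
}

int main(void) {
  toy_conn c = {3, 5};
  reclaim_until(&c, 6);
  return 0;
}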
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 4e2d0954bf..c372174f2d 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -48,6 +48,6 @@ grpc_transport *grpc_create_chttp2_transport(
/// leftover bytes previously read from the endpoint (e.g., by handshakers).
void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
- gpr_slice_buffer *read_buffer);
+ grpc_slice_buffer *read_buffer);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H */
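Most of the remaining hunks are the mechanical part of this change: the slice types and helpers move from the gpr_ namespace (<grpc/support/slice.h>, <grpc/support/slice_buffer.h>) to the grpc_ namespace (<grpc/slice.h>, <grpc/slice_buffer.h>), with the GPR_SLICE_* macros renamed to GRPC_SLICE_*. A short sketch using only names that appear in the hunks of this change, intended as an illustration of the new spellings rather than a survey of the API:

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <string.h>

/* Build a buffer holding one heap slice and one copied string, using the
   post-rename names (grpc_slice_malloc, GRPC_SLICE_START_PTR, ...). */
size_t demo_slices(void) {
  grpc_slice_buffer buf;
  grpc_slice_buffer_init(&buf); /* was gpr_slice_buffer_init */

  grpc_slice hdr = grpc_slice_malloc(9);              /* was gpr_slice_malloc */
  memset(GRPC_SLICE_START_PTR(hdr), 0,                /* was GPR_SLICE_START_PTR */
         GRPC_SLICE_LENGTH(hdr));                     /* was GPR_SLICE_LENGTH */
  grpc_slice_buffer_add(&buf, hdr);                   /* was gpr_slice_buffer_add */
  grpc_slice_buffer_add(&buf, grpc_slice_from_copied_string("payload"));

  size_t total = buf.length;
  grpc_slice_buffer_reset_and_unref(&buf);            /* was gpr_slice_buffer_... */
  return total;
}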
diff --git a/src/core/ext/transport/chttp2/transport/frame.h b/src/core/ext/transport/chttp2/transport/frame.h
index 1e444a91fd..ffd4d9669b 100644
--- a/src/core/ext/transport/chttp2/transport/frame.h
+++ b/src/core/ext/transport/chttp2/transport/frame.h
@@ -34,8 +34,8 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H
+#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
#include "src/core/lib/iomgr/error.h"
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index 8668816930..f9b9e1b309 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -40,6 +40,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport.h"
@@ -112,16 +113,16 @@ grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
return out;
}
-void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
+void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,
grpc_transport_one_way_stats *stats,
- gpr_slice_buffer *outbuf) {
- gpr_slice hdr;
+ grpc_slice_buffer *outbuf) {
+ grpc_slice hdr;
uint8_t *p;
static const size_t header_size = 9;
- hdr = gpr_slice_malloc(header_size);
- p = GPR_SLICE_START_PTR(hdr);
+ hdr = grpc_slice_malloc(header_size);
+ p = GRPC_SLICE_START_PTR(hdr);
GPR_ASSERT(write_bytes < (1 << 24));
*p++ = (uint8_t)(write_bytes >> 16);
*p++ = (uint8_t)(write_bytes >> 8);
@@ -132,9 +133,9 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
*p++ = (uint8_t)(id >> 16);
*p++ = (uint8_t)(id >> 8);
*p++ = (uint8_t)(id);
- gpr_slice_buffer_add(outbuf, hdr);
+ grpc_slice_buffer_add(outbuf, hdr);
- gpr_slice_buffer_move_first(inbuf, write_bytes, outbuf);
+ grpc_slice_buffer_move_first(inbuf, write_bytes, outbuf);
stats->framing_bytes += header_size;
stats->data_bytes += write_bytes;
@@ -143,9 +144,9 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *p,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- gpr_slice slice) {
- uint8_t *const beg = GPR_SLICE_START_PTR(slice);
- uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ grpc_slice slice) {
+ uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
@@ -176,7 +177,7 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)s->id);
gpr_free(msg);
- msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ msg = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES, msg);
gpr_free(msg);
@@ -236,7 +237,7 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
- gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
+ grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
@@ -246,8 +247,8 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
- gpr_slice_sub(slice, (size_t)(cur - beg),
- (size_t)(cur + p->frame_size - beg)));
+ grpc_slice_sub(slice, (size_t)(cur - beg),
+ (size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
@@ -257,7 +258,7 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(remaining <= p->frame_size);
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
- gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
+ grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
@@ -270,7 +271,7 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
+ grpc_slice slice, int is_last) {
grpc_chttp2_data_parser *p = parser;
grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
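grpc_chttp2_encode_data above writes the standard 9-byte HTTP/2 frame header (24-bit length, 8-bit type, 8-bit flags, reserved bit plus 31-bit stream id) in front of the payload it moves from inbuf to outbuf. A freestanding sketch of the same header layout, without the slice plumbing; the toy_ names are not part of the real API:

#include <assert.h>
#include <stdint.h>

#define TOY_FRAME_TYPE_DATA 0x00
#define TOY_DATA_FLAG_END_STREAM 0x01

/* Fill a 9-byte HTTP/2 frame header: length(24) | type(8) | flags(8) | R+id(31). */
static void toy_fill_data_header(uint8_t *p, uint32_t payload_len,
                                 uint32_t stream_id, int is_eof) {
  assert(payload_len < (1u << 24));
  *p++ = (uint8_t)(payload_len >> 16);
  *p++ = (uint8_t)(payload_len >> 8);
  *p++ = (uint8_t)(payload_len);
  *p++ = TOY_FRAME_TYPE_DATA;
  *p++ = is_eof ? TOY_DATA_FLAG_END_STREAM : 0;
  *p++ = (uint8_t)(stream_id >> 24); /* top bit is the reserved bit, left 0 */
  *p++ = (uint8_t)(stream_id >> 16);
  *p++ = (uint8_t)(stream_id >> 8);
  *p++ = (uint8_t)(stream_id);
}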
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index eb2d97d898..264ad14608 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -36,8 +36,8 @@
/* Parser for GRPC streams embedded in DATA frames */
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/byte_stream.h"
@@ -94,11 +94,11 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
-void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
+void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,
grpc_transport_one_way_stats *stats,
- gpr_slice_buffer *outbuf);
+ grpc_slice_buffer *outbuf);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index 33d2269169..d99d486c1b 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -71,9 +71,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
- uint8_t *const beg = GPR_SLICE_START_PTR(slice);
- uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ grpc_slice slice, int is_last) {
+ uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_goaway_parser *p = parser;
@@ -151,7 +151,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
if (is_last) {
grpc_chttp2_add_incoming_goaway(
exec_ctx, t, (uint32_t)p->error_code,
- gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
+ grpc_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;
@@ -160,13 +160,13 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
- gpr_slice debug_data,
- gpr_slice_buffer *slice_buffer) {
- gpr_slice header = gpr_slice_malloc(9 + 4 + 4);
- uint8_t *p = GPR_SLICE_START_PTR(header);
+ grpc_slice debug_data,
+ grpc_slice_buffer *slice_buffer) {
+ grpc_slice header = grpc_slice_malloc(9 + 4 + 4);
+ uint8_t *p = GRPC_SLICE_START_PTR(header);
uint32_t frame_length;
- GPR_ASSERT(GPR_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
- frame_length = 4 + 4 + (uint32_t)GPR_SLICE_LENGTH(debug_data);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
+ frame_length = 4 + 4 + (uint32_t)GRPC_SLICE_LENGTH(debug_data);
/* frame header: length */
*p++ = (uint8_t)(frame_length >> 16);
@@ -191,7 +191,7 @@ void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
*p++ = (uint8_t)(error_code >> 16);
*p++ = (uint8_t)(error_code >> 8);
*p++ = (uint8_t)(error_code);
- GPR_ASSERT(p == GPR_SLICE_END_PTR(header));
- gpr_slice_buffer_add(slice_buffer, header);
- gpr_slice_buffer_add(slice_buffer, debug_data);
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(header));
+ grpc_slice_buffer_add(slice_buffer, header);
+ grpc_slice_buffer_add(slice_buffer, debug_data);
}
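grpc_chttp2_goaway_append above reserves 9 + 4 + 4 bytes for the frame header plus the last-stream-id and error-code fields, then appends the debug data as a separate slice. A compact sketch of the GOAWAY wire layout (frame type 0x7, always sent on stream 0); the toy_ helper is illustrative only:

#include <stdint.h>
#include <string.h>

/* Serialize a GOAWAY frame into out (must hold 9 + 4 + 4 + debug_len bytes).
   Returns the number of bytes written. */
static size_t toy_write_goaway(uint8_t *out, uint32_t last_stream_id,
                               uint32_t error_code, const uint8_t *debug,
                               uint32_t debug_len) {
  uint8_t *p = out;
  uint32_t payload_len = 4 + 4 + debug_len;
  *p++ = (uint8_t)(payload_len >> 16); /* frame header: length */
  *p++ = (uint8_t)(payload_len >> 8);
  *p++ = (uint8_t)(payload_len);
  *p++ = 0x07; /* type: GOAWAY */
  *p++ = 0;    /* flags */
  *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; /* stream id 0 */
  *p++ = (uint8_t)(last_stream_id >> 24);
  *p++ = (uint8_t)(last_stream_id >> 16);
  *p++ = (uint8_t)(last_stream_id >> 8);
  *p++ = (uint8_t)(last_stream_id);
  *p++ = (uint8_t)(error_code >> 24);
  *p++ = (uint8_t)(error_code >> 16);
  *p++ = (uint8_t)(error_code >> 8);
  *p++ = (uint8_t)(error_code);
  if (debug_len > 0) memcpy(p, debug, debug_len);
  return (size_t)(p - out) + debug_len;
}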
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index 355104a5a7..21fe819488 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -34,9 +34,9 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
@@ -69,10 +69,10 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
- gpr_slice debug_data,
- gpr_slice_buffer *slice_buffer);
+ grpc_slice debug_data,
+ grpc_slice_buffer *slice_buffer);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 624f42649d..7de5f6362d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -40,9 +40,9 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes) {
- gpr_slice slice = gpr_slice_malloc(9 + 8);
- uint8_t *p = GPR_SLICE_START_PTR(slice);
+grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes) {
+ grpc_slice slice = grpc_slice_malloc(9 + 8);
+ uint8_t *p = GRPC_SLICE_START_PTR(slice);
*p++ = 0;
*p++ = 0;
@@ -76,9 +76,9 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
- uint8_t *const beg = GPR_SLICE_START_PTR(slice);
- uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ grpc_slice slice, int is_last) {
+ uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_ping_parser *p = parser;
@@ -93,8 +93,8 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (p->is_ack) {
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
- gpr_slice_buffer_add(&t->qbuf,
- grpc_chttp2_ping_create(1, p->opaque_8bytes));
+ grpc_slice_buffer_add(&t->qbuf,
+ grpc_chttp2_ping_create(1, p->opaque_8bytes));
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index 2071f647fb..b9889e2d11 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
@@ -44,13 +44,13 @@ typedef struct {
uint8_t opaque_8bytes[8];
} grpc_chttp2_ping_parser;
-gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
+grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index 9eac050797..b4c5ed769b 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -42,12 +42,12 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
-gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
- grpc_transport_one_way_stats *stats) {
+grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
+ grpc_transport_one_way_stats *stats) {
static const size_t frame_size = 13;
- gpr_slice slice = gpr_slice_malloc(frame_size);
+ grpc_slice slice = grpc_slice_malloc(frame_size);
stats->framing_bytes += frame_size;
- uint8_t *p = GPR_SLICE_START_PTR(slice);
+ uint8_t *p = GRPC_SLICE_START_PTR(slice);
// Frame size.
*p++ = 0;
@@ -89,9 +89,9 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
- uint8_t *const beg = GPR_SLICE_START_PTR(slice);
- uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ grpc_slice slice, int is_last) {
+ uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_rst_stream_parser *p = parser;
@@ -117,7 +117,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
char *status_details;
gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
reason);
- gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
+ grpc_slice slice_details = grpc_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index 5a1f578a29..779507a617 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/transport.h"
@@ -44,8 +44,8 @@ typedef struct {
uint8_t reason_bytes[4];
} grpc_chttp2_rst_stream_parser;
-gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
- grpc_transport_one_way_stats *stats);
+grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
+ grpc_transport_one_way_stats *stats);
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
@@ -53,6 +53,6 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 92022f90c9..98facae87f 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -82,19 +82,19 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
return out;
}
-gpr_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
- uint32_t force_mask, size_t count) {
+grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+ uint32_t force_mask, size_t count) {
size_t i;
uint32_t n = 0;
- gpr_slice output;
+ grpc_slice output;
uint8_t *p;
for (i = 0; i < count; i++) {
n += (new[i] != old[i] || (force_mask & (1u << i)) != 0);
}
- output = gpr_slice_malloc(9 + 6 * n);
- p = fill_header(GPR_SLICE_START_PTR(output), 6 * n, 0);
+ output = grpc_slice_malloc(9 + 6 * n);
+ p = fill_header(GRPC_SLICE_START_PTR(output), 6 * n, 0);
for (i = 0; i < count; i++) {
if (new[i] != old[i] || (force_mask & (1u << i)) != 0) {
@@ -109,14 +109,14 @@ gpr_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
}
}
- GPR_ASSERT(p == GPR_SLICE_END_PTR(output));
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(output));
return output;
}
-gpr_slice grpc_chttp2_settings_ack_create(void) {
- gpr_slice output = gpr_slice_malloc(9);
- fill_header(GPR_SLICE_START_PTR(output), 0, GRPC_CHTTP2_FLAG_ACK);
+grpc_slice grpc_chttp2_settings_ack_create(void) {
+ grpc_slice output = grpc_slice_malloc(9);
+ fill_header(GRPC_SLICE_START_PTR(output), 0, GRPC_CHTTP2_FLAG_ACK);
return output;
}
@@ -146,10 +146,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
+ grpc_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
- const uint8_t *cur = GPR_SLICE_START_PTR(slice);
- const uint8_t *end = GPR_SLICE_END_PTR(slice);
+ const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
+ const uint8_t *end = GRPC_SLICE_END_PTR(slice);
char *msg;
if (parser->is_ack) {
@@ -164,7 +164,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
if (is_last) {
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
- gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
+ grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@@ -225,7 +225,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
t->last_new_stream_id, sp->error_value,
- gpr_slice_from_static_string("HTTP2 settings error"),
+ grpc_slice_from_static_string("HTTP2 settings error"),
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
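grpc_chttp2_settings_create above only serializes settings whose value differs from the last-sent value (or whose bit is set in force_mask), at 6 bytes per entry behind the 9-byte header, and updates old to match new as it goes. A sketch of that diff-and-encode step for the payload portion only; the 1-based identifier mapping here is a hypothetical stand-in for the real setting-id table:

#include <stddef.h>
#include <stdint.h>

/* Encode changed settings as SETTINGS payload entries: a 2-byte identifier
   followed by a 4-byte value, 6 bytes per entry. Returns payload bytes written. */
static size_t toy_encode_changed_settings(uint8_t *out, uint32_t *old_vals,
                                          const uint32_t *new_vals,
                                          uint32_t force_mask, size_t count) {
  uint8_t *p = out;
  for (size_t i = 0; i < count; i++) {
    if (new_vals[i] != old_vals[i] || (force_mask & (1u << i)) != 0) {
      uint16_t id = (uint16_t)(i + 1); /* hypothetical id mapping */
      *p++ = (uint8_t)(id >> 8);
      *p++ = (uint8_t)(id);
      *p++ = (uint8_t)(new_vals[i] >> 24);
      *p++ = (uint8_t)(new_vals[i] >> 16);
      *p++ = (uint8_t)(new_vals[i] >> 8);
      *p++ = (uint8_t)(new_vals[i]);
      old_vals[i] = new_vals[i]; /* mirror "updating old to be new" */
    }
  }
  return (size_t)(p - out);
}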
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index 4bfa944cf1..a29dc82106 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -34,8 +34,8 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H
+#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
@@ -87,10 +87,10 @@ extern const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
/* Create a settings frame by diffing old & new, and updating old to be new */
-gpr_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
- uint32_t force_mask, size_t count);
+grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+ uint32_t force_mask, size_t count);
/* Create an ack settings frame */
-gpr_slice grpc_chttp2_settings_ack_create(void);
+grpc_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
@@ -99,6 +99,6 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 418166a6df..31a31c2871 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -38,12 +38,12 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-gpr_slice grpc_chttp2_window_update_create(
+grpc_slice grpc_chttp2_window_update_create(
uint32_t id, uint32_t window_update, grpc_transport_one_way_stats *stats) {
static const size_t frame_size = 13;
- gpr_slice slice = gpr_slice_malloc(frame_size);
+ grpc_slice slice = grpc_slice_malloc(frame_size);
stats->header_bytes += frame_size;
- uint8_t *p = GPR_SLICE_START_PTR(slice);
+ uint8_t *p = GRPC_SLICE_START_PTR(slice);
GPR_ASSERT(window_update);
@@ -81,9 +81,9 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
- uint8_t *const beg = GPR_SLICE_START_PTR(slice);
- uint8_t *const end = GPR_SLICE_END_PTR(slice);
+ grpc_chttp2_stream *s, grpc_slice slice, int is_last) {
+ uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_window_update_parser *p = parser;
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index 6e62f31872..f75dfb3d87 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/transport.h"
@@ -45,13 +45,13 @@ typedef struct {
uint32_t amount;
} grpc_chttp2_window_update_parser;
-gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
- grpc_transport_one_way_stats *stats);
+grpc_slice grpc_chttp2_window_update_create(
+ uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats *stats);
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, gpr_slice slice, int is_last);
+ grpc_chttp2_stream *s, grpc_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 581471ba02..eb68fe3138 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -76,7 +76,7 @@ typedef struct {
uint8_t seen_regular_header;
/* output stream id */
uint32_t stream_id;
- gpr_slice_buffer *output;
+ grpc_slice_buffer *output;
grpc_transport_one_way_stats *stats;
/* maximum size of a frame */
size_t max_frame_size;
@@ -104,7 +104,7 @@ static void finish_frame(framer_state *st, int is_header_boundary,
type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
: GRPC_CHTTP2_FRAME_CONTINUATION;
fill_header(
- GPR_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
+ GRPC_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
st->stream_id, st->output->length - st->output_length_at_start_of_frame,
(uint8_t)((is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
(is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
@@ -116,7 +116,7 @@ static void finish_frame(framer_state *st, int is_header_boundary,
output before beginning */
static void begin_frame(framer_state *st) {
st->header_idx =
- gpr_slice_buffer_add_indexed(st->output, gpr_slice_malloc(9));
+ grpc_slice_buffer_add_indexed(st->output, grpc_slice_malloc(9));
st->output_length_at_start_of_frame = st->output->length;
}
@@ -147,18 +147,18 @@ static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) {
}
}
-static void add_header_data(framer_state *st, gpr_slice slice) {
- size_t len = GPR_SLICE_LENGTH(slice);
+static void add_header_data(framer_state *st, grpc_slice slice) {
+ size_t len = GRPC_SLICE_LENGTH(slice);
size_t remaining;
if (len == 0) return;
remaining = st->max_frame_size + st->output_length_at_start_of_frame -
st->output->length;
if (len <= remaining) {
st->stats->header_bytes += len;
- gpr_slice_buffer_add(st->output, slice);
+ grpc_slice_buffer_add(st->output, slice);
} else {
st->stats->header_bytes += remaining;
- gpr_slice_buffer_add(st->output, gpr_slice_split_head(&slice, remaining));
+ grpc_slice_buffer_add(st->output, grpc_slice_split_head(&slice, remaining));
finish_frame(st, 0, 0);
begin_frame(st);
add_header_data(st, slice);
@@ -167,7 +167,7 @@ static void add_header_data(framer_state *st, gpr_slice slice) {
static uint8_t *add_tiny_header_data(framer_state *st, size_t len) {
ensure_space(st, len);
- return gpr_slice_buffer_tiny_add(st->output, len);
+ return grpc_slice_buffer_tiny_add(st->output, len);
}
static void evict_entry(grpc_chttp2_hpack_compressor *c) {
@@ -268,9 +268,10 @@ static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
len);
}
-static gpr_slice get_wire_value(grpc_mdelem *elem, uint8_t *huffman_prefix) {
- if (grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(elem->key->slice),
- GPR_SLICE_LENGTH(elem->key->slice))) {
+static grpc_slice get_wire_value(grpc_mdelem *elem, uint8_t *huffman_prefix) {
+ if (grpc_is_binary_header(
+ (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
+ GRPC_SLICE_LENGTH(elem->key->slice))) {
*huffman_prefix = 0x80;
return grpc_mdstr_as_base64_encoded_and_huffman_compressed(elem->value);
}
@@ -284,8 +285,8 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
framer_state *st) {
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
uint8_t huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- size_t len_val = GPR_SLICE_LENGTH(value_slice);
+ grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ size_t len_val = GRPC_SLICE_LENGTH(value_slice);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
@@ -293,7 +294,7 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
+ add_header_data(st, grpc_slice_ref(value_slice));
}
static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
@@ -301,8 +302,8 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
framer_state *st) {
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
uint8_t huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- size_t len_val = GPR_SLICE_LENGTH(value_slice);
+ grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ size_t len_val = GRPC_SLICE_LENGTH(value_slice);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
@@ -310,45 +311,45 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
+ add_header_data(st, grpc_slice_ref(value_slice));
}
static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
grpc_mdelem *elem, framer_state *st) {
- uint32_t len_key = (uint32_t)GPR_SLICE_LENGTH(elem->key->slice);
+ uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(elem->key->slice);
uint8_t huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- uint32_t len_val = (uint32_t)GPR_SLICE_LENGTH(value_slice);
+ grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ uint32_t len_val = (uint32_t)GRPC_SLICE_LENGTH(value_slice);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
GPR_ASSERT(len_key <= UINT32_MAX);
- GPR_ASSERT(GPR_SLICE_LENGTH(value_slice) <= UINT32_MAX);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(value_slice) <= UINT32_MAX);
*add_tiny_header_data(st, 1) = 0x40;
GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, gpr_slice_ref(elem->key->slice));
+ add_header_data(st, grpc_slice_ref(elem->key->slice));
GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
+ add_header_data(st, grpc_slice_ref(value_slice));
}
static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
grpc_mdelem *elem, framer_state *st) {
- uint32_t len_key = (uint32_t)GPR_SLICE_LENGTH(elem->key->slice);
+ uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(elem->key->slice);
uint8_t huffman_prefix;
- gpr_slice value_slice = get_wire_value(elem, &huffman_prefix);
- uint32_t len_val = (uint32_t)GPR_SLICE_LENGTH(value_slice);
+ grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
+ uint32_t len_val = (uint32_t)GRPC_SLICE_LENGTH(value_slice);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
GPR_ASSERT(len_key <= UINT32_MAX);
- GPR_ASSERT(GPR_SLICE_LENGTH(value_slice) <= UINT32_MAX);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(value_slice) <= UINT32_MAX);
*add_tiny_header_data(st, 1) = 0x00;
GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, gpr_slice_ref(elem->key->slice));
+ add_header_data(st, grpc_slice_ref(elem->key->slice));
GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, gpr_slice_ref(value_slice));
+ add_header_data(st, grpc_slice_ref(value_slice));
}
static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c,
@@ -373,8 +374,8 @@ static void hpack_enc(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem,
uint32_t indices_key;
int should_add_elem;
- GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
- if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
+ GPR_ASSERT(GRPC_SLICE_LENGTH(elem->key->slice) > 0);
+ if (GRPC_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
st->seen_regular_header = 1;
} else {
GPR_ASSERT(
@@ -546,7 +547,7 @@ void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c,
grpc_metadata_batch *metadata, int is_eof,
size_t max_frame_size,
grpc_transport_one_way_stats *stats,
- gpr_slice_buffer *outbuf) {
+ grpc_slice_buffer *outbuf) {
framer_state st;
grpc_linked_mdelem *l;
gpr_timespec deadline;
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 4c3a931549..bcbd675ca2 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -34,9 +34,9 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
@@ -93,6 +93,6 @@ void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c, uint32_t id,
grpc_metadata_batch *metadata, int is_eof,
size_t max_frame_size,
grpc_transport_one_way_stats *stats,
- gpr_slice_buffer *outbuf);
+ grpc_slice_buffer *outbuf);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 8180f78fc0..6a9200b8db 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -50,6 +50,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
+#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
@@ -1501,9 +1502,9 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
- *is =
- grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(elem->key->slice),
- GPR_SLICE_LENGTH(elem->key->slice));
+ *is = grpc_is_binary_header(
+ (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
+ GRPC_SLICE_LENGTH(elem->key->slice));
return GRPC_ERROR_NONE;
}
@@ -1578,18 +1579,32 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
grpc_chttp2_maybe_complete_recv_initial_metadata,
grpc_chttp2_maybe_complete_recv_trailing_metadata};
+static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = sp;
+ grpc_chttp2_transport *t = s->t;
+ if (!s->write_closed) {
+ grpc_slice_buffer_add(
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
+}
+
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
+ grpc_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
- s->stats.incoming.header_bytes += GPR_SLICE_LENGTH(slice);
+ s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
}
grpc_error *error = grpc_chttp2_hpack_parser_parse(
- exec_ctx, parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
+ exec_ctx, parser, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_END_PTR(slice));
if (error != GRPC_ERROR_NONE) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return error;
@@ -1613,6 +1628,17 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
s->header_frames_received++;
}
if (parser->is_eof) {
+ if (t->is_client && !s->write_closed) {
+ /* server eof ==> complete closure; we may need to forcefully close
+ the stream. Wait until the combiner lock is ready to be released
+ however -- it might be that we receive a RST_STREAM following this
+ and can avoid the extra write */
+ GRPC_CHTTP2_STREAM_REF(s, "final_rst");
+ grpc_combiner_execute_finally(
+ exec_ctx, t->combiner,
+ grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
+ false);
+ }
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
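force_client_rst_stream above covers the case where the server half-closes with trailers while the client's write side is still open: rather than leave the stream dangling, the client queues RST_STREAM with NO_ERROR and marks the stream fully closed, but defers the decision until the combiner is about to release so that a RST_STREAM arriving from the server in the same batch can make the extra write unnecessary. A reduced sketch of that decision with the transport plumbing stubbed out; queue_rst_stream stands in for grpc_chttp2_rst_stream_create plus grpc_slice_buffer_add on t->qbuf:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_HTTP2_NO_ERROR 0x0u

typedef struct {
  uint32_t id;
  bool is_client;
  bool write_closed;     /* have we already half-closed our side? */
  bool got_trailing_eof; /* peer ended the stream with trailing HEADERS */
} toy_stream;

static void queue_rst_stream(toy_stream *s, uint32_t code) {
  printf("stream %u: queue RST_STREAM(0x%x)\n", s->id, code);
}

/* Mirrors the is_client && !write_closed condition in the hunk above. */
static void on_trailers_parsed(toy_stream *s) {
  if (!s->got_trailing_eof) return;
  if (s->is_client && !s->write_closed) {
    /* read side is done but our write side is not: close it out cleanly */
    queue_rst_stream(s, TOY_HTTP2_NO_ERROR);
    s->write_closed = true;
  }
}

int main(void) {
  toy_stream s = {1, true, false, true};
  on_trailers_parsed(&s);
  return 0;
}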
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 0290c78d5a..a39bf466cd 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -116,6 +116,6 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 2b73ec969e..2dc793d304 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -226,8 +226,8 @@ grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
/* Evict one element from the table */
static void evict1(grpc_chttp2_hptbl *tbl) {
grpc_mdelem *first_ent = tbl->ents[tbl->first_ent];
- size_t elem_bytes = GPR_SLICE_LENGTH(first_ent->key->slice) +
- GPR_SLICE_LENGTH(first_ent->value->slice) +
+ size_t elem_bytes = GRPC_SLICE_LENGTH(first_ent->key->slice) +
+ GRPC_SLICE_LENGTH(first_ent->value->slice) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
GPR_ASSERT(elem_bytes <= tbl->mem_used);
tbl->mem_used -= (uint32_t)elem_bytes;
@@ -298,8 +298,8 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl *tbl,
grpc_error *grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
/* determine how many bytes of buffer this entry represents */
- size_t elem_bytes = GPR_SLICE_LENGTH(md->key->slice) +
- GPR_SLICE_LENGTH(md->value->slice) +
+ size_t elem_bytes = GRPC_SLICE_LENGTH(md->key->slice) +
+ GRPC_SLICE_LENGTH(md->value->slice) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
if (tbl->current_table_bytes > tbl->max_bytes) {
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.h b/src/core/ext/transport/chttp2/transport/hpack_table.h
index 45bd9255bf..2ca130e64b 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.h
@@ -34,8 +34,8 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H
+#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/metadata.h"
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 01f521c360..b727965d43 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -138,6 +138,12 @@ typedef enum {
GRPC_NUM_SETTING_SETS
} grpc_chttp2_setting_set;
+typedef enum {
+ GRPC_CHTTP2_NO_GOAWAY_SEND,
+ GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED,
+ GRPC_CHTTP2_GOAWAY_SENT,
+} grpc_chttp2_sent_goaway_state;
+
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
uint8_t id[8];
@@ -164,14 +170,14 @@ struct grpc_chttp2_incoming_byte_stream {
bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
- gpr_slice_buffer slices;
+ grpc_slice_buffer slices;
grpc_closure *on_next;
- gpr_slice *next;
+ grpc_slice *next;
uint32_t remaining_bytes;
struct {
grpc_closure closure;
- gpr_slice *slice;
+ grpc_slice *slice;
size_t max_size_hint;
grpc_closure *on_complete;
} next_action;
@@ -213,7 +219,7 @@ struct grpc_chttp2_transport {
grpc_closure read_action_locked;
/** incoming read bytes */
- gpr_slice_buffer read_buffer;
+ grpc_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
@@ -231,7 +237,7 @@ struct grpc_chttp2_transport {
} channel_callback;
/** data to write now */
- gpr_slice_buffer outbuf;
+ grpc_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
@@ -239,7 +245,7 @@ struct grpc_chttp2_transport {
uint8_t is_client;
/** data to write next write */
- gpr_slice_buffer qbuf;
+ grpc_slice_buffer qbuf;
/** window available to announce to peer */
int64_t announce_incoming_window;
@@ -249,7 +255,7 @@ struct grpc_chttp2_transport {
/** have we seen a goaway */
uint8_t seen_goaway;
/** have we sent a goaway */
- uint8_t sent_goaway;
+ grpc_chttp2_sent_goaway_state sent_goaway_state;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
@@ -308,18 +314,33 @@ struct grpc_chttp2_transport {
grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- gpr_slice slice, int is_last);
+ grpc_slice slice, int is_last);
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
- gpr_slice goaway_text;
+ grpc_slice goaway_text;
grpc_chttp2_write_cb *write_cb_pool;
/* if non-NULL, close the transport with this error when writes are finished
*/
grpc_error *close_transport_on_writes_finished;
+
+ /* a list of closures to run after writes are finished */
+ grpc_closure_list run_after_write;
+
+ /* buffer pool state */
+ /** have we scheduled a benign cleanup? */
+ bool benign_reclaimer_registered;
+ /** have we scheduled a destructive cleanup? */
+ bool destructive_reclaimer_registered;
+ /** benign cleanup closure */
+ grpc_closure benign_reclaimer;
+ grpc_closure benign_reclaimer_locked;
+ /** destructive cleanup closure */
+ grpc_closure destructive_reclaimer;
+ grpc_closure destructive_reclaimer_locked;
};
typedef enum {
@@ -356,7 +377,7 @@ struct grpc_chttp2_stream {
grpc_byte_stream *fetching_send_message;
uint32_t fetched_send_message_length;
- gpr_slice fetching_slice;
+ grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
bool complete_fetch_covered_by_poller;
@@ -416,7 +437,7 @@ struct grpc_chttp2_stream {
bool sent_trailing_metadata;
/** how much window should we announce? */
uint32_t announce_window;
- gpr_slice_buffer flow_controlled_buffer;
+ grpc_slice_buffer flow_controlled_buffer;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
@@ -448,7 +469,8 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, gpr_slice slice);
+ grpc_chttp2_transport *t,
+ grpc_slice slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
@@ -474,6 +496,8 @@ void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
+void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
@@ -491,7 +515,7 @@ grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
- gpr_slice goaway_text);
+ grpc_slice goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
@@ -593,7 +617,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *stream,
- grpc_status_code status, gpr_slice *details);
+ grpc_status_code status, grpc_slice *details);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
@@ -641,7 +665,7 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
- gpr_slice slice);
+ grpc_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error);
@@ -670,4 +694,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
+void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error);
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 8005350ae7..5efb49751c 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -67,14 +67,14 @@ static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
int is_header);
static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, gpr_slice slice,
+ grpc_chttp2_transport *t, grpc_slice slice,
int is_last);
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
- gpr_slice slice) {
- uint8_t *beg = GPR_SLICE_START_PTR(slice);
- uint8_t *end = GPR_SLICE_END_PTR(slice);
+ grpc_slice slice) {
+ uint8_t *beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t *end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_error *err;
@@ -229,10 +229,10 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
if ((uint32_t)(end - cur) == t->incoming_frame_size) {
- err = parse_frame_slice(exec_ctx, t,
- gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
- (size_t)(end - beg)),
- 1);
+ err = parse_frame_slice(
+ exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 1);
if (err != GRPC_ERROR_NONE) {
return err;
}
@@ -243,8 +243,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
size_t cur_offset = (size_t)(cur - beg);
err = parse_frame_slice(
exec_ctx, t,
- gpr_slice_sub_no_ref(slice, cur_offset,
- cur_offset + t->incoming_frame_size),
+ grpc_slice_sub_no_ref(slice, cur_offset,
+ cur_offset + t->incoming_frame_size),
1);
if (err != GRPC_ERROR_NONE) {
return err;
@@ -253,10 +253,10 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- err = parse_frame_slice(exec_ctx, t,
- gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
- (size_t)(end - beg)),
- 0);
+ err = parse_frame_slice(
+ exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 0);
if (err != GRPC_ERROR_NONE) {
return err;
}
@@ -331,7 +331,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- gpr_slice slice, int is_last) {
+ grpc_slice slice, int is_last) {
return GRPC_ERROR_NONE;
}
@@ -430,7 +430,7 @@ error_handler:
if (s != NULL) {
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, err);
}
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
GRPC_CHTTP2_PROTOCOL_ERROR,
&s->stats.outgoing));
@@ -471,7 +471,8 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdstr_as_c_string(md->value));
*cached_timeout = gpr_inf_future(GPR_TIMESPAN);
}
- grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
+ cached_timeout =
+ grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&s->metadata_buffer[0],
@@ -722,7 +723,7 @@ static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
}
static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, gpr_slice slice,
+ grpc_chttp2_transport *t, grpc_slice slice,
int is_last) {
grpc_chttp2_stream *s = t->incoming_stream;
grpc_error *err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
@@ -737,7 +738,7 @@ static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
if (s) {
s->forced_close_error = err;
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
GRPC_CHTTP2_PROTOCOL_ERROR,
&s->stats.outgoing));
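The on_initial_header change above keeps the return value of grpc_mdelem_set_user_data so that the parser continues with whichever cached timeout actually ends up attached to the metadata element. A small sketch of that set-once contract, assuming set_user_data returns the winning value and destroys the losing one; the toy_ names and single-threaded check are simplifications (a real implementation would synchronize):

#include <stddef.h>

typedef struct {
  void *user_data;
  void (*destroy)(void *);
} toy_mdelem;

/* Attach user data if none is set yet; otherwise destroy the new value and
   return the existing one. Callers must keep using the returned pointer. */
static void *toy_set_user_data(toy_mdelem *md, void (*destroy)(void *),
                               void *data) {
  if (md->user_data != NULL) {
    destroy(data);        /* lost the race: discard our copy */
    return md->user_data; /* continue with the stored value */
  }
  md->user_data = data;
  md->destroy = destroy;
  return data;
}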
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 6d25b3ae57..a60264cc51 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -158,6 +158,11 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
+void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
+}
+
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.c
index 59b3a14e0a..5f5a28446d 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.c
@@ -151,6 +151,17 @@ size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
return map->count - map->free;
}
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
+ if (map->count == map->free) {
+ return NULL;
+ }
+ if (map->free != 0) {
+ map->count = compact(map->keys, map->values, map->count);
+ map->free = 0;
+ }
+ return map->values[((size_t)rand()) % map->count];
+}
+
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
void (*f)(void *user_data, uint32_t key,
void *value),
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index e76312dd1a..203f640680 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -68,6 +68,9 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
+/* Return a random entry */
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map);
+
/* How many (populated) entries are in the stream map? */
size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);
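grpc_chttp2_stream_map_rand above has to cope with tombstones left by deletions: it compacts the key/value arrays first so that a single modulo pick always lands on a live entry. A standalone sketch of the same compact-then-pick approach over a plain array with NULL tombstones; toy_ names are illustrative:

#include <stddef.h>
#include <stdlib.h>

/* Drop NULL tombstones in place and return the new element count. */
static size_t toy_compact(void **values, size_t count) {
  size_t out = 0;
  for (size_t i = 0; i < count; i++) {
    if (values[i] != NULL) values[out++] = values[i];
  }
  return out;
}

/* Pick a uniformly random live entry, or NULL if nothing is live. */
static void *toy_pick_rand(void **values, size_t *count, size_t *free_slots) {
  if (*count == *free_slots) return NULL; /* map is effectively empty */
  if (*free_slots != 0) {
    *count = toy_compact(values, *count); /* remove tombstones first */
    *free_slots = 0;
  }
  return values[(size_t)rand() % *count];
}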
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index b39695a1a5..139e7387c4 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -80,7 +80,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (t->dirtied_local_settings && !t->sent_local_settings) {
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->outbuf,
grpc_chttp2_settings_create(
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
@@ -91,7 +91,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
}
/* simple writes are queued to qbuf, and flushed here */
- gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
+ grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
@@ -130,9 +130,9 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
/* send any window updates */
if (s->announce_window > 0) {
uint32_t announce = s->announce_window;
- gpr_slice_buffer_add(&t->outbuf,
- grpc_chttp2_window_update_create(
- s->id, s->announce_window, &s->stats.outgoing));
+ grpc_slice_buffer_add(&t->outbuf,
+ grpc_chttp2_window_update_create(
+ s->id, s->announce_window, &s->stats.outgoing));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
@@ -162,9 +162,9 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
- gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
- s->id, GRPC_CHTTP2_NO_ERROR,
- &s->stats.outgoing));
+ grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
+ s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
}
}
s->sending_bytes += send_bytes;
@@ -194,7 +194,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
- gpr_slice_buffer_add(
+ grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
}
@@ -220,8 +220,8 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
announced);
grpc_transport_one_way_stats throwaway_stats;
- gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
- 0, announced, &throwaway_stats));
+ grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
+ 0, announced, &throwaway_stats));
}
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
@@ -254,7 +254,7 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
- gpr_slice_buffer_reset_and_unref(&t->outbuf);
+ grpc_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_chttp2_end_write", 0);
}
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index 25ad40b935..afc59f4b12 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -34,14 +34,15 @@
#include <string.h>
#include <grpc/impl/codegen/port_platform.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/incoming_metadata.h"
+#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
@@ -130,7 +131,7 @@ struct read_state {
/* vars for holding data destined for the application */
struct grpc_slice_buffer_stream sbs;
- gpr_slice_buffer read_slice_buffer;
+ grpc_slice_buffer read_slice_buffer;
/* vars for trailing metadata */
grpc_chttp2_incoming_metadata_buffer trailing_metadata;
@@ -148,6 +149,9 @@ struct write_state {
struct op_state {
bool state_op_done[OP_NUM_OPS];
bool state_callback_received[OP_NUM_OPS];
+ bool fail_state;
+ bool flush_read;
+ grpc_error *cancel_error;
/* data structure for storing data coming from server */
struct read_state rs;
/* data structure for storing data going to the server */
@@ -247,6 +251,12 @@ static void free_read_buffer(stream_obj *s) {
}
}
+static grpc_error *make_error_with_desc(int error_code, const char *desc) {
+ grpc_error *error = GRPC_ERROR_CREATE(desc);
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code);
+ return error;
+}
+
/*
Add a new stream op to op storage.
*/
@@ -432,6 +442,18 @@ static void on_response_headers_received(
grpc_mdstr_from_string(headers->headers[i].value)));
}
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
+ if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
+ s->state.state_callback_received[OP_FAILED])) {
+    /* Do an extra read to trigger on_succeeded() callback in case the
+       connection is closed */
+ GPR_ASSERT(s->state.rs.length_field_received == false);
+ s->state.rs.read_buffer = s->state.rs.grpc_header_bytes;
+ s->state.rs.received_bytes = 0;
+ s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
+ CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
+ s->state.rs.remaining_bytes);
+ }
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
@@ -463,7 +485,11 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data,
count);
gpr_mu_lock(&s->mu);
s->state.state_callback_received[OP_RECV_MESSAGE] = true;
- if (count > 0) {
+ if (count > 0 && s->state.flush_read) {
+ CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096);
+ gpr_mu_unlock(&s->mu);
+ } else if (count > 0) {
s->state.rs.received_bytes += count;
s->state.rs.remaining_bytes -= count;
if (s->state.rs.remaining_bytes > 0) {
@@ -478,6 +504,10 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data,
execute_from_storage(s);
}
} else {
+ if (s->state.flush_read) {
+ gpr_free(s->state.rs.read_buffer);
+ s->state.rs.read_buffer = NULL;
+ }
s->state.rs.read_stream_closed = true;
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
@@ -507,21 +537,38 @@ static void on_response_trailers_received(
grpc_mdstr_from_string(trailers->headers[i].key),
grpc_mdstr_from_string(trailers->headers[i].value)));
s->state.rs.trailing_metadata_valid = true;
+ if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
+ 0 != strcmp(trailers->headers[i].value, "0")) {
+ s->state.fail_state = true;
+ }
}
s->state.state_callback_received[OP_RECV_TRAILING_METADATA] = true;
- gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+  /* Send an EOS when the server terminates the stream (testServerFinishesRequest)
+   * to trigger on_succeeded */
+ if (!s->state.state_op_done[OP_SEND_TRAILING_METADATA] &&
+ !(s->state.state_op_done[OP_CANCEL_ERROR] ||
+ s->state.state_callback_received[OP_FAILED])) {
+ CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, 0)", s->cbs);
+ s->state.state_callback_received[OP_SEND_MESSAGE] = false;
+ cronet_bidirectional_stream_write(s->cbs, "", 0, true);
+ s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
+
+ gpr_mu_unlock(&s->mu);
+ } else {
+ gpr_mu_unlock(&s->mu);
+ execute_from_storage(s);
+ }
}
/*
Utility function that takes the data from s->write_slice_buffer and assembles
into a contiguous byte stream with 5 byte gRPC header prepended.
*/
-static void create_grpc_frame(gpr_slice_buffer *write_slice_buffer,
+static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
char **pp_write_buffer,
size_t *p_write_buffer_size) {
- gpr_slice slice = gpr_slice_buffer_take_first(write_slice_buffer);
- size_t length = GPR_SLICE_LENGTH(slice);
+ grpc_slice slice = grpc_slice_buffer_take_first(write_slice_buffer);
+ size_t length = GRPC_SLICE_LENGTH(slice);
*p_write_buffer_size = length + GRPC_HEADER_SIZE_IN_BYTES;
/* This is freed in the on_write_completed callback */
char *write_buffer = gpr_malloc(length + GRPC_HEADER_SIZE_IN_BYTES);
@@ -534,7 +581,7 @@ static void create_grpc_frame(gpr_slice_buffer *write_slice_buffer,
*p++ = (uint8_t)(length >> 8);
*p++ = (uint8_t)(length);
/* append actual data */
- memcpy(p, GPR_SLICE_START_PTR(slice), length);
+ memcpy(p, GRPC_SLICE_START_PTR(slice), length);
}
/*
@@ -610,6 +657,16 @@ static int parse_grpc_header(const uint8_t *data) {
return length;
}
+static bool header_has_authority(grpc_linked_mdelem *head) {
+ while (head != NULL) {
+ if (head->md->key == GRPC_MDSTR_AUTHORITY) {
+ return true;
+ }
+ head = head->next;
+ }
+ return false;
+}
+
/*
Op Execution: Decide if one of the actions contained in the stream op can be
executed. This is the heart of the state machine.
@@ -621,9 +678,9 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
/* When call is canceled, every op can be run, except under following
conditions
*/
- bool is_canceled_of_failed = stream_state->state_op_done[OP_CANCEL_ERROR] ||
+ bool is_canceled_or_failed = stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED];
- if (is_canceled_of_failed) {
+ if (is_canceled_or_failed) {
if (op_id == OP_SEND_INITIAL_METADATA) result = false;
if (op_id == OP_SEND_MESSAGE) result = false;
if (op_id == OP_SEND_TRAILING_METADATA) result = false;
@@ -767,16 +824,10 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, stream_state, &oas->state,
OP_SEND_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA", oas);
- /* This OP is the beginning. Reset various states */
- memset(&s->header_array, 0, sizeof(s->header_array));
- memset(&stream_state->rs, 0, sizeof(stream_state->rs));
- memset(&stream_state->ws, 0, sizeof(stream_state->ws));
- memset(stream_state->state_op_done, 0, sizeof(stream_state->state_op_done));
- memset(stream_state->state_callback_received, 0,
- sizeof(stream_state->state_callback_received));
/* Start new cronet stream. It is destroyed in on_succeeded, on_canceled,
* on_failed */
GPR_ASSERT(s->cbs == NULL);
+ GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]);
s->cbs = cronet_bidirectional_stream_create(s->curr_ct.engine, s->curr_gs,
&cronet_callbacks);
CRONET_LOG(GPR_DEBUG, "%p = cronet_bidirectional_stream_create()", s->cbs);
@@ -797,10 +848,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, stream_state, &oas->state,
OP_RECV_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
- if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
- stream_state->state_callback_received[OP_FAILED]) {
+ if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
GRPC_ERROR_CANCELLED, NULL);
+ } else if (stream_state->state_callback_received[OP_FAILED]) {
+ grpc_exec_ctx_sched(
+ exec_ctx, stream_op->recv_initial_metadata_ready,
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
} else {
grpc_chttp2_incoming_metadata_buffer_publish(
&oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata);
@@ -817,9 +871,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
} else {
- gpr_slice_buffer write_slice_buffer;
- gpr_slice slice;
- gpr_slice_buffer_init(&write_slice_buffer);
+ grpc_slice_buffer write_slice_buffer;
+ grpc_slice slice;
+ grpc_slice_buffer_init(&write_slice_buffer);
grpc_byte_stream_next(NULL, stream_op->send_message, &slice,
stream_op->send_message->length, NULL);
/* Check that compression flag is OFF. We don't support compression yet.
@@ -828,7 +882,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR, "Compression is not supported");
GPR_ASSERT(stream_op->send_message->flags == 0);
}
- gpr_slice_buffer_add(&write_slice_buffer, slice);
+ grpc_slice_buffer_add(&write_slice_buffer, slice);
if (write_slice_buffer.count != 1) {
/* Empty request not handled yet */
gpr_log(GPR_ERROR, "Empty request is not supported");
@@ -854,12 +908,19 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, stream_state, &oas->state,
OP_RECV_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
- if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
- stream_state->state_callback_received[OP_FAILED]) {
- CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
+ if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
+ CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
GRPC_ERROR_CANCELLED, NULL);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
+ } else if (stream_state->state_callback_received[OP_FAILED]) {
+ CRONET_LOG(GPR_DEBUG, "Stream failed.");
+ grpc_exec_ctx_sched(
+ exec_ctx, stream_op->recv_message_ready,
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+ stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.read_stream_closed == true) {
/* No more data will be received */
CRONET_LOG(GPR_DEBUG, "read stream closed");
@@ -867,6 +928,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE, NULL);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.length_field_received == false) {
if (stream_state->rs.received_bytes == GRPC_HEADER_SIZE_IN_BYTES &&
stream_state->rs.remaining_bytes == 0) {
@@ -891,7 +953,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else {
stream_state->rs.remaining_bytes = 0;
CRONET_LOG(GPR_DEBUG, "read operation complete. Empty response.");
- gpr_slice_buffer_init(&stream_state->rs.read_slice_buffer);
+ grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
*((grpc_byte_buffer **)stream_op->recv_message) =
@@ -918,15 +980,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
}
} else if (stream_state->rs.remaining_bytes == 0) {
CRONET_LOG(GPR_DEBUG, "read operation complete");
- gpr_slice read_data_slice =
- gpr_slice_malloc((uint32_t)stream_state->rs.length_field);
- uint8_t *dst_p = GPR_SLICE_START_PTR(read_data_slice);
+ grpc_slice read_data_slice =
+ grpc_slice_malloc((uint32_t)stream_state->rs.length_field);
+ uint8_t *dst_p = GRPC_SLICE_START_PTR(read_data_slice);
memcpy(dst_p, stream_state->rs.read_buffer,
(size_t)stream_state->rs.length_field);
free_read_buffer(s);
- gpr_slice_buffer_init(&stream_state->rs.read_slice_buffer);
- gpr_slice_buffer_add(&stream_state->rs.read_slice_buffer,
- read_data_slice);
+ grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
+ grpc_slice_buffer_add(&stream_state->rs.read_slice_buffer,
+ read_data_slice);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
*((grpc_byte_buffer **)stream_op->recv_message) =
@@ -935,10 +997,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE, NULL);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
- /* Clear read state of the stream, so next read op (if it were to come)
- * will work */
- stream_state->rs.received_bytes = stream_state->rs.remaining_bytes =
- stream_state->rs.length_field_received = 0;
+      /* Do an extra read to trigger on_succeeded() callback in case the
+         connection is closed */
+ stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
+ stream_state->rs.received_bytes = 0;
+ stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
+ stream_state->rs.length_field_received = false;
+ CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
+ stream_state->rs.remaining_bytes);
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_op->recv_trailing_metadata &&
@@ -975,17 +1042,32 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "W: cronet_bidirectional_stream_cancel(%p)", s->cbs);
if (s->cbs) {
cronet_bidirectional_stream_cancel(s->cbs);
+ result = ACTION_TAKEN_WITH_CALLBACK;
+ } else {
+ result = ACTION_TAKEN_NO_CALLBACK;
}
stream_state->state_op_done[OP_CANCEL_ERROR] = true;
- result = ACTION_TAKEN_WITH_CALLBACK;
+ if (!stream_state->cancel_error) {
+ stream_state->cancel_error = GRPC_ERROR_REF(stream_op->cancel_error);
+ }
} else if (stream_op->on_complete &&
op_can_be_run(stream_op, stream_state, &oas->state,
OP_ON_COMPLETE)) {
- /* All actions in this stream_op are complete. Call the on_complete callback
- */
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
- grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
- NULL);
+ if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
+ grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete,
+ GRPC_ERROR_REF(stream_state->cancel_error), NULL);
+ } else if (stream_state->state_callback_received[OP_FAILED]) {
+ grpc_exec_ctx_sched(
+ exec_ctx, stream_op->on_complete,
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+ } else {
+ /* All actions in this stream_op are complete. Call the on_complete
+ * callback
+ */
+ grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
+ NULL);
+ }
oas->state.state_op_done[OP_ON_COMPLETE] = true;
oas->done = true;
/* reset any send message state, only if this ON_COMPLETE is about a send.
@@ -999,6 +1081,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
make a note */
if (stream_op->recv_message)
stream_state->state_op_done[OP_RECV_MESSAGE_AND_ON_COMPLETE] = true;
+ } else if (stream_state->fail_state && !stream_state->flush_read) {
+ CRONET_LOG(GPR_DEBUG, "running: %p flush read", oas);
+ if (stream_state->rs.read_buffer &&
+ stream_state->rs.read_buffer != stream_state->rs.grpc_header_bytes) {
+ gpr_free(stream_state->rs.read_buffer);
+ stream_state->rs.read_buffer = NULL;
+ }
+ stream_state->rs.read_buffer = gpr_malloc(4096);
+ stream_state->flush_read = true;
} else {
result = NO_ACTION_POSSIBLE;
}
@@ -1024,6 +1115,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
memset(s->state.state_op_done, 0, sizeof(s->state.state_op_done));
memset(s->state.state_callback_received, 0,
sizeof(s->state.state_callback_received));
+ s->state.fail_state = s->state.flush_read = false;
+ s->state.cancel_error = NULL;
gpr_mu_init(&s->mu);
return 0;
}
@@ -1042,11 +1135,38 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->curr_gs = gs;
memcpy(&s->curr_ct, gt, sizeof(grpc_cronet_transport));
add_to_storage(s, op);
- execute_from_storage(s);
+ if (op->send_initial_metadata &&
+ header_has_authority(op->send_initial_metadata->list.head)) {
+    /* Cronet does not support the :authority header field.  We cancel the call
+       when this field is present in the metadata. */
+ cronet_bidirectional_stream_header_array header_array;
+ cronet_bidirectional_stream_header *header;
+ cronet_bidirectional_stream cbs;
+ CRONET_LOG(GPR_DEBUG,
+ ":authority header is provided but not supported;"
+ " cancel operations");
+    /* Notify the application that the operation is cancelled by forging trailers */
+ header_array.count = 1;
+ header_array.capacity = 1;
+ header_array.headers =
+ gpr_malloc(sizeof(cronet_bidirectional_stream_header));
+ header = (cronet_bidirectional_stream_header *)header_array.headers;
+ header->key = "grpc-status";
+ header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */
+ cbs.annotation = (void *)s;
+ s->state.state_op_done[OP_CANCEL_ERROR] = true;
+ on_response_trailers_received(&cbs, &header_array);
+ gpr_free(header_array.headers);
+ } else {
+ execute_from_storage(s);
+ }
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, void *and_free_memory) {}
+ grpc_stream *gs, void *and_free_memory) {
+ stream_obj *s = (stream_obj *)gs;
+ GRPC_ERROR_UNREF(s->state.cancel_error);
+}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
@@ -1054,6 +1174,11 @@ static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
return NULL;
}
+static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_transport *gt) {
+ return NULL;
+}
+
static void perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {}
@@ -1066,4 +1191,5 @@ const grpc_transport_vtable grpc_cronet_vtable = {sizeof(stream_obj),
perform_op,
destroy_stream,
destroy_transport,
- get_peer};
+ get_peer,
+ get_endpoint};
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index cfc072c0b5..401a2ad4fe 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -298,6 +298,12 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
}
}
+grpc_channel_args *grpc_channel_args_set_socket_mutator(
+ grpc_channel_args *a, grpc_socket_mutator *mutator) {
+ grpc_arg tmp = grpc_socket_mutator_to_arg(mutator);
+ return grpc_channel_args_copy_and_add(a, &tmp, 1);
+}
+
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b) {
int c = GPR_ICMP(a->num_args, b->num_args);
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 1e05303471..88fc0e37a3 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -36,6 +36,7 @@
#include <grpc/compression.h>
#include <grpc/grpc.h>
+#include "src/core/lib/iomgr/socket_mutator.h"
// Channel args are intentionally immutable, to avoid the need for locking.
@@ -100,6 +101,13 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
+/** Returns a channel args instance with the socket mutator added. The socket
+ * mutator will perform its mutate_fd method on all file descriptors used by
+ * the channel.
+ * If \a a is non-NULL, its args are copied. */
+grpc_channel_args *grpc_channel_args_set_socket_mutator(
+ grpc_channel_args *a, grpc_socket_mutator *mutator);
+
/** Returns the value of argument \a name from \a args, or NULL if not found. */
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name);
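
Note: a usage sketch for grpc_channel_args_set_socket_mutator (not part of the patch). The mutator is assumed to be a grpc_socket_mutator created elsewhere; the call copies \a a and appends one arg via grpc_channel_args_copy_and_add, so the original args are left untouched and the caller owns the returned copy.

    grpc_channel_args *new_args =
        grpc_channel_args_set_socket_mutator(args, mutator);
    /* ... create a channel with new_args ... */
    grpc_channel_args_destroy(new_args);
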
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 2c5367901d..1d0b7d4f31 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -102,13 +102,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
-void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
- grpc_iomgr_cb_func destroy, void *destroy_arg,
- const grpc_channel_filter **filters,
- size_t filter_count,
- const grpc_channel_args *channel_args,
- grpc_transport *optional_transport,
- const char *name, grpc_channel_stack *stack) {
+grpc_error *grpc_channel_stack_init(
+ grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count,
+ const grpc_channel_args *channel_args, grpc_transport *optional_transport,
+ const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
@@ -126,6 +124,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
/* init per-filter data */
+ grpc_error *first_error = GRPC_ERROR_NONE;
for (i = 0; i < filter_count; i++) {
args.channel_stack = stack;
args.channel_args = channel_args;
@@ -134,7 +133,15 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
- elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
+ grpc_error *error =
+ elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
+ if (error != GRPC_ERROR_NONE) {
+ if (first_error == GRPC_ERROR_NONE) {
+ first_error = error;
+ } else {
+ GRPC_ERROR_UNREF(error);
+ }
+ }
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
@@ -144,6 +151,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_channel_stack_size(filters, filter_count));
stack->call_stack_size = call_size;
+ return first_error;
}
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
@@ -162,7 +170,8 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
+ grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
+ grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@@ -179,7 +188,7 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
- args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+ args.start_time = start_time;
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
@@ -255,6 +264,13 @@ char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
return next_elem->filter->get_peer(exec_ctx, next_elem);
}
+void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ const grpc_channel_info *channel_info) {
+ grpc_channel_element *next_elem = elem + 1;
+ next_elem->filter->get_channel_info(exec_ctx, next_elem, channel_info);
+}
+
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op) {
grpc_channel_element *next_elem = elem + 1;
@@ -288,7 +304,7 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_status_code status,
- gpr_slice *optional_message) {
+ grpc_slice *optional_message) {
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
memset(op, 0, sizeof(*op));
op->on_complete = grpc_closure_create(destroy_op, op);
@@ -300,7 +316,7 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_status_code status,
- gpr_slice *optional_message) {
+ grpc_slice *optional_message) {
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
memset(op, 0, sizeof(*op));
op->on_complete = grpc_closure_create(destroy_op, op);
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 27f3be7b29..d9d3a85233 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -34,6 +34,13 @@
#ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H
#define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H
+//////////////////////////////////////////////////////////////////////////////
+// IMPORTANT NOTE:
+//
+// When you update this API, please make the corresponding changes to
+// the C++ API in src/cpp/common/channel_filter.{h,cc}
+//////////////////////////////////////////////////////////////////////////////
+
/* A channel filter defines how operations on a channel are implemented.
Channel filters are chained together to create full channels, and if those
chains are linear, then channel stacks provide a mechanism to minimize
@@ -146,8 +153,9 @@ typedef struct {
is_first, is_last designate this elements position in the stack, and are
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
- void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_channel_element_args *args);
+ grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args);
/* Destroy per channel data.
The filter does not need to do any chaining */
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
@@ -156,6 +164,10 @@ typedef struct {
/* Implement grpc_call_get_peer() */
char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
+ /* Implement grpc_channel_get_info() */
+ void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ const grpc_channel_info *channel_info);
+
/* The name of this filter */
const char *name;
} grpc_channel_filter;
@@ -210,12 +222,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
-void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
- grpc_iomgr_cb_func destroy, void *destroy_arg,
- const grpc_channel_filter **filters,
- size_t filter_count, const grpc_channel_args *args,
- grpc_transport *optional_transport,
- const char *name, grpc_channel_stack *stack);
+grpc_error *grpc_channel_stack_init(
+ grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count,
+ const grpc_channel_args *args, grpc_transport *optional_transport,
+ const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *stack);
@@ -227,7 +238,8 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
+ grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
+ grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -273,6 +285,10 @@ void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op);
/* Pass through a request to get_peer to the next child element */
char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
+/* Pass through a request to get_channel_info() to the next child element */
+void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ const grpc_channel_info *channel_info);
/* Given the top element of a channel stack, get the channel stack itself */
grpc_channel_stack *grpc_channel_stack_from_top_element(
@@ -289,12 +305,12 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *cur_elem,
grpc_status_code status,
- gpr_slice *optional_message);
+ grpc_slice *optional_message);
void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *cur_elem,
grpc_status_code status,
- gpr_slice *optional_message);
+ grpc_slice *optional_message);
extern int grpc_trace_channel;
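
Note: with this change a filter's init_channel_elem reports failure through its return value, and the vtable gains a get_channel_info hook between get_peer and the name field. A minimal filter-side sketch (illustrative name only; filters with nothing to report delegate with grpc_channel_next_get_info, as the filters updated later in this patch do):

    static grpc_error *my_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                            grpc_channel_element *elem,
                                            grpc_channel_element_args *args) {
      /* ... set up channel_data; on failure return a descriptive error ... */
      return GRPC_ERROR_NONE;
    }
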
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index eda4968f48..b959517afb 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -227,11 +227,10 @@ void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder) {
gpr_free(builder);
}
-void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- size_t prefix_bytes, int initial_refs,
- grpc_iomgr_cb_func destroy,
- void *destroy_arg) {
+grpc_error *grpc_channel_stack_builder_finish(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+ size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, void **result) {
// count the number of filters
size_t num_filters = 0;
for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
@@ -250,28 +249,35 @@ void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
size_t channel_stack_size = grpc_channel_stack_size(filters, num_filters);
// allocate memory, with prefix_bytes followed by channel_stack_size
- char *result = gpr_malloc(prefix_bytes + channel_stack_size);
+ *result = gpr_malloc(prefix_bytes + channel_stack_size);
// fetch a pointer to the channel stack
grpc_channel_stack *channel_stack =
- (grpc_channel_stack *)(result + prefix_bytes);
+ (grpc_channel_stack *)((char *)(*result) + prefix_bytes);
// and initialize it
- grpc_channel_stack_init(exec_ctx, initial_refs, destroy,
- destroy_arg == NULL ? result : destroy_arg, filters,
- num_filters, builder->args, builder->transport,
- builder->name, channel_stack);
-
- // run post-initialization functions
- i = 0;
- for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
- if (p->init != NULL) {
- p->init(channel_stack, grpc_channel_stack_element(channel_stack, i),
- p->init_arg);
+ grpc_error *error = grpc_channel_stack_init(
+ exec_ctx, initial_refs, destroy,
+ destroy_arg == NULL ? *result : destroy_arg, filters, num_filters,
+ builder->args, builder->transport, builder->name, channel_stack);
+
+ if (error != GRPC_ERROR_NONE) {
+ grpc_channel_stack_destroy(exec_ctx, channel_stack);
+ gpr_free(*result);
+ *result = NULL;
+ } else {
+ // run post-initialization functions
+ i = 0;
+ for (filter_node *p = builder->begin.next; p != &builder->end;
+ p = p->next) {
+ if (p->init != NULL) {
+ p->init(channel_stack, grpc_channel_stack_element(channel_stack, i),
+ p->init_arg);
+ }
+ i++;
}
- i++;
}
grpc_channel_stack_builder_destroy(builder);
gpr_free((grpc_channel_filter **)filters);
- return result;
+ return error;
}
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index 4a00f7bfdb..65bfebcabc 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -146,16 +146,15 @@ bool grpc_channel_stack_builder_append_filter(
void grpc_channel_stack_builder_iterator_destroy(
grpc_channel_stack_builder_iterator *iterator);
-/// Destroy the builder, return the freshly minted channel stack
+/// Destroy the builder, return the freshly minted channel stack in \a result.
/// Allocates \a prefix_bytes bytes before the channel stack
/// Returns the base pointer of the allocated block
/// \a initial_refs, \a destroy, \a destroy_arg are as per
/// grpc_channel_stack_init
-void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- size_t prefix_bytes, int initial_refs,
- grpc_iomgr_cb_func destroy,
- void *destroy_arg);
+grpc_error *grpc_channel_stack_builder_finish(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+ size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, void **result);
/// Destroy the builder without creating a channel stack
void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder);
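
Note: a sketch of the revised calling convention for grpc_channel_stack_builder_finish (illustrative; builder, exec_ctx and destroy_cb are assumed to come from the surrounding channel-creation code). The builder is destroyed in either case; on failure the stack has already been torn down and *result is set to NULL.

    void *base = NULL;
    grpc_error *error = grpc_channel_stack_builder_finish(
        exec_ctx, builder, 0 /* prefix_bytes */, 1 /* initial_refs */,
        destroy_cb, NULL /* destroy_arg */, &base);
    if (error != GRPC_ERROR_NONE) {
      const char *msg = grpc_error_string(error);
      gpr_log(GPR_ERROR, "channel stack init failed: %s", msg);
      grpc_error_free_string(msg);
      GRPC_ERROR_UNREF(error);
    } else {
      grpc_channel_stack *stack = (grpc_channel_stack *)base; /* prefix_bytes == 0 */
      /* ... */
    }
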
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 0981d59f63..0e336dc330 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -35,9 +35,9 @@
#include <string.h>
#include <grpc/compression.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/compress_filter.h"
@@ -50,7 +50,7 @@
int grpc_compression_trace = 0;
typedef struct call_data {
- gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
uint32_t remaining_slice_bytes;
@@ -63,7 +63,7 @@ typedef struct call_data {
grpc_transport_stream_op *send_op;
uint32_t send_length;
uint32_t send_flags;
- gpr_slice incoming_slice;
+ grpc_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
grpc_closure *post_send;
grpc_closure send_done;
@@ -111,9 +111,13 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static int skip_compression(grpc_call_element *elem) {
+static int skip_compression(grpc_call_element *elem, uint32_t flags) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
+
+ if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
+ return 1;
+ }
if (calld->has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
@@ -157,7 +161,7 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
- gpr_slice_buffer_reset_and_unref(&calld->slices);
+ grpc_slice_buffer_reset_and_unref(&calld->slices);
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
}
@@ -165,8 +169,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
int did_compress;
- gpr_slice_buffer tmp;
- gpr_slice_buffer_init(&tmp);
+ grpc_slice_buffer tmp;
+ grpc_slice_buffer_init(&tmp);
did_compress =
grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
if (did_compress) {
@@ -181,7 +185,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}
- gpr_slice_buffer_swap(&calld->slices, &tmp);
+ grpc_slice_buffer_swap(&calld->slices, &tmp);
calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (grpc_compression_trace) {
@@ -195,7 +199,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
}
}
- gpr_slice_buffer_destroy(&tmp);
+ grpc_slice_buffer_destroy(&tmp);
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
@@ -209,7 +213,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
- gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ grpc_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
} else {
@@ -223,7 +227,7 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
while (grpc_byte_stream_next(exec_ctx, calld->send_op->send_message,
&calld->incoming_slice, ~(size_t)0,
&calld->got_slice)) {
- gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ grpc_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
break;
@@ -241,8 +245,8 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
if (op->send_initial_metadata) {
process_send_initial_metadata(elem, op->send_initial_metadata);
}
- if (op->send_message != NULL && !skip_compression(elem) &&
- 0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) {
+ if (op->send_message != NULL &&
+ !skip_compression(elem, op->send_message->flags)) {
calld->send_op = op;
calld->send_length = op->send_message->length;
calld->send_flags = op->send_message->flags;
@@ -263,7 +267,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
/* initialize members */
- gpr_slice_buffer_init(&calld->slices);
+ grpc_slice_buffer_init(&calld->slices);
calld->has_compression_algorithm = 0;
grpc_closure_init(&calld->got_slice, got_slice, elem);
grpc_closure_init(&calld->send_done, send_done, elem);
@@ -277,13 +281,13 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
void *ignored) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
- gpr_slice_buffer_destroy(&calld->slices);
+ grpc_slice_buffer_destroy(&calld->slices);
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
channeld->enabled_algorithms_bitset =
@@ -311,6 +315,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
}
GPR_ASSERT(!args->is_last);
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -328,4 +333,5 @@ const grpc_channel_filter grpc_compress_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"compress"};
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 918379c845..c2a36b5558 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -38,9 +38,9 @@
#include <string.h>
#include <grpc/byte_buffer.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport.h"
@@ -114,12 +114,13 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
cd->transport = NULL;
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel_data */
@@ -134,6 +135,11 @@ static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return grpc_transport_get_peer(exec_ctx, chand->transport);
}
+/* No-op. */
+static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ const grpc_channel_info *channel_info) {}
+
static const grpc_channel_filter connected_channel_filter = {
con_start_transport_stream_op,
con_start_transport_op,
@@ -145,6 +151,7 @@ static const grpc_channel_filter connected_channel_filter = {
init_channel_elem,
destroy_channel_elem,
con_get_peer,
+ con_get_channel_info,
"connected",
};
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 071c5f695c..6c931ad28a 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -47,6 +47,9 @@ typedef enum {
/// Value is a \a census_context.
GRPC_CONTEXT_TRACING,
+ /// Reserved for traffic_class_context.
+ GRPC_CONTEXT_TRAFFIC,
+
GRPC_CONTEXT_COUNT
} grpc_context_index;
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index d2ea5250f6..470ccfea57 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -55,10 +55,10 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
deadline_state->timer_pending = false;
gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
- gpr_slice msg = gpr_slice_from_static_string("Deadline Exceeded");
+ grpc_slice msg = grpc_slice_from_static_string("Deadline Exceeded");
grpc_call_element_send_cancel_with_message(
exec_ctx, elem, GRPC_STATUS_DEADLINE_EXCEEDED, &msg);
- gpr_slice_unref(msg);
+ grpc_slice_unref(msg);
}
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
@@ -207,10 +207,11 @@ void grpc_deadline_state_client_start_transport_stream_op(
//
// Constructor for channel_data. Used for both client and server filters.
-static void init_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem,
- grpc_channel_element_args* args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
+ return GRPC_ERROR_NONE;
}
// Destructor for channel_data. Used for both client and server filters.
@@ -316,6 +317,7 @@ const grpc_channel_filter grpc_client_deadline_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"deadline",
};
@@ -330,5 +332,6 @@ const grpc_channel_filter grpc_server_deadline_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"deadline",
};
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 0d759887bc..23edc826ca 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -38,12 +38,13 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/iomgr/timer.h"
//
// grpc_handshaker
//
-void grpc_handshaker_init(const struct grpc_handshaker_vtable* vtable,
+void grpc_handshaker_init(const grpc_handshaker_vtable* vtable,
grpc_handshaker* handshaker) {
handshaker->vtable = vtable;
}
@@ -60,45 +61,43 @@ void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
grpc_handshaker* handshaker,
- grpc_endpoint* endpoint,
- grpc_channel_args* args,
- gpr_slice_buffer* read_buffer,
- gpr_timespec deadline,
grpc_tcp_server_acceptor* acceptor,
- grpc_handshaker_done_cb cb, void* user_data) {
- handshaker->vtable->do_handshake(exec_ctx, handshaker, endpoint, args,
- read_buffer, deadline, acceptor, cb,
- user_data);
+ grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args) {
+ handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor,
+ on_handshake_done, args);
}
//
// grpc_handshake_manager
//
-// State used while chaining handshakers.
-struct grpc_handshaker_state {
- // The index of the handshaker to invoke next.
- size_t index;
- // The deadline for all handshakers.
- gpr_timespec deadline;
- // The acceptor to call the handshakers with.
- grpc_tcp_server_acceptor* acceptor;
- // The final callback and user_data to invoke after the last handshaker.
- grpc_handshaker_done_cb final_cb;
- void* final_user_data;
-};
-
struct grpc_handshake_manager {
+ gpr_mu mu;
+ gpr_refcount refs;
+ bool shutdown;
// An array of handshakers added via grpc_handshake_manager_add().
size_t count;
grpc_handshaker** handshakers;
- // State used while chaining handshakers.
- struct grpc_handshaker_state* state;
+ // The index of the handshaker to invoke next and closure to invoke it.
+ size_t index;
+ grpc_closure call_next_handshaker;
+ // The acceptor to call the handshakers with.
+ grpc_tcp_server_acceptor* acceptor;
+ // Deadline timer across all handshakers.
+ grpc_timer deadline_timer;
+ // The final callback and user_data to invoke after the last handshaker.
+ grpc_closure on_handshake_done;
+ void* user_data;
+ // Handshaker args.
+ grpc_handshaker_args args;
};
grpc_handshake_manager* grpc_handshake_manager_create() {
grpc_handshake_manager* mgr = gpr_malloc(sizeof(grpc_handshake_manager));
memset(mgr, 0, sizeof(*mgr));
+ gpr_mu_init(&mgr->mu);
+ gpr_ref_init(&mgr->refs, 1);
return mgr;
}
@@ -106,6 +105,7 @@ static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
grpc_handshaker* handshaker) {
+ gpr_mu_lock(&mgr->mu);
// To avoid allocating memory for each handshaker we add, we double
// the number of elements every time we need more.
size_t realloc_count = 0;
@@ -119,85 +119,117 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
+ gpr_mu_unlock(&mgr->mu);
+}
+
+static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx,
+ grpc_handshake_manager* mgr) {
+ if (gpr_unref(&mgr->refs)) {
+ for (size_t i = 0; i < mgr->count; ++i) {
+ grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]);
+ }
+ gpr_free(mgr->handshakers);
+ gpr_mu_destroy(&mgr->mu);
+ gpr_free(mgr);
+ }
}
void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx,
grpc_handshake_manager* mgr) {
- for (size_t i = 0; i < mgr->count; ++i) {
- grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]);
- }
- gpr_free(mgr->handshakers);
- gpr_free(mgr);
+ grpc_handshake_manager_unref(exec_ctx, mgr);
}
void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
grpc_handshake_manager* mgr) {
- for (size_t i = 0; i < mgr->count; ++i) {
- grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[i]);
+ gpr_mu_lock(&mgr->mu);
+ // Shutdown the handshaker that's currently in progress, if any.
+ if (!mgr->shutdown && mgr->index > 0) {
+ mgr->shutdown = true;
+ grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[mgr->index - 1]);
}
- if (mgr->state != NULL) {
- gpr_free(mgr->state);
- mgr->state = NULL;
+ gpr_mu_unlock(&mgr->mu);
+}
+
+// Helper function to call either the next handshaker or the
+// on_handshake_done callback.
+// Returns true if we've scheduled the on_handshake_done callback.
+static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
+ grpc_handshake_manager* mgr,
+ grpc_error* error) {
+ GPR_ASSERT(mgr->index <= mgr->count);
+ // If we got an error or we've been shut down or we're exiting early or
+ // we've finished the last handshaker, invoke the on_handshake_done
+ // callback. Otherwise, call the next handshaker.
+ if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early ||
+ mgr->index == mgr->count) {
+ // Cancel deadline timer, since we're invoking the on_handshake_done
+ // callback now.
+ grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
+ grpc_exec_ctx_sched(exec_ctx, &mgr->on_handshake_done, error, NULL);
+ mgr->shutdown = true;
+ } else {
+ grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
+ mgr->acceptor, &mgr->call_next_handshaker,
+ &mgr->args);
}
+ ++mgr->index;
+ return mgr->shutdown;
}
// A function used as the handshaker-done callback when chaining
// handshakers together.
-static void call_next_handshaker(grpc_exec_ctx* exec_ctx,
- grpc_endpoint* endpoint,
- grpc_channel_args* args,
- gpr_slice_buffer* read_buffer, void* user_data,
+static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_handshake_manager* mgr = user_data;
- GPR_ASSERT(mgr->state != NULL);
- GPR_ASSERT(mgr->state->index < mgr->count);
- // If we got an error, skip all remaining handshakers and invoke the
- // caller-supplied callback immediately.
- if (error != GRPC_ERROR_NONE) {
- mgr->state->final_cb(exec_ctx, endpoint, args, read_buffer,
- mgr->state->final_user_data, error);
- return;
+ grpc_handshake_manager* mgr = arg;
+ gpr_mu_lock(&mgr->mu);
+ bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
+ gpr_mu_unlock(&mgr->mu);
+  // If we've invoked the final callback, we won't be coming back
+ // to this function, so we can release our reference to the
+ // handshake manager.
+ if (done) {
+ grpc_handshake_manager_unref(exec_ctx, mgr);
}
- grpc_handshaker_done_cb cb = call_next_handshaker;
- // If this is the last handshaker, use the caller-supplied callback
- // and user_data instead of chaining back to this function again.
- if (mgr->state->index == mgr->count - 1) {
- cb = mgr->state->final_cb;
- user_data = mgr->state->final_user_data;
- }
- // Invoke handshaker.
- grpc_handshaker_do_handshake(
- exec_ctx, mgr->handshakers[mgr->state->index], endpoint, args,
- read_buffer, mgr->state->deadline, mgr->state->acceptor, cb, user_data);
- ++mgr->state->index;
- // If this is the last handshaker, clean up state.
- if (mgr->state->index == mgr->count) {
- gpr_free(mgr->state);
- mgr->state = NULL;
+}
+
+// Callback invoked when deadline is exceeded.
+static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_handshake_manager* mgr = arg;
+ if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
+ grpc_handshake_manager_shutdown(exec_ctx, mgr);
}
+ grpc_handshake_manager_unref(exec_ctx, mgr);
}
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
- grpc_endpoint* endpoint, const grpc_channel_args* args,
+ grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
- grpc_handshaker_done_cb cb, void* user_data) {
- grpc_channel_args* args_copy = grpc_channel_args_copy(args);
- gpr_slice_buffer* read_buffer = gpr_malloc(sizeof(*read_buffer));
- gpr_slice_buffer_init(read_buffer);
- if (mgr->count == 0) {
- // No handshakers registered, so we just immediately call the done
- // callback with the passed-in endpoint.
- cb(exec_ctx, endpoint, args_copy, read_buffer, user_data, GRPC_ERROR_NONE);
- } else {
- GPR_ASSERT(mgr->state == NULL);
- mgr->state = gpr_malloc(sizeof(struct grpc_handshaker_state));
- memset(mgr->state, 0, sizeof(*mgr->state));
- mgr->state->deadline = deadline;
- mgr->state->acceptor = acceptor;
- mgr->state->final_cb = cb;
- mgr->state->final_user_data = user_data;
- call_next_handshaker(exec_ctx, endpoint, args_copy, read_buffer, mgr,
- GRPC_ERROR_NONE);
+ grpc_iomgr_cb_func on_handshake_done, void* user_data) {
+ gpr_mu_lock(&mgr->mu);
+ GPR_ASSERT(mgr->index == 0);
+ GPR_ASSERT(!mgr->shutdown);
+ // Construct handshaker args. These will be passed through all
+ // handshakers and eventually be freed by the on_handshake_done callback.
+ mgr->args.endpoint = endpoint;
+ mgr->args.args = grpc_channel_args_copy(channel_args);
+ mgr->args.user_data = user_data;
+ mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
+ grpc_slice_buffer_init(mgr->args.read_buffer);
+ // Initialize state needed for calling handshakers.
+ mgr->acceptor = acceptor;
+ grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr);
+ grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args);
+ // Start deadline timer, which owns a ref.
+ gpr_ref(&mgr->refs);
+ grpc_timer_init(exec_ctx, &mgr->deadline_timer,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ on_timeout, mgr, gpr_now(GPR_CLOCK_MONOTONIC));
+ // Start first handshaker, which also owns a ref.
+ gpr_ref(&mgr->refs);
+ bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&mgr->mu);
+ if (done) {
+ grpc_handshake_manager_unref(exec_ctx, mgr);
}
}
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index d574b46242..450b7adaee 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -54,15 +54,33 @@
typedef struct grpc_handshaker grpc_handshaker;
-/// Callback type invoked when a handshaker is done.
-/// Takes ownership of \a args and \a read_buffer.
-typedef void (*grpc_handshaker_done_cb)(grpc_exec_ctx* exec_ctx,
- grpc_endpoint* endpoint,
- grpc_channel_args* args,
- gpr_slice_buffer* read_buffer,
- void* user_data, grpc_error* error);
-
-struct grpc_handshaker_vtable {
+/// Arguments passed through handshakers and to the on_handshake_done callback.
+///
+/// For handshakers, all members are input/output parameters; for
+/// example, a handshaker may read from or write to \a endpoint and
+/// then later replace it with a wrapped endpoint. Similarly, a
+/// handshaker may modify \a args.
+///
+/// A handshaker takes ownership of the members while a handshake is in
+/// progress. Upon failure or shutdown of an in-progress handshaker,
+/// the handshaker is responsible for destroying the members and setting
+/// them to NULL before invoking the on_handshake_done callback.
+///
+/// For the on_handshake_done callback, all members are input arguments,
+/// which the callback takes ownership of.
+typedef struct {
+ grpc_endpoint* endpoint;
+ grpc_channel_args* args;
+ grpc_slice_buffer* read_buffer;
+ // A handshaker may set this to true before invoking on_handshake_done
+ // to indicate that subsequent handshakers should be skipped.
+ bool exit_early;
+ // User data passed through the handshake manager. Not used by
+ // individual handshakers.
+ void* user_data;
+} grpc_handshaker_args;
+
+typedef struct {
/// Destroys the handshaker.
void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker);
@@ -70,43 +88,35 @@ struct grpc_handshaker_vtable {
/// aborted in the middle).
void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker);
- /// Performs handshaking. When finished, calls \a cb with \a user_data.
- /// Takes ownership of \a args.
- /// Takes ownership of \a read_buffer, which contains leftover bytes read
- /// from the endpoint by the previous handshaker.
+ /// Performs handshaking, modifying \a args as needed (e.g., to
+ /// replace \a endpoint with a wrapped endpoint).
+ /// When finished, invokes \a on_handshake_done.
/// \a acceptor will be NULL for client-side handshakers.
void (*do_handshake)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker,
- grpc_endpoint* endpoint, grpc_channel_args* args,
- gpr_slice_buffer* read_buffer, gpr_timespec deadline,
grpc_tcp_server_acceptor* acceptor,
- grpc_handshaker_done_cb cb, void* user_data);
-};
+ grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args);
+} grpc_handshaker_vtable;
/// Base struct. To subclass, make this the first member of the
/// implementation struct.
struct grpc_handshaker {
- const struct grpc_handshaker_vtable* vtable;
+ const grpc_handshaker_vtable* vtable;
};
/// Called by concrete implementations to initialize the base struct.
-void grpc_handshaker_init(const struct grpc_handshaker_vtable* vtable,
+void grpc_handshaker_init(const grpc_handshaker_vtable* vtable,
grpc_handshaker* handshaker);
-/// Convenient wrappers for invoking methods via the vtable.
-/// These probably do not need to be called from anywhere but
-/// grpc_handshake_manager.
void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx,
grpc_handshaker* handshaker);
void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
grpc_handshaker* handshaker);
void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
grpc_handshaker* handshaker,
- grpc_endpoint* endpoint,
- grpc_channel_args* args,
- gpr_slice_buffer* read_buffer,
- gpr_timespec deadline,
grpc_tcp_server_acceptor* acceptor,
- grpc_handshaker_done_cb cb, void* user_data);
+ grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args);
///
/// grpc_handshake_manager
@@ -134,15 +144,21 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
grpc_handshake_manager* mgr);
/// Invokes handshakers in the order they were added.
-/// Does NOT take ownership of \a args. Instead, makes a copy before
+/// Takes ownership of \a endpoint, and then passes that ownership to
+/// the \a on_handshake_done callback.
+/// Does NOT take ownership of \a channel_args. Instead, makes a copy before
/// invoking the first handshaker.
/// \a acceptor will be NULL for client-side handshakers.
-/// Invokes \a cb with \a user_data after either a handshaker fails or
-/// all handshakers have completed successfully.
+///
+/// When done, invokes \a on_handshake_done with a grpc_handshaker_args
+/// object as its argument. If the callback is invoked with error !=
+/// GRPC_ERROR_NONE, then handshaking failed and the handshaker has done
+/// the necessary clean-up. Otherwise, the callback takes ownership of
+/// the arguments.
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
- grpc_endpoint* endpoint, const grpc_channel_args* args,
+ grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
- grpc_handshaker_done_cb cb, void* user_data);
+ grpc_iomgr_cb_func on_handshake_done, void* user_data);
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
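
For reference, a minimal pass-through handshaker written against the interface above might look as follows. This is only a sketch based on the declarations shown in this header; the noop_* names and the use of grpc_exec_ctx_sched are illustrative and not part of the change.

#include <grpc/support/alloc.h>

#include "src/core/lib/channel/handshaker.h"

/* A handshaker that leaves args untouched and reports success immediately. */
typedef struct {
  grpc_handshaker base; /* must be the first member so we can up-cast */
} noop_handshaker;

static void noop_destroy(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker) {
  gpr_free(handshaker);
}

static void noop_shutdown(grpc_exec_ctx* exec_ctx,
                          grpc_handshaker* handshaker) {
  /* Nothing in flight to abort. */
}

static void noop_do_handshake(grpc_exec_ctx* exec_ctx,
                              grpc_handshaker* handshaker,
                              grpc_tcp_server_acceptor* acceptor,
                              grpc_closure* on_handshake_done,
                              grpc_handshaker_args* args) {
  /* args->endpoint, args->args and args->read_buffer pass through unchanged;
     just signal completion. */
  grpc_exec_ctx_sched(exec_ctx, on_handshake_done, GRPC_ERROR_NONE, NULL);
}

static const grpc_handshaker_vtable noop_vtable = {noop_destroy, noop_shutdown,
                                                   noop_do_handshake};

grpc_handshaker* noop_handshaker_create(void) {
  noop_handshaker* h = gpr_malloc(sizeof(*h));
  grpc_handshaker_init(&noop_vtable, &h->base);
  return &h->base;
}
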
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 1dc05fb20d..1a2d08dda5 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -36,6 +36,7 @@
#include <grpc/support/string_util.h>
#include <string.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/transport_impl.h"
@@ -56,27 +57,30 @@ typedef struct call_data {
grpc_linked_mdelem payload_bin;
grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_batch *recv_trailing_metadata;
uint8_t *payload_bytes;
/* Vars to read data off of send_message */
grpc_transport_stream_op send_op;
uint32_t send_length;
uint32_t send_flags;
- gpr_slice incoming_slice;
+ grpc_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
- gpr_slice_buffer slices;
+ grpc_slice_buffer slices;
  /* flag that indicates that not all slices of send_message are available */
bool send_message_blocked;
/** Closure to call when finished with the hc_on_recv hook */
- grpc_closure *on_done_recv;
+ grpc_closure *on_done_recv_initial_metadata;
+ grpc_closure *on_done_recv_trailing_metadata;
grpc_closure *on_complete;
grpc_closure *post_send;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
- grpc_closure hc_on_recv;
+ grpc_closure hc_on_recv_initial_metadata;
+ grpc_closure hc_on_recv_trailing_metadata;
grpc_closure hc_on_complete;
grpc_closure got_slice;
grpc_closure send_done;
@@ -101,11 +105,21 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
char *message_string;
gpr_asprintf(&message_string, "Received http2 header with status: %s",
grpc_mdstr_as_c_string(md->value));
- gpr_slice message = gpr_slice_from_copied_string(message_string);
+ grpc_slice message = grpc_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(a->exec_ctx, a->elem,
GRPC_STATUS_CANCELLED, &message);
return NULL;
+ } else if (md->key == GRPC_MDSTR_GRPC_MESSAGE) {
+ grpc_slice pct_decoded_msg =
+ grpc_permissive_percent_decode_slice(md->value->slice);
+ if (grpc_slice_is_equivalent(pct_decoded_msg, md->value->slice)) {
+ grpc_slice_unref(pct_decoded_msg);
+ return md;
+ } else {
+ return grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE, grpc_mdstr_from_slice(pct_decoded_msg));
+ }
} else if (md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
return NULL;
} else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
@@ -129,8 +143,8 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *error) {
+static void hc_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
+ void *user_data, grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
client_recv_filter_args a;
@@ -138,7 +152,21 @@ static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(calld->recv_initial_metadata, client_recv_filter,
&a);
- calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
+ grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata,
+ GRPC_ERROR_REF(error));
+}
+
+static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
+ void *user_data, grpc_error *error) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ client_recv_filter_args a;
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(calld->recv_trailing_metadata, client_recv_filter,
+ &a);
+ grpc_closure_run(exec_ctx, calld->on_done_recv_trailing_metadata,
+ GRPC_ERROR_REF(error));
}
static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -155,7 +183,7 @@ static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
- gpr_slice_buffer_reset_and_unref(&calld->slices);
+ grpc_slice_buffer_reset_and_unref(&calld->slices);
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
}
@@ -176,10 +204,10 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
&calld->incoming_slice, ~(size_t)0,
&calld->got_slice)) {
- memcpy(wrptr, GPR_SLICE_START_PTR(calld->incoming_slice),
- GPR_SLICE_LENGTH(calld->incoming_slice));
- wrptr += GPR_SLICE_LENGTH(calld->incoming_slice);
- gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ memcpy(wrptr, GRPC_SLICE_START_PTR(calld->incoming_slice),
+ GRPC_SLICE_LENGTH(calld->incoming_slice));
+ wrptr += GRPC_SLICE_LENGTH(calld->incoming_slice);
+ grpc_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
calld->send_message_blocked = false;
break;
@@ -191,7 +219,7 @@ static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
calld->send_message_blocked = false;
- gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ grpc_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
/* Pass down the original send_message op that was blocked.*/
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
@@ -217,12 +245,15 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
message, and the payload is below the size threshold, and all the data
for this request is immediately available. */
grpc_mdelem *method = GRPC_MDELEM_METHOD_POST;
- calld->send_message_blocked = false;
if ((op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
op->send_message != NULL &&
op->send_message->length < channeld->max_payload_size_for_get) {
method = GRPC_MDELEM_METHOD_GET;
+      /* The following write to calld->send_message_blocked isn't racy with
+         the read in hc_start_transport_op (which handles send_message ops),
+         because reaching this point means op->send_message is not NULL, and
+         that same check guards the read there. */
calld->send_message_blocked = true;
} else if (op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
@@ -281,8 +312,15 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
calld->recv_initial_metadata = op->recv_initial_metadata;
- calld->on_done_recv = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = &calld->hc_on_recv;
+ calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->hc_on_recv_initial_metadata;
+ }
+
+ if (op->recv_trailing_metadata != NULL) {
+ /* substitute our callback for the higher callback */
+ calld->recv_trailing_metadata = op->recv_trailing_metadata;
+ calld->on_done_recv_trailing_metadata = op->on_complete;
+ op->on_complete = &calld->hc_on_recv_trailing_metadata;
}
}
@@ -296,8 +334,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
if (op->send_message != NULL && calld->send_message_blocked) {
/* Don't forward the op. send_message contains slices that aren't ready
- yet. The call will be forwarded by the op_complete of slice read call.
- */
+       yet. The op will be forwarded by the on_complete of the slice read. */
} else {
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -308,11 +345,16 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- calld->on_done_recv = NULL;
+ calld->on_done_recv_initial_metadata = NULL;
+ calld->on_done_recv_trailing_metadata = NULL;
calld->on_complete = NULL;
calld->payload_bytes = NULL;
- gpr_slice_buffer_init(&calld->slices);
- grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
+ calld->send_message_blocked = false;
+ grpc_slice_buffer_init(&calld->slices);
+ grpc_closure_init(&calld->hc_on_recv_initial_metadata,
+ hc_on_recv_initial_metadata, elem);
+ grpc_closure_init(&calld->hc_on_recv_trailing_metadata,
+ hc_on_recv_trailing_metadata, elem);
grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
grpc_closure_init(&calld->got_slice, got_slice, elem);
grpc_closure_init(&calld->send_done, send_done, elem);
@@ -324,7 +366,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *ignored) {
call_data *calld = elem->call_data;
- gpr_slice_buffer_destroy(&calld->slices);
+ grpc_slice_buffer_destroy(&calld->slices);
}
static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
@@ -415,9 +457,9 @@ static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
@@ -428,6 +470,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_MDSTR_USER_AGENT,
user_agent_from_args(args->channel_args,
args->optional_transport->vtable->name));
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -448,4 +491,5 @@ const grpc_channel_filter grpc_http_client_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"http-client"};
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index f2221fb0fb..a5134ee21b 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -37,6 +37,7 @@
#include <grpc/support/log.h>
#include <string.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/transport/static_metadata.h"
#define EXPECTED_CONTENT_TYPE "application/grpc"
@@ -68,7 +69,7 @@ typedef struct call_data {
grpc_closure *recv_message_ready;
grpc_closure *on_complete;
grpc_byte_stream **pp_recv_message;
- gpr_slice_buffer read_slice_buffer;
+ grpc_slice_buffer read_slice_buffer;
grpc_slice_buffer_stream read_stream;
/** Receive closures are chained: we inject this closure as the on_done_recv
@@ -86,6 +87,23 @@ typedef struct {
grpc_exec_ctx *exec_ctx;
} server_filter_args;
+static grpc_mdelem *server_filter_outgoing_metadata(void *user_data,
+ grpc_mdelem *md) {
+ if (md->key == GRPC_MDSTR_GRPC_MESSAGE) {
+ grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
+ md->value->slice, grpc_compatible_percent_encoding_unreserved_bytes);
+ if (grpc_slice_is_equivalent(pct_encoded_msg, md->value->slice)) {
+ grpc_slice_unref(pct_encoded_msg);
+ return md;
+ } else {
+ return grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE, grpc_mdstr_from_slice(pct_encoded_msg));
+ }
+ } else {
+ return md;
+ }
+}
+
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
server_filter_args *a = user_data;
grpc_call_element *elem = a->elem;
@@ -162,9 +180,8 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* Retrieve the payload from the value of the 'grpc-internal-payload-bin'
header field */
calld->seen_payload_bin = 1;
- gpr_slice_buffer_init(&calld->read_slice_buffer);
- gpr_slice_buffer_add(&calld->read_slice_buffer,
- gpr_slice_ref(md->value->slice));
+ grpc_slice_buffer_add(&calld->read_slice_buffer,
+ grpc_slice_ref(md->value->slice));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
return NULL;
@@ -255,7 +272,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
-static void hs_mutate_op(grpc_call_element *elem,
+static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
@@ -291,6 +308,12 @@ static void hs_mutate_op(grpc_call_element *elem,
op->on_complete = &calld->hs_on_complete;
}
}
+
+ if (op->send_trailing_metadata) {
+ server_filter_args a = {elem, exec_ctx};
+ grpc_metadata_batch_filter(op->send_trailing_metadata,
+ server_filter_outgoing_metadata, &a);
+ }
}
static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -298,7 +321,7 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
GPR_TIMER_BEGIN("hs_start_transport_op", 0);
- hs_mutate_op(elem, op);
+ hs_mutate_op(exec_ctx, elem, op);
grpc_call_next_op(exec_ctx, elem, op);
GPR_TIMER_END("hs_start_transport_op", 0);
}
@@ -314,19 +337,24 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
+ grpc_slice_buffer_init(&calld->read_slice_buffer);
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {}
+ void *ignored) {
+ call_data *calld = elem->call_data;
+ grpc_slice_buffer_destroy(&calld->read_slice_buffer);
+}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -344,4 +372,5 @@ const grpc_channel_filter grpc_http_server_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"http-server"};
diff --git a/src/core/lib/compression/message_compress.c b/src/core/lib/compression/message_compress.c
index cbe0b5a285..6c245acf61 100644
--- a/src/core/lib/compression/message_compress.c
+++ b/src/core/lib/compression/message_compress.c
@@ -42,31 +42,31 @@
#define OUTPUT_BLOCK_SIZE 1024
-static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
- gpr_slice_buffer* output,
+static int zlib_body(z_stream* zs, grpc_slice_buffer* input,
+ grpc_slice_buffer* output,
int (*flate)(z_stream* zs, int flush)) {
int r;
int flush;
size_t i;
- gpr_slice outbuf = gpr_slice_malloc(OUTPUT_BLOCK_SIZE);
+ grpc_slice outbuf = grpc_slice_malloc(OUTPUT_BLOCK_SIZE);
const uInt uint_max = ~(uInt)0;
- GPR_ASSERT(GPR_SLICE_LENGTH(outbuf) <= uint_max);
- zs->avail_out = (uInt)GPR_SLICE_LENGTH(outbuf);
- zs->next_out = GPR_SLICE_START_PTR(outbuf);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(outbuf) <= uint_max);
+ zs->avail_out = (uInt)GRPC_SLICE_LENGTH(outbuf);
+ zs->next_out = GRPC_SLICE_START_PTR(outbuf);
flush = Z_NO_FLUSH;
for (i = 0; i < input->count; i++) {
if (i == input->count - 1) flush = Z_FINISH;
- GPR_ASSERT(GPR_SLICE_LENGTH(input->slices[i]) <= uint_max);
- zs->avail_in = (uInt)GPR_SLICE_LENGTH(input->slices[i]);
- zs->next_in = GPR_SLICE_START_PTR(input->slices[i]);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(input->slices[i]) <= uint_max);
+ zs->avail_in = (uInt)GRPC_SLICE_LENGTH(input->slices[i]);
+ zs->next_in = GRPC_SLICE_START_PTR(input->slices[i]);
do {
if (zs->avail_out == 0) {
- gpr_slice_buffer_add_indexed(output, outbuf);
- outbuf = gpr_slice_malloc(OUTPUT_BLOCK_SIZE);
- GPR_ASSERT(GPR_SLICE_LENGTH(outbuf) <= uint_max);
- zs->avail_out = (uInt)GPR_SLICE_LENGTH(outbuf);
- zs->next_out = GPR_SLICE_START_PTR(outbuf);
+ grpc_slice_buffer_add_indexed(output, outbuf);
+ outbuf = grpc_slice_malloc(OUTPUT_BLOCK_SIZE);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(outbuf) <= uint_max);
+ zs->avail_out = (uInt)GRPC_SLICE_LENGTH(outbuf);
+ zs->next_out = GRPC_SLICE_START_PTR(outbuf);
}
r = flate(zs, flush);
if (r < 0 && r != Z_BUF_ERROR /* not fatal */) {
@@ -82,12 +82,12 @@ static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
GPR_ASSERT(outbuf.refcount);
outbuf.data.refcounted.length -= zs->avail_out;
- gpr_slice_buffer_add_indexed(output, outbuf);
+ grpc_slice_buffer_add_indexed(output, outbuf);
return 1;
error:
- gpr_slice_unref(outbuf);
+ grpc_slice_unref(outbuf);
return 0;
}
@@ -97,7 +97,7 @@ static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) {
static void zfree_gpr(void* opaque, void* address) { gpr_free(address); }
-static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
+static int zlib_compress(grpc_slice_buffer* input, grpc_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -113,7 +113,7 @@ static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
r = zlib_body(&zs, input, output, deflate) && output->length < input->length;
if (!r) {
for (i = count_before; i < output->count; i++) {
- gpr_slice_unref(output->slices[i]);
+ grpc_slice_unref(output->slices[i]);
}
output->count = count_before;
output->length = length_before;
@@ -122,7 +122,7 @@ static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
return r;
}
-static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
+static int zlib_decompress(grpc_slice_buffer* input, grpc_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -137,7 +137,7 @@ static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
r = zlib_body(&zs, input, output, inflate);
if (!r) {
for (i = count_before; i < output->count; i++) {
- gpr_slice_unref(output->slices[i]);
+ grpc_slice_unref(output->slices[i]);
}
output->count = count_before;
output->length = length_before;
@@ -146,16 +146,16 @@ static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
return r;
}
-static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
+static int copy(grpc_slice_buffer* input, grpc_slice_buffer* output) {
size_t i;
for (i = 0; i < input->count; i++) {
- gpr_slice_buffer_add(output, gpr_slice_ref(input->slices[i]));
+ grpc_slice_buffer_add(output, grpc_slice_ref(input->slices[i]));
}
return 1;
}
static int compress_inner(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output) {
+ grpc_slice_buffer* input, grpc_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
    /* the fallback path always needs to be sent uncompressed: we simply
@@ -173,7 +173,7 @@ static int compress_inner(grpc_compression_algorithm algorithm,
}
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output) {
+ grpc_slice_buffer* input, grpc_slice_buffer* output) {
if (!compress_inner(algorithm, input, output)) {
copy(input, output);
return 0;
@@ -182,7 +182,7 @@ int grpc_msg_compress(grpc_compression_algorithm algorithm,
}
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output) {
+ grpc_slice_buffer* input, grpc_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
return copy(input, output);
diff --git a/src/core/lib/compression/message_compress.h b/src/core/lib/compression/message_compress.h
index c69eaaf006..448d36a863 100644
--- a/src/core/lib/compression/message_compress.h
+++ b/src/core/lib/compression/message_compress.h
@@ -35,18 +35,18 @@
#define GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H
#include <grpc/compression.h>
-#include <grpc/support/slice_buffer.h>
+#include <grpc/slice_buffer.h>
/* compress 'input' to 'output' using 'algorithm'.
On success, appends compressed slices to output and returns 1.
On failure, appends uncompressed slices to output and returns 0. */
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output);
+ grpc_slice_buffer* input, grpc_slice_buffer* output);
/* decompress 'input' to 'output' using 'algorithm'.
On success, appends slices to output and returns 1.
On failure, output is unchanged, and returns 0. */
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output);
+ grpc_slice_buffer* input, grpc_slice_buffer* output);
#endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */
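
A minimal usage sketch for the slice-buffer based API above; the choice of compression algorithm is illustrative:

#include <grpc/compression.h>
#include <grpc/slice_buffer.h>

#include "src/core/lib/compression/message_compress.h"

static void compress_example(grpc_slice_buffer* payload) {
  grpc_slice_buffer compressed;
  grpc_slice_buffer_init(&compressed);
  if (grpc_msg_compress(GRPC_COMPRESS_GZIP, payload, &compressed)) {
    /* success: compressed holds gzip output smaller than the input */
  } else {
    /* compression did not help: compressed holds an uncompressed copy */
  }
  grpc_slice_buffer_destroy(&compressed);
}
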
diff --git a/src/core/lib/http/format_request.c b/src/core/lib/http/format_request.c
index e818b70113..024664b6ee 100644
--- a/src/core/lib/http/format_request.c
+++ b/src/core/lib/http/format_request.c
@@ -37,8 +37,8 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/slice.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
@@ -65,7 +65,8 @@ static void fill_common_header(const grpc_httpcli_request *request,
}
}
-gpr_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request) {
+grpc_slice grpc_httpcli_format_get_request(
+ const grpc_httpcli_request *request) {
gpr_strvec out;
char *flat;
size_t flat_len;
@@ -78,12 +79,12 @@ gpr_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request) {
flat = gpr_strvec_flatten(&out, &flat_len);
gpr_strvec_destroy(&out);
- return gpr_slice_new(flat, flat_len, gpr_free);
+ return grpc_slice_new(flat, flat_len, gpr_free);
}
-gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
- const char *body_bytes,
- size_t body_size) {
+grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
+ const char *body_bytes,
+ size_t body_size) {
gpr_strvec out;
char *tmp;
size_t out_len;
@@ -117,10 +118,10 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
out_len += body_size;
}
- return gpr_slice_new(tmp, out_len, gpr_free);
+ return grpc_slice_new(tmp, out_len, gpr_free);
}
-gpr_slice grpc_httpcli_format_connect_request(
+grpc_slice grpc_httpcli_format_connect_request(
const grpc_httpcli_request *request) {
gpr_strvec out;
gpr_strvec_init(&out);
@@ -130,5 +131,5 @@ gpr_slice grpc_httpcli_format_connect_request(
size_t flat_len;
char *flat = gpr_strvec_flatten(&out, &flat_len);
gpr_strvec_destroy(&out);
- return gpr_slice_new(flat, flat_len, gpr_free);
+ return grpc_slice_new(flat, flat_len, gpr_free);
}
diff --git a/src/core/lib/http/format_request.h b/src/core/lib/http/format_request.h
index 7abd55f2f7..1c8e3f68c5 100644
--- a/src/core/lib/http/format_request.h
+++ b/src/core/lib/http/format_request.h
@@ -34,14 +34,14 @@
#ifndef GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H
#define GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/lib/http/httpcli.h"
-gpr_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request);
-gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
- const char *body_bytes,
- size_t body_size);
-gpr_slice grpc_httpcli_format_connect_request(
+grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request);
+grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
+ const char *body_bytes,
+ size_t body_size);
+grpc_slice grpc_httpcli_format_connect_request(
const grpc_httpcli_request *request);
#endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 55de1b5990..1035f31109 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -50,7 +50,7 @@
#include "src/core/lib/support/string.h"
typedef struct {
- gpr_slice request_text;
+ grpc_slice request_text;
grpc_http_parser parser;
grpc_resolved_addresses *addresses;
size_t next_address;
@@ -64,12 +64,13 @@ typedef struct {
grpc_httpcli_context *context;
grpc_polling_entity *pollent;
grpc_iomgr_object iomgr_obj;
- gpr_slice_buffer incoming;
- gpr_slice_buffer outgoing;
+ grpc_slice_buffer incoming;
+ grpc_slice_buffer outgoing;
grpc_closure on_read;
grpc_closure done_write;
grpc_closure connected;
grpc_error *overall_error;
+ grpc_resource_quota *resource_quota;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
@@ -110,13 +111,14 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
if (req->ep != NULL) {
grpc_endpoint_destroy(exec_ctx, req->ep);
}
- gpr_slice_unref(req->request_text);
+ grpc_slice_unref(req->request_text);
gpr_free(req->host);
gpr_free(req->ssl_host_override);
grpc_iomgr_unregister_object(&req->iomgr_obj);
- gpr_slice_buffer_destroy(&req->incoming);
- gpr_slice_buffer_destroy(&req->outgoing);
+ grpc_slice_buffer_destroy(&req->incoming);
+ grpc_slice_buffer_destroy(&req->outgoing);
GRPC_ERROR_UNREF(req->overall_error);
+ grpc_resource_quota_internal_unref(exec_ctx, req->resource_quota);
gpr_free(req);
}
@@ -142,7 +144,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
size_t i;
for (i = 0; i < req->incoming.count; i++) {
- if (GPR_SLICE_LENGTH(req->incoming.slices[i])) {
+ if (GRPC_SLICE_LENGTH(req->incoming.slices[i])) {
req->have_read_byte = 1;
grpc_error *err =
grpc_http_parser_parse(&req->parser, req->incoming.slices[i], NULL);
@@ -176,8 +178,8 @@ static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
- gpr_slice_ref(req->request_text);
- gpr_slice_buffer_add(&req->outgoing, req->request_text);
+ grpc_slice_ref(req->request_text);
+ grpc_slice_buffer_add(&req->outgoing, req->request_text);
grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write);
}
@@ -223,8 +225,15 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
addr = &req->addresses->addrs[req->next_address++];
grpc_closure_init(&req->connected, on_connected, req);
+ grpc_arg arg;
+ arg.key = GRPC_ARG_RESOURCE_QUOTA;
+ arg.type = GRPC_ARG_POINTER;
+ arg.value.pointer.p = req->resource_quota;
+ arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
+ grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
- req->context->pollset_set, addr, req->deadline);
+ req->context->pollset_set, &args, addr,
+ req->deadline);
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -240,10 +249,11 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
- const char *name, gpr_slice request_text) {
+ const char *name, grpc_slice request_text) {
internal_request *req = gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
@@ -255,10 +265,11 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->context = context;
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
+ req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&req->on_read, on_read, req);
grpc_closure_init(&req->done_write, done_write, req);
- gpr_slice_buffer_init(&req->incoming);
- gpr_slice_buffer_init(&req->outgoing);
+ grpc_slice_buffer_init(&req->incoming);
+ grpc_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
req->host = gpr_strdup(request->host);
req->ssl_host_override = gpr_strdup(request->ssl_host_override);
@@ -267,11 +278,13 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
+ req->context->pollset_set,
grpc_closure_create(on_resolved, req), &req->addresses);
}
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
@@ -281,14 +294,15 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
return;
}
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
- internal_request_begin(exec_ctx, context, pollent, request, deadline, on_done,
- response, name,
+ internal_request_begin(exec_ctx, context, pollent, resource_quota, request,
+ deadline, on_done, response, name,
grpc_httpcli_format_get_request(request));
gpr_free(name);
}
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_done,
@@ -301,7 +315,8 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
}
gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
internal_request_begin(
- exec_ctx, context, pollent, request, deadline, on_done, response, name,
+ exec_ctx, context, pollent, resource_quota, request, deadline, on_done,
+ response, name,
grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
}
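
The GRPC_ARG_RESOURCE_QUOTA plumbing added in next_address generalizes to any caller of grpc_tcp_client_connect. A condensed sketch; the wrapper name and parameters are illustrative:

#include <grpc/grpc.h>

#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_client.h"

static void connect_with_quota(grpc_exec_ctx* exec_ctx, grpc_closure* on_done,
                               grpc_endpoint** ep, grpc_pollset_set* pss,
                               grpc_resource_quota* quota,
                               const grpc_resolved_address* addr,
                               gpr_timespec deadline) {
  grpc_arg arg;
  arg.key = GRPC_ARG_RESOURCE_QUOTA;
  arg.type = GRPC_ARG_POINTER;
  arg.value.pointer.p = quota;
  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {1, &arg};
  grpc_tcp_client_connect(exec_ctx, on_done, ep, pss, &args, addr, deadline);
}
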
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 320c0f86c6..11e03b44df 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -96,6 +96,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'on_response' is a callback to report results to */
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
@@ -116,6 +117,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_complete,
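
An illustrative caller of the updated entry point; the host, path, deadline, and quota name here are placeholders, not values from this change:

#include <string.h>

#include <grpc/grpc.h>
#include <grpc/support/time.h>

#include "src/core/lib/http/httpcli.h"

static void fetch_index(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* ctx,
                        grpc_polling_entity* pollent, grpc_closure* on_done,
                        grpc_httpcli_response* response) {
  grpc_resource_quota* quota = grpc_resource_quota_create("httpcli_get");
  grpc_httpcli_request request;
  memset(&request, 0, sizeof(request));
  request.host = (char*)"example.com";
  request.http.path = (char*)"/index.html";
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(15, GPR_TIMESPAN));
  grpc_httpcli_get(exec_ctx, ctx, pollent, quota, &request, deadline, on_done,
                   response);
  grpc_resource_quota_unref(quota); /* httpcli keeps its own internal ref */
}
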
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 0006e809a6..14cdb1dab3 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -38,7 +38,9 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/security/transport/handshake.h"
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/security/transport/security_handshaker.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/tsi/ssl_transport_security.h"
@@ -58,52 +60,43 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) {
gpr_free(sc);
}
-static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer,
- gpr_timespec deadline,
- grpc_security_handshake_done_cb cb,
- void *user_data) {
+static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr) {
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
- tsi_result result = TSI_OK;
- tsi_handshaker *handshaker;
- if (c->handshaker_factory == NULL) {
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
- return;
- }
- result = tsi_ssl_handshaker_factory_create_handshaker(
- c->handshaker_factory, c->secure_peer_name, &handshaker);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
- tsi_result_to_string(result));
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
- } else {
- grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
- nonsecure_endpoint, read_buffer, deadline, cb,
- user_data);
+ tsi_handshaker *handshaker = NULL;
+ if (c->handshaker_factory != NULL) {
+ tsi_result result = tsi_ssl_handshaker_factory_create_handshaker(
+ c->handshaker_factory, c->secure_peer_name, &handshaker);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
+ tsi_result_to_string(result));
+ }
}
+ grpc_handshake_manager_add(
+ handshake_mgr,
+ grpc_security_handshaker_create(exec_ctx, handshaker, &sc->base));
}
static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
- grpc_security_peer_check_cb cb,
- void *user_data) {
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked) {
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
- grpc_security_status status = GRPC_SECURITY_OK;
+ grpc_error *error = GRPC_ERROR_NONE;
/* Check the peer name. */
if (c->secure_peer_name != NULL &&
!tsi_ssl_peer_matches_name(&peer, c->secure_peer_name)) {
- gpr_log(GPR_ERROR, "Peer name %s is not in peer certificate",
- c->secure_peer_name);
- status = GRPC_SECURITY_ERROR;
+ char *msg;
+ gpr_asprintf(&msg, "Peer name %s is not in peer certificate",
+ c->secure_peer_name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
}
- cb(exec_ctx, user_data, status, NULL);
+ grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
tsi_peer_destruct(&peer);
}
@@ -140,7 +133,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
*sc = NULL;
return GRPC_SECURITY_ERROR;
}
- c->base.do_handshake = httpcli_ssl_do_handshake;
+ c->base.add_handshakers = httpcli_ssl_add_handshakers;
*sc = &c->base;
return GRPC_SECURITY_OK;
}
@@ -150,19 +143,25 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
typedef struct {
void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint);
void *arg;
+ grpc_handshake_manager *handshake_mgr;
} on_done_closure;
-static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
- grpc_security_status status,
- grpc_endpoint *secure_endpoint,
- grpc_auth_context *auth_context) {
- on_done_closure *c = rp;
- if (status != GRPC_SECURITY_OK) {
- gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
+static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_handshaker_args *args = arg;
+ on_done_closure *c = args->user_data;
+ if (error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg);
+ grpc_error_free_string(msg);
c->func(exec_ctx, c->arg, NULL);
} else {
- c->func(exec_ctx, c->arg, secure_endpoint);
+ grpc_channel_args_destroy(args->args);
+ grpc_slice_buffer_destroy(args->read_buffer);
+ gpr_free(args->read_buffer);
+ c->func(exec_ctx, c->arg, args->endpoint);
}
+ grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
gpr_free(c);
}
@@ -183,11 +182,15 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
}
c->func = on_done;
c->arg = arg;
+ c->handshake_mgr = grpc_handshake_manager_create();
GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
pem_root_certs, pem_root_certs_size, host, &sc) ==
GRPC_SECURITY_OK);
- grpc_channel_security_connector_do_handshake(
- exec_ctx, sc, tcp, NULL, deadline, on_secure_transport_setup_done, c);
+ grpc_channel_security_connector_add_handshakers(exec_ctx, sc,
+ c->handshake_mgr);
+ grpc_handshake_manager_do_handshake(
+ exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline,
+ NULL /* acceptor */, on_handshake_done, c /* user_data */);
GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli");
}
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index be9e9b6b63..2f84adc187 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -333,12 +333,12 @@ void grpc_http_response_destroy(grpc_http_response *response) {
gpr_free(response->hdrs);
}
-grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, gpr_slice slice,
+grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
size_t *start_of_body) {
- for (size_t i = 0; i < GPR_SLICE_LENGTH(slice); i++) {
+ for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) {
bool found_body_start = false;
grpc_error *err =
- addbyte(parser, GPR_SLICE_START_PTR(slice)[i], &found_body_start);
+ addbyte(parser, GRPC_SLICE_START_PTR(slice)[i], &found_body_start);
if (err != GRPC_ERROR_NONE) return err;
if (found_body_start && start_of_body != NULL) *start_of_body = i + 1;
}
diff --git a/src/core/lib/http/parser.h b/src/core/lib/http/parser.h
index fab42979cd..a68011dd43 100644
--- a/src/core/lib/http/parser.h
+++ b/src/core/lib/http/parser.h
@@ -34,8 +34,8 @@
#ifndef GRPC_CORE_LIB_HTTP_PARSER_H
#define GRPC_CORE_LIB_HTTP_PARSER_H
+#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
#include "src/core/lib/iomgr/error.h"
/* Maximum length of a header string of the form 'Key: Value\r\n' */
@@ -114,7 +114,7 @@ void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type,
void grpc_http_parser_destroy(grpc_http_parser *parser);
/* Sets \a start_of_body to the offset in \a slice of the start of the body. */
-grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, gpr_slice slice,
+grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
size_t *start_of_body);
grpc_error *grpc_http_parser_eof(grpc_http_parser *parser);
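
A small usage sketch for the slice-based entry point; parse_response_bytes is an illustrative wrapper, not part of the API:

#include <grpc/slice.h>

#include "src/core/lib/http/parser.h"

static grpc_error* parse_response_bytes(grpc_http_parser* parser,
                                        const char* bytes, size_t len) {
  grpc_slice slice = grpc_slice_from_copied_buffer(bytes, len);
  grpc_error* err =
      grpc_http_parser_parse(parser, slice, NULL /* start_of_body */);
  grpc_slice_unref(slice);
  return err;
}
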
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 60ee14eb23..cfc67020ae 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -90,6 +90,12 @@ static bool is_covered_by_poller(grpc_combiner *lock) {
gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}
+#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
+#define IS_COVERED_BY_POLLER_ARGS(lock) \
+ (lock)->final_list_covered_by_poller, \
+ gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
+ is_covered_by_poller((lock))
+
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->next_combiner_on_this_exec_ctx = NULL;
@@ -197,9 +203,10 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
- "is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
+ "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
+ " exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
- lock, lock->optional_workqueue, is_covered_by_poller(lock),
+ lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index f901fcf962..2d300f4560 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -34,12 +34,12 @@
#include "src/core/lib/iomgr/endpoint.h"
void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
- gpr_slice_buffer* slices, grpc_closure* cb) {
+ grpc_slice_buffer* slices, grpc_closure* cb) {
ep->vtable->read(exec_ctx, ep, slices, cb);
}
void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
- gpr_slice_buffer* slices, grpc_closure* cb) {
+ grpc_slice_buffer* slices, grpc_closure* cb) {
ep->vtable->write(exec_ctx, ep, slices, cb);
}
@@ -66,6 +66,12 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
return ep->vtable->get_peer(ep);
}
+int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
+
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
return ep->vtable->get_workqueue(ep);
}
+
+grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
+ return ep->vtable->get_resource_user(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 910a6f6532..1609b64f2b 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -34,11 +34,12 @@
#ifndef GRPC_CORE_LIB_IOMGR_ENDPOINT_H
#define GRPC_CORE_LIB_IOMGR_ENDPOINT_H
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/iomgr/resource_quota.h"
/* An endpoint caps a streaming channel between two communicating processes.
Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
@@ -48,9 +49,9 @@ typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
struct grpc_endpoint_vtable {
void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *slices, grpc_closure *cb);
+ grpc_slice_buffer *slices, grpc_closure *cb);
void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *slices, grpc_closure *cb);
+ grpc_slice_buffer *slices, grpc_closure *cb);
grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep);
void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
@@ -58,7 +59,9 @@ struct grpc_endpoint_vtable {
grpc_pollset_set *pollset);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
char *(*get_peer)(grpc_endpoint *ep);
+ int (*get_fd)(grpc_endpoint *ep);
};
/* When data is available on the connection, calls the callback with slices.
@@ -67,10 +70,14 @@ struct grpc_endpoint_vtable {
Valid slices may be placed into \a slices even when the callback is
invoked with error != GRPC_ERROR_NONE. */
void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *slices, grpc_closure *cb);
+ grpc_slice_buffer *slices, grpc_closure *cb);
char *grpc_endpoint_get_peer(grpc_endpoint *ep);
+/* Get the file descriptor used by \a ep. Return -1 if \a ep is not using an fd.
+ */
+int grpc_endpoint_get_fd(grpc_endpoint *ep);
+
/* Retrieve a reference to the workqueue associated with this endpoint */
grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
@@ -85,7 +92,7 @@ grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
it is a valid slice buffer.
*/
void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *slices, grpc_closure *cb);
+ grpc_slice_buffer *slices, grpc_closure *cb);
/* Causes any pending and future read/write callbacks to run immediately with
success==0 */
@@ -100,6 +107,8 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
+grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
+
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
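
For an endpoint that is not backed by a file descriptor, the two new vtable slots might be filled like this; a fragment only, with the mock_* names being illustrative:

#include "src/core/lib/iomgr/endpoint.h"

static int mock_get_fd(grpc_endpoint* ep) {
  return -1; /* no underlying fd */
}

static grpc_resource_user* mock_get_resource_user(grpc_endpoint* ep) {
  return NULL; /* or the resource user this endpoint allocates against */
}

/* These go into the endpoint's grpc_endpoint_vtable alongside the existing
   read/write/shutdown/destroy/get_peer entries. */
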
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index 5cd78051bd..f9de0c715e 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -41,7 +41,8 @@ typedef struct {
grpc_endpoint *server;
} grpc_endpoint_pair;
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size);
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.c b/src/core/lib/iomgr/endpoint_pair_posix.c
index ec2cd782b1..b9ff969e81 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.c
+++ b/src/core/lib/iomgr/endpoint_pair_posix.c
@@ -62,20 +62,21 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
int sv[2];
grpc_endpoint_pair p;
char *final_name;
create_sockets(sv);
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
- "socketpair-server");
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), resource_quota,
+ read_slice_size, "socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
- "socketpair-client");
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), resource_quota,
+ read_slice_size, "socketpair-client");
gpr_free(final_name);
return p;
}
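
Callers now thread a quota through the pair constructor; a usage sketch following the public resource-quota API, with the quota name and slice size being illustrative:

#include <grpc/grpc.h>

#include "src/core/lib/iomgr/endpoint_pair.h"

static grpc_endpoint_pair make_test_pair(void) {
  grpc_resource_quota* quota =
      grpc_resource_quota_create("endpoint_pair_test");
  grpc_endpoint_pair pair =
      grpc_iomgr_create_endpoint_pair("test", quota, 8192);
  grpc_resource_quota_unref(quota); /* the endpoints hold their own refs */
  return pair;
}
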
diff --git a/src/core/lib/iomgr/endpoint_pair_uv.c b/src/core/lib/iomgr/endpoint_pair_uv.c
index 7941e20388..ff24894c6d 100644
--- a/src/core/lib/iomgr/endpoint_pair_uv.c
+++ b/src/core/lib/iomgr/endpoint_pair_uv.c
@@ -41,8 +41,9 @@
#include "src/core/lib/iomgr/endpoint_pair.h"
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
grpc_endpoint_pair endpoint_pair;
// TODO(mlumish): implement this properly under libuv
GPR_ASSERT(false &&
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.c b/src/core/lib/iomgr/endpoint_pair_windows.c
index 5c78c95492..93f71b745c 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_windows.c
@@ -82,15 +82,16 @@ static void create_sockets(SOCKET sv[2]) {
sv[0] = svr_sock;
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
- "endpoint:server");
+ resource_quota, "endpoint:server");
p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
- "endpoint:client");
+ resource_quota, "endpoint:client");
return p;
}
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 8381f4a63a..1b15e0eb4f 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -69,9 +69,17 @@ static int grpc_polling_trace = 0; /* Disabled by default */
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
+/* Uncomment the following to enable extra checks on poll_obj operations */
+/* #define PO_DEBUG */
+
static int grpc_wakeup_signal = -1;
static bool is_grpc_wakeup_signal_initialized = false;
+/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
+ * sure to wake up one polling thread (which can wake up other threads if
+ * needed) */
+static grpc_wakeup_fd global_wakeup_fd;
+
/* Implements the function defined in grpc_posix.h. This function might be
* called before even calling grpc_init() to set either a different signal to
* use. If signum == -1, then the use of signals is disabled */
@@ -90,10 +98,42 @@ void grpc_use_signal(int signum) {
struct polling_island;
+typedef enum {
+ POLL_OBJ_FD,
+ POLL_OBJ_POLLSET,
+ POLL_OBJ_POLLSET_SET
+} poll_obj_type;
+
+typedef struct poll_obj {
+#ifdef PO_DEBUG
+ poll_obj_type obj_type;
+#endif
+ gpr_mu mu;
+ struct polling_island *pi;
+} poll_obj;
+
+const char *poll_obj_string(poll_obj_type po_type) {
+ switch (po_type) {
+ case POLL_OBJ_FD:
+ return "fd";
+ case POLL_OBJ_POLLSET:
+ return "pollset";
+ case POLL_OBJ_POLLSET_SET:
+ return "pollset_set";
+ }
+
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
/*******************************************************************************
* Fd Declarations
*/
+
+#define FD_FROM_PO(po) ((grpc_fd *)(po))
+
struct grpc_fd {
+ poll_obj po;
+
int fd;
/* refst format:
bit 0 : 1=Active / 0=Orphaned
@@ -101,8 +141,6 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
- gpr_mu mu;
-
/* Indicates that the fd is shutdown and that any pending read/write closures
should fail */
bool shutdown;
@@ -115,9 +153,6 @@ struct grpc_fd {
grpc_closure *read_closure;
grpc_closure *write_closure;
- /* The polling island to which this fd belongs to (protected by mu) */
- struct polling_island *polling_island;
-
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
@@ -163,7 +198,7 @@ static void fd_global_shutdown(void);
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */
+#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casing it) */
typedef struct polling_island {
@@ -220,41 +255,21 @@ struct grpc_pollset_worker {
};
struct grpc_pollset {
- gpr_mu mu;
+ poll_obj po;
+
grpc_pollset_worker root_worker;
bool kicked_without_pollers;
bool shutting_down; /* Is the pollset shutting down ? */
bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
grpc_closure *shutdown_done; /* Called after after shutdown is complete */
-
- /* The polling island to which this pollset belongs to */
- struct polling_island *polling_island;
};
/*******************************************************************************
* Pollset-set Declarations
*/
-/* TODO: sreek - Change the pollset_set implementation such that a pollset_set
- * directly points to a polling_island (and adding an fd/pollset/pollset_set to
- * the current pollset_set would result in polling island merges. This would
- * remove the need to maintain fd_count here. This will also significantly
- * simplify the grpc_fd structure since we would no longer need to explicitly
- * maintain the orphaned state */
struct grpc_pollset_set {
- gpr_mu mu;
-
- size_t pollset_count;
- size_t pollset_capacity;
- grpc_pollset **pollsets;
-
- size_t pollset_set_count;
- size_t pollset_set_capacity;
- struct grpc_pollset_set **pollset_sets;
-
- size_t fd_count;
- size_t fd_capacity;
- grpc_fd **fds;
+ poll_obj po;
};
/*******************************************************************************
@@ -437,9 +452,8 @@ static void polling_island_add_wakeup_fd_locked(polling_island *pi,
gpr_asprintf(&err_msg,
"epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
"error: %d (%s)",
- pi->epoll_fd,
- GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), errno,
- strerror(errno));
+ pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(&global_wakeup_fd),
+ errno, strerror(errno));
append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
gpr_free(err_msg);
}
@@ -541,7 +555,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
- polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
+ polling_island_add_wakeup_fd_locked(pi, &global_wakeup_fd, error);
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
@@ -843,11 +857,6 @@ static void polling_island_global_shutdown() {
* alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
* case occurs. */
-/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
- * sure to wake up one polling thread (which can wake up other threads if
- * needed) */
-grpc_wakeup_fd grpc_global_wakeup_fd;
-
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
@@ -916,7 +925,7 @@ static void fd_global_shutdown(void) {
while (fd_freelist != NULL) {
grpc_fd *fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->mu);
+ gpr_mu_destroy(&fd->po.mu);
gpr_free(fd);
}
gpr_mu_destroy(&fd_freelist_mu);
@@ -934,13 +943,17 @@ static grpc_fd *fd_create(int fd, const char *name) {
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->mu);
+ gpr_mu_init(&new_fd->po.mu);
}
- /* Note: It is not really needed to get the new_fd->mu lock here. If this is a
- newly created fd (or an fd we got from the freelist), no one else would be
- holding a lock to it anyway. */
- gpr_mu_lock(&new_fd->mu);
+  /* Note: It is not really necessary to take the new_fd->po.mu lock here. If
+   * this is a newly created fd (or an fd we got from the freelist), no one
+   * else would be holding a lock to it anyway. */
+ gpr_mu_lock(&new_fd->po.mu);
+ new_fd->po.pi = NULL;
+#ifdef PO_DEBUG
+ new_fd->po.obj_type = POLL_OBJ_FD;
+#endif
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
@@ -948,12 +961,11 @@ static grpc_fd *fd_create(int fd, const char *name) {
new_fd->orphaned = false;
new_fd->read_closure = CLOSURE_NOT_READY;
new_fd->write_closure = CLOSURE_NOT_READY;
- new_fd->polling_island = NULL;
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
new_fd->read_notifier_pollset = NULL;
- gpr_mu_unlock(&new_fd->mu);
+ gpr_mu_unlock(&new_fd->po.mu);
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
@@ -965,17 +977,13 @@ static grpc_fd *fd_create(int fd, const char *name) {
return new_fd;
}
-static bool fd_is_orphaned(grpc_fd *fd) {
- return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
-}
-
static int fd_wrapped_fd(grpc_fd *fd) {
int ret_fd = -1;
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
if (!fd->orphaned) {
ret_fd = fd->fd;
}
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
return ret_fd;
}
@@ -987,7 +995,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_error *error = GRPC_ERROR_NONE;
polling_island *unref_pi = NULL;
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
fd->on_done_closure = on_done;
/* If release_fd is not NULL, we should be relinquishing control of the file
@@ -1007,25 +1015,25 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* Remove the fd from the polling island:
- Get a lock on the latest polling island (i.e the last island in the
- linked list pointed by fd->polling_island). This is the island that
+ linked list pointed by fd->po.pi). This is the island that
would actually contain the fd
- Remove the fd from the latest polling island
- Unlock the latest polling island
- - Set fd->polling_island to NULL (but remove the ref on the polling island
+ - Set fd->po.pi to NULL (but remove the ref on the polling island
before doing this.) */
- if (fd->polling_island != NULL) {
- polling_island *pi_latest = polling_island_lock(fd->polling_island);
+ if (fd->po.pi != NULL) {
+ polling_island *pi_latest = polling_island_lock(fd->po.pi);
polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error);
gpr_mu_unlock(&pi_latest->mu);
- unref_pi = fd->polling_island;
- fd->polling_island = NULL;
+ unref_pi = fd->po.pi;
+ fd->po.pi = NULL;
}
grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error),
NULL);
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
if (unref_pi != NULL) {
/* Unref stale polling island here, outside the fd lock above.
@@ -1090,23 +1098,23 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
grpc_pollset *notifier = NULL;
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
notifier = fd->read_notifier_pollset;
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
return notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
const bool r = fd->shutdown;
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
return r;
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
/* Do the actual shutdown only once */
if (!fd->shutdown) {
fd->shutdown = true;
@@ -1117,28 +1125,28 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
set_ready_locked(exec_ctx, fd, &fd->read_closure);
set_ready_locked(exec_ctx, fd, &fd->write_closure);
}
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- gpr_mu_lock(&fd->mu);
- grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
- (grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
+ grpc_workqueue *workqueue =
+ GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
+ gpr_mu_unlock(&fd->po.mu);
return workqueue;
}
@@ -1163,11 +1171,11 @@ static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
poller_kick_init();
- return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
+ return grpc_wakeup_fd_init(&global_wakeup_fd);
}
static void pollset_global_shutdown(void) {
- grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
+ grpc_wakeup_fd_destroy(&global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
}
@@ -1274,12 +1282,16 @@ static grpc_error *pollset_kick(grpc_pollset *p,
}
static grpc_error *kick_poller(void) {
- return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
+ return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->mu);
- *mu = &pollset->mu;
+ gpr_mu_init(&pollset->po.mu);
+ *mu = &pollset->po.mu;
+ pollset->po.pi = NULL;
+#ifdef PO_DEBUG
+ pollset->po.obj_type = POLL_OBJ_POLLSET;
+#endif
pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
pollset->kicked_without_pollers = false;
@@ -1287,8 +1299,6 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->shutting_down = false;
pollset->finish_shutdown_called = false;
pollset->shutdown_done = NULL;
-
- pollset->polling_island = NULL;
}
/* Convert a timespec to milliseconds:
@@ -1318,26 +1328,26 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
- /* Need the fd->mu since we might be racing with fd_notify_on_read */
- gpr_mu_lock(&fd->mu);
+ /* Need the fd->po.mu since we might be racing with fd_notify_on_read */
+ gpr_mu_lock(&fd->po.mu);
set_ready_locked(exec_ctx, fd, &fd->read_closure);
fd->read_notifier_pollset = notifier;
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- /* Need the fd->mu since we might be racing with fd_notify_on_write */
- gpr_mu_lock(&fd->mu);
+ /* Need the fd->po.mu since we might be racing with fd_notify_on_write */
+ gpr_mu_lock(&fd->po.mu);
set_ready_locked(exec_ctx, fd, &fd->write_closure);
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&fd->po.mu);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
grpc_pollset *ps, char *reason) {
- if (ps->polling_island != NULL) {
- PI_UNREF(exec_ctx, ps->polling_island, reason);
+ if (ps->po.pi != NULL) {
+ PI_UNREF(exec_ctx, ps->po.pi, reason);
}
- ps->polling_island = NULL;
+ ps->po.pi = NULL;
}
static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
@@ -1347,12 +1357,12 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
pollset->finish_shutdown_called = true;
- /* Release the ref and set pollset->polling_island to NULL */
+ /* Release the ref and set pollset->po.pi to NULL */
pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
}
-/* pollset->mu lock must be held by the caller before calling this */
+/* pollset->po.mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_TIMER_BEGIN("pollset_shutdown", 0);
@@ -1377,7 +1387,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* here */
static void pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->mu);
+ gpr_mu_destroy(&pollset->po.mu);
}
static void pollset_reset(grpc_pollset *pollset) {
@@ -1387,7 +1397,7 @@ static void pollset_reset(grpc_pollset *pollset) {
pollset->finish_shutdown_called = false;
pollset->kicked_without_pollers = false;
pollset->shutdown_done = NULL;
- GPR_ASSERT(pollset->polling_island == NULL);
+ GPR_ASSERT(pollset->po.pi == NULL);
}
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
@@ -1427,7 +1437,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
  /* We need to get the epoll_fd to wait on. The epoll_fd is inside the
- latest polling island pointed by pollset->polling_island.
+     latest polling island pointed to by pollset->po.pi.
Since epoll_fd is immutable, we can read it without obtaining the polling
island lock. There is however a possibility that the polling island (from
@@ -1436,36 +1446,36 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
     right-away from epoll_wait() and pick up the latest polling_island the next
     time this function (i.e. pollset_work_and_unlock()) is called */
- if (pollset->polling_island == NULL) {
- pollset->polling_island = polling_island_create(exec_ctx, NULL, error);
- if (pollset->polling_island == NULL) {
+ if (pollset->po.pi == NULL) {
+ pollset->po.pi = polling_island_create(exec_ctx, NULL, error);
+ if (pollset->po.pi == NULL) {
GPR_TIMER_END("pollset_work_and_unlock", 0);
return; /* Fatal error. We cannot continue */
}
- PI_ADD_REF(pollset->polling_island, "ps");
+ PI_ADD_REF(pollset->po.pi, "ps");
GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
- (void *)pollset, (void *)pollset->polling_island);
+ (void *)pollset, (void *)pollset->po.pi);
}
- pi = polling_island_maybe_get_latest(pollset->polling_island);
+ pi = polling_island_maybe_get_latest(pollset->po.pi);
epoll_fd = pi->epoll_fd;
- /* Update the pollset->polling_island since the island being pointed by
- pollset->polling_island maybe older than the one pointed by pi) */
- if (pollset->polling_island != pi) {
+  /* Update the pollset->po.pi since the island being pointed to by
+     pollset->po.pi may be older than the one pointed to by pi */
+ if (pollset->po.pi != pi) {
/* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
polling island to be deleted */
PI_ADD_REF(pi, "ps");
- PI_UNREF(exec_ctx, pollset->polling_island, "ps");
- pollset->polling_island = pi;
+ PI_UNREF(exec_ctx, pollset->po.pi, "ps");
+ pollset->po.pi = pi;
}
/* Add an extra ref so that the island does not get destroyed (which means
     the epoll_fd won't be closed) while we are doing an epoll_wait() on the
epoll_fd */
PI_ADD_REF(pi, "ps_work");
- gpr_mu_unlock(&pollset->mu);
+ gpr_mu_unlock(&pollset->po.mu);
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll... so we skip that and
@@ -1501,13 +1511,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &grpc_global_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
+ if (data_ptr == &global_wakeup_fd) {
+ append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
+ append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
@@ -1540,17 +1548,17 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(pi != NULL);
/* Before leaving, release the extra ref we added to the polling island. It
- is important to use "pi" here (i.e our old copy of pollset->polling_island
+ is important to use "pi" here (i.e our old copy of pollset->po.pi
that we got before releasing the polling island lock). This is because
- pollset->polling_island pointer might get udpated in other parts of the
+     pollset->po.pi pointer might get updated in other parts of the
code when there is an island merge while we are doing epoll_wait() above */
PI_UNREF(exec_ctx, pi, "ps_work");
GPR_TIMER_END("pollset_work_and_unlock", 0);
}
-/* pollset->mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->mu)
+/* pollset->po.mu lock must be held by the caller before calling this.
+ The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -1620,7 +1628,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&g_orig_sigmask, &error);
grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
+ gpr_mu_lock(&pollset->po.mu);
/* Note: There is no need to reset worker.is_kicked to 0 since we are no
longer going to use this worker */
@@ -1640,9 +1648,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
finish_shutdown_locked(exec_ctx, pollset);
- gpr_mu_unlock(&pollset->mu);
+ gpr_mu_unlock(&pollset->po.mu);
grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
+ gpr_mu_lock(&pollset->po.mu);
}
*worker_hdl = NULL;
@@ -1656,124 +1664,160 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return error;
}
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- GPR_TIMER_BEGIN("pollset_add_fd", 0);
+static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
+ poll_obj_type bag_type, poll_obj *item,
+ poll_obj_type item_type) {
+ GPR_TIMER_BEGIN("add_poll_object", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- gpr_mu_lock(&pollset->mu);
- gpr_mu_lock(&fd->mu);
+#ifdef PO_DEBUG
+ GPR_ASSERT(item->obj_type == item_type);
+ GPR_ASSERT(bag->obj_type == bag_type);
+#endif
+ grpc_error *error = GRPC_ERROR_NONE;
polling_island *pi_new = NULL;
+ gpr_mu_lock(&bag->mu);
+ gpr_mu_lock(&item->mu);
+
retry:
- /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and
- * equal, do nothing.
- * 2) If fd->polling_island and pollset->polling_island are both NULL, create
- * a new polling island (with a refcount of 2) and make the polling_island
- * fields in both fd and pollset to point to the new island
- * 3) If one of fd->polling_island or pollset->polling_island is NULL, update
- * the NULL polling_island field to point to the non-NULL polling_island
- * field (ensure that the refcount on the polling island is incremented by
- * 1 to account for the newly added reference)
- * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL
- * and different, merge both the polling islands and update the
- * polling_island fields in both fd and pollset to point to the merged
- * polling island.
+ /*
+ * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing
+ * 2) If item->pi and bag->pi are both NULL, create a new polling island (with
+ * a refcount of 2) and point item->pi and bag->pi to the new island
+ * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to
+ * the other's non-NULL pi
+   * 4) Finally, if item->pi and bag->pi are non-NULL and not equal, merge the
+ * polling islands and update item->pi and bag->pi to point to the new
+ * island
*/
- if (fd->orphaned) {
- gpr_mu_unlock(&fd->mu);
- gpr_mu_unlock(&pollset->mu);
- /* early out */
+ /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already
+ * orphaned */
+ if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) {
+ gpr_mu_unlock(&item->mu);
+ gpr_mu_unlock(&bag->mu);
return;
}
- if (fd->polling_island == pollset->polling_island) {
- pi_new = fd->polling_island;
+ if (item->pi == bag->pi) {
+ pi_new = item->pi;
if (pi_new == NULL) {
- /* Unlock before creating a new polling island: the polling island will
- create a workqueue which creates a file descriptor, and holding an fd
- lock here can eventually cause a loop to appear to TSAN (making it
- unhappy). We don't think it's a real loop (there's an epoch point where
- that loop possibility disappears), but the advantages of keeping TSAN
- happy outweigh any performance advantage we might have by keeping the
- lock held. */
- gpr_mu_unlock(&fd->mu);
- pi_new = polling_island_create(exec_ctx, fd, &error);
- gpr_mu_lock(&fd->mu);
- /* Need to reverify any assumptions made between the initial lock and
- getting to this branch: if they've changed, we need to throw away our
- work and figure things out again. */
- if (fd->polling_island != NULL) {
- GRPC_POLLING_TRACE(
- "pollset_add_fd: Raced creating new polling island. pi_new: %p "
- "(fd: %d, pollset: %p)",
- (void *)pi_new, fd->fd, (void *)pollset);
- PI_ADD_REF(pi_new, "dance_of_destruction");
- PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
- goto retry;
+ /* GPR_ASSERT(item->pi == bag->pi == NULL) */
+
+ /* If we are adding an fd to a bag (i.e pollset or pollset_set), then
+ * we need to do some extra work to make TSAN happy */
+ if (item_type == POLL_OBJ_FD) {
+ /* Unlock before creating a new polling island: the polling island will
+ create a workqueue which creates a file descriptor, and holding an fd
+ lock here can eventually cause a loop to appear to TSAN (making it
+ unhappy). We don't think it's a real loop (there's an epoch point
+ where that loop possibility disappears), but the advantages of
+ keeping TSAN happy outweigh any performance advantage we might have
+ by keeping the lock held. */
+ gpr_mu_unlock(&item->mu);
+ pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
+ gpr_mu_lock(&item->mu);
+
+ /* Need to reverify any assumptions made between the initial lock and
+ getting to this branch: if they've changed, we need to throw away our
+ work and figure things out again. */
+ if (item->pi != NULL) {
+ GRPC_POLLING_TRACE(
+ "add_poll_object: Raced creating new polling island. pi_new: %p "
+ "(fd: %d, %s: %p)",
+ (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
+ (void *)bag);
+ /* No need to lock 'pi_new' here since this is a new polling island
+ * and no one has a reference to it yet */
+ polling_island_remove_all_fds_locked(pi_new, true, &error);
+
+ /* Ref and unref so that the polling island gets deleted during unref
+ */
+ PI_ADD_REF(pi_new, "dance_of_destruction");
+ PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
+ goto retry;
+ }
} else {
- GRPC_POLLING_TRACE(
- "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, "
- "pollset: %p)",
- (void *)pi_new, fd->fd, (void *)pollset);
+ pi_new = polling_island_create(exec_ctx, NULL, &error);
}
+
+ GRPC_POLLING_TRACE(
+ "add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
+ "%s: %p)",
+ (void *)pi_new, poll_obj_string(item_type), (void *)item,
+ poll_obj_string(bag_type), (void *)bag);
+ } else {
+ GRPC_POLLING_TRACE(
+ "add_poll_object: Same polling island. pi: %p (%s, %s)",
+ (void *)pi_new, poll_obj_string(item_type),
+ poll_obj_string(bag_type));
+ }
+ } else if (item->pi == NULL) {
+ /* GPR_ASSERT(bag->pi != NULL) */
+    /* Make pi_new point to latest pi */
+ pi_new = polling_island_lock(bag->pi);
+
+ if (item_type == POLL_OBJ_FD) {
+ grpc_fd *fd = FD_FROM_PO(item);
+ polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
}
- } else if (fd->polling_island == NULL) {
- pi_new = polling_island_lock(pollset->polling_island);
- polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
- gpr_mu_unlock(&pi_new->mu);
+ gpr_mu_unlock(&pi_new->mu);
GRPC_POLLING_TRACE(
- "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, "
- "pollset->pi: %p)",
- (void *)pi_new, fd->fd, (void *)pollset,
- (void *)pollset->polling_island);
- } else if (pollset->polling_island == NULL) {
- pi_new = polling_island_lock(fd->polling_island);
+ "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
+ "bag(%s): %p)",
+ (void *)pi_new, poll_obj_string(item_type), (void *)item,
+ poll_obj_string(bag_type), (void *)bag);
+ } else if (bag->pi == NULL) {
+ /* GPR_ASSERT(item->pi != NULL) */
+    /* Make pi_new point to latest pi */
+ pi_new = polling_island_lock(item->pi);
gpr_mu_unlock(&pi_new->mu);
-
GRPC_POLLING_TRACE(
- "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: "
- "%p, fd->pi: %p",
- (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island);
+ "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
+ "bag(%s): %p)",
+ (void *)pi_new, poll_obj_string(item_type), (void *)item,
+ poll_obj_string(bag_type), (void *)bag);
} else {
- pi_new = polling_island_merge(fd->polling_island, pollset->polling_island,
- &error);
+ pi_new = polling_island_merge(item->pi, bag->pi, &error);
GRPC_POLLING_TRACE(
- "pollset_add_fd: polling islands merged. pi_new: %p (fd: %d, pollset: "
- "%p, fd->pi: %p, pollset->pi: %p)",
- (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island,
- (void *)pollset->polling_island);
+ "add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
+ "bag(%s): %p)",
+ (void *)pi_new, poll_obj_string(item_type), (void *)item,
+ poll_obj_string(bag_type), (void *)bag);
}
- /* At this point, pi_new is the polling island that both fd->polling_island
- and pollset->polling_island must be pointing to */
+ /* At this point, pi_new is the polling island that both item->pi and bag->pi
+ MUST be pointing to */
- if (fd->polling_island != pi_new) {
- PI_ADD_REF(pi_new, "fd");
- if (fd->polling_island != NULL) {
- PI_UNREF(exec_ctx, fd->polling_island, "fd");
+ if (item->pi != pi_new) {
+ PI_ADD_REF(pi_new, poll_obj_string(item_type));
+ if (item->pi != NULL) {
+ PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
}
- fd->polling_island = pi_new;
+ item->pi = pi_new;
}
- if (pollset->polling_island != pi_new) {
- PI_ADD_REF(pi_new, "ps");
- if (pollset->polling_island != NULL) {
- PI_UNREF(exec_ctx, pollset->polling_island, "ps");
+ if (bag->pi != pi_new) {
+ PI_ADD_REF(pi_new, poll_obj_string(bag_type));
+ if (bag->pi != NULL) {
+ PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
}
- pollset->polling_island = pi_new;
+ bag->pi = pi_new;
}
- gpr_mu_unlock(&fd->mu);
- gpr_mu_unlock(&pollset->mu);
+ gpr_mu_unlock(&item->mu);
+ gpr_mu_unlock(&bag->mu);
- GRPC_LOG_IF_ERROR("pollset_add_fd", error);
+ GRPC_LOG_IF_ERROR("add_poll_object", error);
+ GPR_TIMER_END("add_poll_object", 0);
+}
- GPR_TIMER_END("pollset_add_fd", 0);
+static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
+ add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
+ POLL_OBJ_FD);
}
/*******************************************************************************
@@ -1781,142 +1825,60 @@ retry:
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
- memset(pollset_set, 0, sizeof(*pollset_set));
- gpr_mu_init(&pollset_set->mu);
- return pollset_set;
+ grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
+ gpr_mu_init(&pss->po.mu);
+ pss->po.pi = NULL;
+#ifdef PO_DEBUG
+ pss->po.obj_type = POLL_OBJ_POLLSET_SET;
+#endif
+ return pss;
}
-static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
- size_t i;
- gpr_mu_destroy(&pollset_set->mu);
- for (i = 0; i < pollset_set->fd_count; i++) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+static void pollset_set_destroy(grpc_pollset_set *pss) {
+ gpr_mu_destroy(&pss->po.mu);
+
+ if (pss->po.pi != NULL) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
}
- gpr_free(pollset_set->pollsets);
- gpr_free(pollset_set->pollset_sets);
- gpr_free(pollset_set->fds);
- gpr_free(pollset_set);
+
+ gpr_free(pss);
}
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- if (pollset_set->fd_count == pollset_set->fd_capacity) {
- pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = gpr_realloc(
- pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
- }
- GRPC_FD_REF(fd, "pollset_set");
- pollset_set->fds[pollset_set->fd_count++] = fd;
- for (i = 0; i < pollset_set->pollset_count; i++) {
- pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
- }
- for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
- }
- gpr_mu_unlock(&pollset_set->mu);
+static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
+ grpc_fd *fd) {
+ add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
+ POLL_OBJ_FD);
}
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- for (i = 0; i < pollset_set->fd_count; i++) {
- if (pollset_set->fds[i] == fd) {
- pollset_set->fd_count--;
- GPR_SWAP(grpc_fd *, pollset_set->fds[i],
- pollset_set->fds[pollset_set->fd_count]);
- GRPC_FD_UNREF(fd, "pollset_set");
- break;
- }
- }
- for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
- }
- gpr_mu_unlock(&pollset_set->mu);
+static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
+ grpc_fd *fd) {
+ /* Nothing to do */
}
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
- size_t i, j;
- gpr_mu_lock(&pollset_set->mu);
- if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
- pollset_set->pollset_capacity =
- GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets =
- gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
- sizeof(*pollset_set->pollsets));
- }
- pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
- for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
- if (fd_is_orphaned(pollset_set->fds[i])) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
- } else {
- pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
- pollset_set->fds[j++] = pollset_set->fds[i];
- }
- }
- pollset_set->fd_count = j;
- gpr_mu_unlock(&pollset_set->mu);
+ grpc_pollset_set *pss, grpc_pollset *ps) {
+ add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
+ POLL_OBJ_POLLSET);
}
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- for (i = 0; i < pollset_set->pollset_count; i++) {
- if (pollset_set->pollsets[i] == pollset) {
- pollset_set->pollset_count--;
- GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
- pollset_set->pollsets[pollset_set->pollset_count]);
- break;
- }
- }
- gpr_mu_unlock(&pollset_set->mu);
+ grpc_pollset_set *pss, grpc_pollset *ps) {
+ /* Nothing to do */
}
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
- size_t i, j;
- gpr_mu_lock(&bag->mu);
- if (bag->pollset_set_count == bag->pollset_set_capacity) {
- bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets =
- gpr_realloc(bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
- }
- bag->pollset_sets[bag->pollset_set_count++] = item;
- for (i = 0, j = 0; i < bag->fd_count; i++) {
- if (fd_is_orphaned(bag->fds[i])) {
- GRPC_FD_UNREF(bag->fds[i], "pollset_set");
- } else {
- pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
- bag->fds[j++] = bag->fds[i];
- }
- }
- bag->fd_count = j;
- gpr_mu_unlock(&bag->mu);
+ add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
+ POLL_OBJ_POLLSET_SET);
}
static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
- size_t i;
- gpr_mu_lock(&bag->mu);
- for (i = 0; i < bag->pollset_set_count; i++) {
- if (bag->pollset_sets[i] == item) {
- bag->pollset_set_count--;
- GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
- bag->pollset_sets[bag->pollset_set_count]);
- break;
- }
- }
- gpr_mu_unlock(&bag->mu);
+ /* Nothing to do */
}
/* Test helper functions
@@ -1924,9 +1886,9 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
void *grpc_fd_get_polling_island(grpc_fd *fd) {
polling_island *pi;
- gpr_mu_lock(&fd->mu);
- pi = fd->polling_island;
- gpr_mu_unlock(&fd->mu);
+ gpr_mu_lock(&fd->po.mu);
+ pi = fd->po.pi;
+ gpr_mu_unlock(&fd->po.mu);
return pi;
}
@@ -1934,9 +1896,9 @@ void *grpc_fd_get_polling_island(grpc_fd *fd) {
void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
polling_island *pi;
- gpr_mu_lock(&ps->mu);
- pi = ps->polling_island;
- gpr_mu_unlock(&ps->mu);
+ gpr_mu_lock(&ps->po.mu);
+ pi = ps->po.pi;
+ gpr_mu_unlock(&ps->po.mu);
return pi;
}
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
deleted file mode 100644
index bf51404203..0000000000
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ /dev/null
@@ -1,2076 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* This file will be removed shortly: it's here to keep refactoring
- * steps simple and auditable.
- * It's the combination of the old files:
- * - fd_posix.{h,c}
- * - pollset_posix.{h,c}
- * - pullset_multipoller_with_{poll,epoll}.{h,c}
- * The new version will be split into:
- * - ev_poll_posix.{h,c}
- * - ev_epoll_posix.{h,c}
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_POSIX_SOCKET
-
-#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <poll.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-
-/*******************************************************************************
- * FD declarations
- */
-
-typedef struct grpc_fd_watcher {
- struct grpc_fd_watcher *next;
- struct grpc_fd_watcher *prev;
- grpc_pollset *pollset;
- grpc_pollset_worker *worker;
- grpc_fd *fd;
-} grpc_fd_watcher;
-
-struct grpc_fd {
- int fd;
- /* refst format:
- bit0: 1=active/0=orphaned
- bit1-n: refcount
- meaning that mostly we ref by two to avoid altering the orphaned bit,
- and just unref by 1 when we're ready to flag the object as orphaned */
- gpr_atm refst;
-
- gpr_mu mu;
- int shutdown;
- int closed;
- int released;
-
- /* The watcher list.
-
- The following watcher related fields are protected by watcher_mu.
-
- An fd_watcher is an ephemeral object created when an fd wants to
- begin polling, and destroyed after the poll.
-
- It denotes the fd's interest in whether to read poll or write poll
- or both or neither on this fd.
-
- If a watcher is asked to poll for reads or writes, the read_watcher
- or write_watcher fields are set respectively. A watcher may be asked
- to poll for both, in which case both fields will be set.
-
- read_watcher and write_watcher may be NULL if no watcher has been
- asked to poll for reads or writes.
-
- If an fd_watcher is not asked to poll for reads or writes, it's added
- to a linked list of inactive watchers, rooted at inactive_watcher_root.
- If at a later time there becomes need of a poller to poll, one of
- the inactive pollers may be kicked out of their poll loops to take
- that responsibility. */
- grpc_fd_watcher inactive_watcher_root;
- grpc_fd_watcher *read_watcher;
- grpc_fd_watcher *write_watcher;
-
- grpc_closure *read_closure;
- grpc_closure *write_closure;
-
- struct grpc_fd *freelist_next;
-
- grpc_closure *on_done_closure;
-
- grpc_iomgr_object iomgr_object;
-
- /* The pollset that last noticed and notified that the fd is readable */
- grpc_pollset *read_notifier_pollset;
-};
-
-/* Begin polling on an fd.
- Registers that the given pollset is interested in this fd - so that if read
- or writability interest changes, the pollset can be kicked to pick up that
- new interest.
- Return value is:
- (fd_needs_read? read_mask : 0) | (fd_needs_write? write_mask : 0)
- i.e. a combination of read_mask and write_mask determined by the fd's current
- interest in said events.
- Polling strategies that do not need to alter their behavior depending on the
- fd's current interest (such as epoll) do not need to call this function.
- MUST NOT be called with a pollset lock taken */
-static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- grpc_pollset_worker *worker, uint32_t read_mask,
- uint32_t write_mask, grpc_fd_watcher *rec);
-/* Complete polling previously started with fd_begin_poll
- MUST NOT be called with a pollset lock taken
- if got_read or got_write are 1, also does the become_{readable,writable} as
- appropriate. */
-static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
- int got_read, int got_write,
- grpc_pollset *read_notifier_pollset);
-
-/* Return 1 if this fd is orphaned, 0 otherwise */
-static bool fd_is_orphaned(grpc_fd *fd);
-
-/* Reference counting for fds */
-/*#define GRPC_FD_REF_COUNT_DEBUG*/
-#ifdef GRPC_FD_REF_COUNT_DEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
-#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
-#endif
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-#define CLOSURE_NOT_READY ((grpc_closure *)0)
-#define CLOSURE_READY ((grpc_closure *)1)
-
-/*******************************************************************************
- * pollset declarations
- */
-
-typedef struct grpc_pollset_vtable grpc_pollset_vtable;
-
-typedef struct grpc_cached_wakeup_fd {
- grpc_wakeup_fd fd;
- struct grpc_cached_wakeup_fd *next;
-} grpc_cached_wakeup_fd;
-
-struct grpc_pollset_worker {
- grpc_cached_wakeup_fd *wakeup_fd;
- int reevaluate_polling_on_wakeup;
- int kicked_specifically;
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-};
-
-struct grpc_pollset {
- /* pollsets under posix can mutate representation as fds are added and
- removed.
- For example, we may choose a poll() based implementation on linux for
- few fds, and an epoll() based implementation for many fds */
- const grpc_pollset_vtable *vtable;
- gpr_mu mu;
- grpc_pollset_worker root_worker;
- int in_flight_cbs;
- int shutting_down;
- int called_shutdown;
- int kicked_without_pollers;
- grpc_closure *shutdown_done;
- grpc_closure_list idle_jobs;
- union {
- int fd;
- void *ptr;
- } data;
- /* Local cache of eventfds for workers */
- grpc_cached_wakeup_fd *local_wakeup_cache;
-};
-
-struct grpc_pollset_vtable {
- void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd, int and_unlock_pollset);
- grpc_error *(*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now);
- void (*finish_shutdown)(grpc_pollset *pollset);
- void (*destroy)(grpc_pollset *pollset);
-};
-
-/* Add an fd to a pollset */
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd);
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
-
-/* Convert a timespec to milliseconds:
- - very small or negative poll times are clamped to zero to do a
- non-blocking poll (which becomes spin polling)
- - other small values are rounded up to one millisecond
- - longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now);
-
-/* Allow kick to wakeup the currently polling worker */
-#define GRPC_POLLSET_CAN_KICK_SELF 1
-/* Force the wakee to repoll when awoken */
-#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
-/* As per pollset_kick, with an extended set of flags (defined above)
- -- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
- uint32_t flags) GRPC_MUST_USE_RESULT;
-
-/* turn a pollset into a multipoller: platform specific */
-typedef void (*platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- struct grpc_fd **fds,
- size_t fd_count);
-static platform_become_multipoller_type platform_become_multipoller;
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *pollset);
-
-static void remove_fd_from_all_epoll_sets(int fd);
-
-/*******************************************************************************
- * pollset_set definitions
- */
-
-struct grpc_pollset_set {
- gpr_mu mu;
-
- size_t pollset_count;
- size_t pollset_capacity;
- grpc_pollset **pollsets;
-
- size_t pollset_set_count;
- size_t pollset_set_capacity;
- struct grpc_pollset_set **pollset_sets;
-
- size_t fd_count;
- size_t fd_capacity;
- grpc_fd **fds;
-};
-
-/*******************************************************************************
- * fd_posix.c
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-/* TODO(klempner): We could use some form of polling generation count to know
- * when these are safe to free. */
-/* TODO(klempner): Consider disabling freelisting if we don't have multiple
- * threads in poll on the same fd */
-/* TODO(klempner): Batch these allocations to reduce fragmentation */
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-static void freelist_fd(grpc_fd *fd) {
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
- gpr_mu_unlock(&fd_freelist_mu);
-}
-
-static grpc_fd *alloc_fd(int fd) {
- grpc_fd *r = NULL;
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- r = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
- if (r == NULL) {
- r = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&r->mu);
- }
-
- gpr_mu_lock(&r->mu);
- gpr_atm_rel_store(&r->refst, 1);
- r->shutdown = 0;
- r->read_closure = CLOSURE_NOT_READY;
- r->write_closure = CLOSURE_NOT_READY;
- r->fd = fd;
- r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
- &r->inactive_watcher_root;
- r->freelist_next = NULL;
- r->read_watcher = r->write_watcher = NULL;
- r->on_done_closure = NULL;
- r->closed = 0;
- r->released = 0;
- r->read_notifier_pollset = NULL;
- gpr_mu_unlock(&r->mu);
- return r;
-}
-
-static void destroy(grpc_fd *fd) {
- gpr_mu_destroy(&fd->mu);
- gpr_free(fd);
-}
-
-#ifdef GRPC_FD_REF_COUNT_DEBUG
-#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
-#else
-#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
-#endif
- GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
-}
-
-#ifdef GRPC_FD_REF_COUNT_DEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
-#else
-static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
-#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
- if (old == n) {
- freelist_fd(fd);
- } else {
- GPR_ASSERT(old > n);
- }
-}
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- destroy(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *r = alloc_fd(fd);
- char *name2;
- gpr_asprintf(&name2, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&r->iomgr_object, name2);
- gpr_free(name2);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
-#endif
- return r;
-}
-
-static bool fd_is_orphaned(grpc_fd *fd) {
- return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
-}
-
-static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
- gpr_mu_lock(&watcher->pollset->mu);
- GPR_ASSERT(watcher->worker);
- grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
- gpr_mu_unlock(&watcher->pollset->mu);
- return err;
-}
-
-static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
- if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next);
- } else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
- } else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher);
- }
-}
-
-static void wake_all_watchers_locked(grpc_fd *fd) {
- grpc_fd_watcher *watcher;
- for (watcher = fd->inactive_watcher_root.next;
- watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher);
- }
- if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
- }
- if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher);
- }
-}
-
-static int has_watchers(grpc_fd *fd) {
- return fd->read_watcher != NULL || fd->write_watcher != NULL ||
- fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
-}
-
-static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- fd->closed = 1;
- if (!fd->released) {
- close(fd->fd);
- } else {
- remove_fd_from_all_epoll_sets(fd->fd);
- }
- grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- if (fd->released || fd->closed) {
- return -1;
- } else {
- return fd->fd;
- }
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- const char *reason) {
- fd->on_done_closure = on_done;
- fd->released = release_fd != NULL;
- if (!fd->released) {
- shutdown(fd->fd, SHUT_RDWR);
- } else {
- *release_fd = fd->fd;
- }
- gpr_mu_lock(&fd->mu);
- REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
- if (!has_watchers(fd)) {
- close_fd_locked(exec_ctx, fd);
- } else {
- wake_all_watchers_locked(fd);
- }
- gpr_mu_unlock(&fd->mu);
- UNREF_BY(fd, 2, reason); /* drop the reference */
-}
-
-/* increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- ref_by(fd, 2, reason, file, line);
-}
-
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- unref_by(fd, 2, reason, file, line);
-}
-#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
-
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
-#endif
-
-static grpc_error *fd_shutdown_error(bool shutdown) {
- if (!shutdown) {
- return GRPC_ERROR_NONE;
- } else {
- return GRPC_ERROR_CREATE("FD shutdown");
- }
-}
-
-static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st, grpc_closure *closure) {
- if (fd->shutdown) {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
- NULL);
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready ==> switch to a waiting state by setting the closure */
- *st = closure;
- } else if (*st == CLOSURE_READY) {
- /* already ready ==> queue the closure to run immediately */
- *st = CLOSURE_NOT_READY;
- grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
- NULL);
- maybe_wake_one_watcher_locked(fd);
- } else {
- /* upcallptr was set to a different closure. This is an error! */
- gpr_log(GPR_ERROR,
- "User called a notify_on function with a previous callback still "
- "pending");
- abort();
- }
-}
-
-/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st) {
- if (*st == CLOSURE_READY) {
- /* duplicate ready ==> ignore */
- return 0;
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready, and not waiting ==> flag ready */
- *st = CLOSURE_READY;
- return 0;
- } else {
- /* waiting ==> queue closure */
- grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
- *st = CLOSURE_NOT_READY;
- return 1;
- }
-}
-
-static void set_read_notifier_pollset_locked(
- grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) {
- fd->read_notifier_pollset = read_notifier_pollset;
-}
-
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- gpr_mu_lock(&fd->mu);
- /* only shutdown once */
- if (!fd->shutdown) {
- fd->shutdown = 1;
- /* signal read/write closed to OS so that future operations fail */
- shutdown(fd->fd, SHUT_RDWR);
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
- }
- gpr_mu_unlock(&fd->mu);
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- gpr_mu_lock(&fd->mu);
- bool r = fd->shutdown;
- gpr_mu_unlock(&fd->mu);
- return r;
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- gpr_mu_lock(&fd->mu);
- notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
- gpr_mu_unlock(&fd->mu);
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- gpr_mu_lock(&fd->mu);
- notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
- gpr_mu_unlock(&fd->mu);
-}
-
-/* Return the read-notifier pollset */
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- grpc_pollset *notifier = NULL;
-
- gpr_mu_lock(&fd->mu);
- notifier = fd->read_notifier_pollset;
- gpr_mu_unlock(&fd->mu);
-
- return notifier;
-}
-
-static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- grpc_pollset_worker *worker, uint32_t read_mask,
- uint32_t write_mask, grpc_fd_watcher *watcher) {
- uint32_t mask = 0;
- grpc_closure *cur;
- int requested;
- /* keep track of pollers that have requested our events, in case they change
- */
- GRPC_FD_REF(fd, "poll");
-
- gpr_mu_lock(&fd->mu);
-
- /* if we are shutdown, then don't add to the watcher set */
- if (fd->shutdown) {
- watcher->fd = NULL;
- watcher->pollset = NULL;
- watcher->worker = NULL;
- gpr_mu_unlock(&fd->mu);
- GRPC_FD_UNREF(fd, "poll");
- return 0;
- }
-
- /* if there is nobody polling for read, but we need to, then start doing so */
- cur = fd->read_closure;
- requested = cur != CLOSURE_READY;
- if (read_mask && fd->read_watcher == NULL && requested) {
- fd->read_watcher = watcher;
- mask |= read_mask;
- }
- /* if there is nobody polling for write, but we need to, then start doing so
- */
- cur = fd->write_closure;
- requested = cur != CLOSURE_READY;
- if (write_mask && fd->write_watcher == NULL && requested) {
- fd->write_watcher = watcher;
- mask |= write_mask;
- }
- /* if not polling, remember this watcher in case we need someone to later */
- if (mask == 0 && worker != NULL) {
- watcher->next = &fd->inactive_watcher_root;
- watcher->prev = watcher->next->prev;
- watcher->next->prev = watcher->prev->next = watcher;
- }
- watcher->pollset = pollset;
- watcher->worker = worker;
- watcher->fd = fd;
- gpr_mu_unlock(&fd->mu);
-
- return mask;
-}
-
-static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
- int got_read, int got_write,
- grpc_pollset *read_notifier_pollset) {
- int was_polling = 0;
- int kick = 0;
- grpc_fd *fd = watcher->fd;
-
- if (fd == NULL) {
- return;
- }
-
- gpr_mu_lock(&fd->mu);
-
- if (watcher == fd->read_watcher) {
- /* remove read watcher, kick if we still need a read */
- was_polling = 1;
- if (!got_read) {
- kick = 1;
- }
- fd->read_watcher = NULL;
- }
- if (watcher == fd->write_watcher) {
- /* remove write watcher, kick if we still need a write */
- was_polling = 1;
- if (!got_write) {
- kick = 1;
- }
- fd->write_watcher = NULL;
- }
- if (!was_polling && watcher->worker != NULL) {
- /* remove from inactive list */
- watcher->next->prev = watcher->prev;
- watcher->prev->next = watcher->next;
- }
- if (got_read) {
- if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
- kick = 1;
- }
-
- if (read_notifier_pollset != NULL) {
- set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
- }
- }
- if (got_write) {
- if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
- kick = 1;
- }
- }
- if (kick) {
- maybe_wake_one_watcher_locked(fd);
- }
- if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
- close_fd_locked(exec_ctx, fd);
- }
- gpr_mu_unlock(&fd->mu);
-
- GRPC_FD_UNREF(fd, "poll");
-}
-
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
-
-/*******************************************************************************
- * pollset_posix.c
- */
-
-GPR_TLS_DECL(g_current_thread_poller);
-GPR_TLS_DECL(g_current_thread_worker);
-
-/** The alarm system needs to be able to wakeup 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch').
- * This wakeup_fd gives us something to alert on when such a case occurs. */
-grpc_wakeup_fd grpc_global_wakeup_fd;
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void kick_append_error(grpc_error **composite, grpc_error *error) {
- if (error == GRPC_ERROR_NONE) return;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE("Kick Failure");
- }
- *composite = grpc_error_add_child(*composite, error);
-}
-
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
- uint32_t flags) {
- GPR_TIMER_BEGIN("pollset_kick_ext", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- /* pollset->mu already held */
- if (specific_worker != NULL) {
- if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
- GPR_TIMER_BEGIN("pollset_kick_ext.broadcast", 0);
- GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
- for (specific_worker = p->root_worker.next;
- specific_worker != &p->root_worker;
- specific_worker = specific_worker->next) {
- kick_append_error(
- &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
- }
- p->kicked_without_pollers = true;
- GPR_TIMER_END("pollset_kick_ext.broadcast", 0);
- } else if (gpr_tls_get(&g_current_thread_worker) !=
- (intptr_t)specific_worker) {
- GPR_TIMER_MARK("different_thread_worker", 0);
- if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
- specific_worker->reevaluate_polling_on_wakeup = true;
- }
- specific_worker->kicked_specifically = true;
- kick_append_error(&error,
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
- } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
- GPR_TIMER_MARK("kick_yoself", 0);
- if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
- specific_worker->reevaluate_polling_on_wakeup = true;
- }
- specific_worker->kicked_specifically = true;
- kick_append_error(&error,
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
- }
- } else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
- GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
- GPR_TIMER_MARK("kick_anonymous", 0);
- specific_worker = pop_front_worker(p);
- if (specific_worker != NULL) {
- if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
- GPR_TIMER_MARK("kick_anonymous_not_self", 0);
- push_back_worker(p, specific_worker);
- specific_worker = pop_front_worker(p);
- if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
- gpr_tls_get(&g_current_thread_worker) ==
- (intptr_t)specific_worker) {
- push_back_worker(p, specific_worker);
- specific_worker = NULL;
- }
- }
- if (specific_worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, specific_worker);
- kick_append_error(
- &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
- }
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick_ext", 0);
- return error;
-}
-
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- return pollset_kick_ext(p, specific_worker, 0);
-}
-
-/* global state management */
-
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_poller);
- gpr_tls_init(&g_current_thread_worker);
- return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
-}
-
-static void pollset_global_shutdown(void) {
- grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
- gpr_tls_destroy(&g_current_thread_poller);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *kick_poller(void) {
- return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
-}
-
-/* main interface */
-
-static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->mu);
- *mu = &pollset->mu;
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->in_flight_cbs = 0;
- pollset->shutting_down = 0;
- pollset->called_shutdown = 0;
- pollset->kicked_without_pollers = 0;
- pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
- pollset->local_wakeup_cache = NULL;
- pollset->kicked_without_pollers = 0;
- become_basic_pollset(pollset, NULL);
-}
-
-static void pollset_destroy(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->in_flight_cbs == 0);
- GPR_ASSERT(!pollset_has_workers(pollset));
- GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
- pollset->vtable->destroy(pollset);
- while (pollset->local_wakeup_cache) {
- grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
- grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
- gpr_free(pollset->local_wakeup_cache);
- pollset->local_wakeup_cache = next;
- }
- gpr_mu_destroy(&pollset->mu);
-}
-
-static void pollset_reset(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(pollset->in_flight_cbs == 0);
- GPR_ASSERT(!pollset_has_workers(pollset));
- GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
- pollset->vtable->destroy(pollset);
- pollset->shutting_down = 0;
- pollset->called_shutdown = 0;
- pollset->kicked_without_pollers = 0;
- become_basic_pollset(pollset, NULL);
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- gpr_mu_lock(&pollset->mu);
- pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
-/* the following (enabled only in debug) will reacquire and then release
- our lock - meaning that if the unlocking flag passed to add_fd above is
- not respected, the code will deadlock (in a way that we have a chance of
- debugging) */
-#ifndef NDEBUG
- gpr_mu_lock(&pollset->mu);
- gpr_mu_unlock(&pollset->mu);
-#endif
-}
-
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
- pollset->vtable->finish_shutdown(pollset);
- grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
-}
-
-static void work_combine_error(grpc_error **composite, grpc_error *error) {
- if (error == GRPC_ERROR_NONE) return;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE("pollset_work");
- }
- *composite = grpc_error_add_child(*composite, error);
-}
-
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- grpc_pollset_worker worker;
- *worker_hdl = &worker;
- grpc_error *error = GRPC_ERROR_NONE;
-
- /* pollset->mu already held */
- int added_worker = 0;
- int locked = 1;
- int queued_work = 0;
- int keep_polling = 0;
- GPR_TIMER_BEGIN("pollset_work", 0);
- /* this must happen before we (potentially) drop pollset->mu */
- worker.next = worker.prev = NULL;
- worker.reevaluate_polling_on_wakeup = 0;
- if (pollset->local_wakeup_cache != NULL) {
- worker.wakeup_fd = pollset->local_wakeup_cache;
- pollset->local_wakeup_cache = worker.wakeup_fd->next;
- } else {
- worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
- error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
- if (error != GRPC_ERROR_NONE) {
- return error;
- }
- }
- worker.kicked_specifically = 0;
- /* If there's work waiting for the pollset to be idle, and the
- pollset is idle, then do that work */
- if (!pollset_has_workers(pollset) &&
- !grpc_closure_list_empty(pollset->idle_jobs)) {
- GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
- goto done;
- }
- /* If we're shutting down then we don't execute any extended work */
- if (pollset->shutting_down) {
- GPR_TIMER_MARK("pollset_work.shutting_down", 0);
- goto done;
- }
- /* Give do_promote priority so we don't starve it out */
- if (pollset->in_flight_cbs) {
- GPR_TIMER_MARK("pollset_work.in_flight_cbs", 0);
- gpr_mu_unlock(&pollset->mu);
- locked = 0;
- goto done;
- }
- /* Start polling, and keep doing so while we're being asked to
- re-evaluate our pollers (this allows poll() based pollers to
- ensure they don't miss wakeups) */
- keep_polling = 1;
- while (keep_polling) {
- keep_polling = 0;
- if (!pollset->kicked_without_pollers) {
- if (!added_worker) {
- push_front_worker(pollset, &worker);
- added_worker = 1;
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
- }
- gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
- GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
- work_combine_error(&error,
- pollset->vtable->maybe_work_and_unlock(
- exec_ctx, pollset, &worker, deadline, now));
- GPR_TIMER_END("maybe_work_and_unlock", 0);
- locked = 0;
- gpr_tls_set(&g_current_thread_poller, 0);
- } else {
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- }
- /* Finished execution - start cleaning up.
- Note that we may arrive here from outside the enclosing while() loop.
- In that case we won't loop again, since we haven't added the worker to
- the worker list, which means nobody could ask us to re-evaluate polling. */
- done:
- if (!locked) {
- queued_work |= grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- locked = 1;
- }
- /* If we're forced to re-evaluate polling (via pollset_kick with
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
- a loop */
- if (worker.reevaluate_polling_on_wakeup) {
- worker.reevaluate_polling_on_wakeup = 0;
- pollset->kicked_without_pollers = 0;
- if (queued_work || worker.kicked_specifically) {
- /* If there's queued work on the list, then set the deadline to be
- immediate so we get back out of the polling loop quickly */
- deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
- }
- keep_polling = 1;
- }
- }
- if (added_worker) {
- remove_worker(pollset, &worker);
- gpr_tls_set(&g_current_thread_worker, 0);
- }
- /* release wakeup fd to the local pool */
- worker.wakeup_fd->next = pollset->local_wakeup_cache;
- pollset->local_wakeup_cache = worker.wakeup_fd;
- /* check shutdown conditions */
- if (pollset->shutting_down) {
- if (pollset_has_workers(pollset)) {
- pollset_kick(pollset, NULL);
- } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
- pollset->called_shutdown = 1;
- gpr_mu_unlock(&pollset->mu);
- finish_shutdown(exec_ctx, pollset);
- grpc_exec_ctx_flush(exec_ctx);
- /* Continuing to access pollset here is safe -- it is the caller's
- * responsibility to not destroy when it has outstanding calls to
- * pollset_work.
- * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
- gpr_mu_lock(&pollset->mu);
- } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
- gpr_mu_unlock(&pollset->mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- }
- }
- *worker_hdl = NULL;
- GPR_TIMER_END("pollset_work", 0);
- return error;
-}
-
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = 1;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
- if (!pollset_has_workers(pollset)) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
- }
- if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
- !pollset_has_workers(pollset)) {
- pollset->called_shutdown = 1;
- finish_shutdown(exec_ctx, pollset);
- }
-}
-
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- return gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
-}
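[Editor's sketch] The helper above maps an absolute deadline onto a poll() timeout: -1 for "no deadline", 0 when the deadline falls within the 10us spin-polling window, otherwise the remaining time rounded up to whole milliseconds. A minimal, illustrative exercise of that behaviour (using only the gpr_time helpers already used in this file; not part of the diff):

static void deadline_to_timeout_examples(void) {
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  /* no deadline -> block indefinitely */
  GPR_ASSERT(poll_deadline_to_millis_timeout(
                 gpr_inf_future(GPR_CLOCK_MONOTONIC), now) == -1);
  /* deadline within the 10us spin-polling window -> non-blocking poll */
  GPR_ASSERT(poll_deadline_to_millis_timeout(
                 gpr_time_add(now, gpr_time_from_micros(5, GPR_TIMESPAN)),
                 now) == 0);
  /* otherwise: remaining time is rounded up to whole milliseconds */
  GPR_ASSERT(poll_deadline_to_millis_timeout(
                 gpr_time_add(now, gpr_time_from_micros(1500, GPR_TIMESPAN)),
                 now) == 2);
}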
-
-/*
- * basic_pollset - a vtable that provides polling for zero or one file
- * descriptor via poll()
- */
-
-typedef struct grpc_unary_promote_args {
- const grpc_pollset_vtable *original_vtable;
- grpc_pollset *pollset;
- grpc_fd *fd;
- grpc_closure promotion_closure;
-} grpc_unary_promote_args;
-
-static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
- grpc_error *error) {
- grpc_unary_promote_args *up_args = args;
- const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
- grpc_pollset *pollset = up_args->pollset;
- grpc_fd *fd = up_args->fd;
-
- /*
- * This is quite tricky. There are a number of cases to keep in mind here:
- * 1. fd may have been orphaned
- * 2. The pollset may no longer be a unary poller (and we can't let case #1
- * leak to other pollset types!)
- * 3. pollset's fd (which may have changed) may have been orphaned
- * 4. The pollset may be shutting down.
- */
-
- gpr_mu_lock(&pollset->mu);
- /* First we need to ensure that nobody is polling concurrently */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- gpr_free(up_args);
- /* At this point the pollset may no longer be a unary poller. In that case
- * we should just call the right add function and be done. */
- /* TODO(klempner): If we're not careful this could cause infinite recursion.
- * That's not a problem for now because empty_pollset has a trivial poller
- * and we don't have any mechanism to unbecome multipoller. */
- pollset->in_flight_cbs--;
- if (pollset->shutting_down) {
- /* We don't care about this pollset anymore. */
- if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
- pollset->called_shutdown = 1;
- finish_shutdown(exec_ctx, pollset);
- }
- } else if (fd_is_orphaned(fd)) {
- /* Don't try to add it to anything, we'll drop our ref on it below */
- } else if (pollset->vtable != original_vtable) {
- pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
- } else if (fd != pollset->data.ptr) {
- grpc_fd *fds[2];
- fds[0] = pollset->data.ptr;
- fds[1] = fd;
-
- if (fds[0] && !fd_is_orphaned(fds[0])) {
- platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds));
- GRPC_FD_UNREF(fds[0], "basicpoll");
- } else {
- /* old fd is orphaned and we haven't cleaned it up until now, so remain a
- * unary poller */
- /* Note that it is possible that fds[1] is also orphaned at this point.
- * That's okay, we'll correct it at the next add or poll. */
- if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
- pollset->data.ptr = fd;
- GRPC_FD_REF(fd, "basicpoll");
- }
- }
-
- gpr_mu_unlock(&pollset->mu);
-
- /* Matching ref in basic_pollset_add_fd */
- GRPC_FD_UNREF(fd, "basicpoll_add");
-}
-
-static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd, int and_unlock_pollset) {
- grpc_unary_promote_args *up_args;
- GPR_ASSERT(fd);
- if (fd == pollset->data.ptr) goto exit;
-
- if (!pollset_has_workers(pollset)) {
- /* Fast path -- no in flight cbs */
- /* TODO(klempner): Comment this out and fix any test failures or establish
- * they are due to timing issues */
- grpc_fd *fds[2];
- fds[0] = pollset->data.ptr;
- fds[1] = fd;
-
- if (fds[0] == NULL) {
- pollset->data.ptr = fd;
- GRPC_FD_REF(fd, "basicpoll");
- } else if (!fd_is_orphaned(fds[0])) {
- platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds));
- GRPC_FD_UNREF(fds[0], "basicpoll");
- } else {
- /* old fd is orphaned and we haven't cleaned it up until now, so remain a
- * unary poller */
- GRPC_FD_UNREF(fds[0], "basicpoll");
- pollset->data.ptr = fd;
- GRPC_FD_REF(fd, "basicpoll");
- }
- goto exit;
- }
-
- /* Now we need to promote. This needs to happen when we're not polling. Since
- * this may be called from poll, the wait needs to happen asynchronously. */
- GRPC_FD_REF(fd, "basicpoll_add");
- pollset->in_flight_cbs++;
- up_args = gpr_malloc(sizeof(*up_args));
- up_args->fd = fd;
- up_args->original_vtable = pollset->vtable;
- up_args->pollset = pollset;
- up_args->promotion_closure.cb = basic_do_promote;
- up_args->promotion_closure.cb_arg = up_args;
-
- grpc_closure_list_append(&pollset->idle_jobs, &up_args->promotion_closure,
- GRPC_ERROR_NONE);
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
-exit:
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- }
-}
-
-static grpc_error *basic_pollset_maybe_work_and_unlock(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now) {
-#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
-#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
-
- struct pollfd pfd[3];
- grpc_fd *fd;
- grpc_fd_watcher fd_watcher;
- int timeout;
- int r;
- nfds_t nfds;
- grpc_error *error = GRPC_ERROR_NONE;
-
- fd = pollset->data.ptr;
- if (fd && fd_is_orphaned(fd)) {
- GRPC_FD_UNREF(fd, "basicpoll");
- fd = pollset->data.ptr = NULL;
- }
- timeout = poll_deadline_to_millis_timeout(deadline, now);
- pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
- pfd[0].events = POLLIN;
- pfd[0].revents = 0;
- pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
- pfd[1].events = POLLIN;
- pfd[1].revents = 0;
- nfds = 2;
- if (fd) {
- pfd[2].fd = fd->fd;
- pfd[2].revents = 0;
- GRPC_FD_REF(fd, "basicpoll_begin");
- gpr_mu_unlock(&pollset->mu);
- pfd[2].events =
- (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT, &fd_watcher);
- if (pfd[2].events != 0) {
- nfds++;
- }
- } else {
- gpr_mu_unlock(&pollset->mu);
- }
-
- /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
- even going into the blocking annotation if possible */
- /* poll fd count (argument 2) is shortened by one if we have no events
- to poll on - such that it only includes the kicker */
- GPR_TIMER_BEGIN("poll", 0);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- r = grpc_poll_function(pfd, nfds, timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- GPR_TIMER_END("poll", 0);
-
- if (r < 0) {
- if (errno != EINTR) {
- work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
- }
- if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
- }
- } else if (r == 0) {
- if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
- }
- } else {
- if (pfd[0].revents & POLLIN_CHECK) {
- work_combine_error(&error,
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
- }
- if (pfd[1].revents & POLLIN_CHECK) {
- work_combine_error(&error,
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
- }
- if (nfds > 2) {
- fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
- pfd[2].revents & POLLOUT_CHECK, pollset);
- } else if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
- }
- }
-
- if (fd) {
- GRPC_FD_UNREF(fd, "basicpoll_begin");
- }
-
- return error;
-}
-
-static void basic_pollset_destroy(grpc_pollset *pollset) {
- if (pollset->data.ptr != NULL) {
- GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
- pollset->data.ptr = NULL;
- }
-}
-
-static const grpc_pollset_vtable basic_pollset = {
- basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
- basic_pollset_destroy, basic_pollset_destroy};
-
-static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
- pollset->vtable = &basic_pollset;
- pollset->data.ptr = fd_or_null;
- if (fd_or_null != NULL) {
- GRPC_FD_REF(fd_or_null, "basicpoll");
- }
-}
-
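[Editor's note] For orientation: in the engine above, every pollset starts as a basic poller over zero or one fd (become_basic_pollset). When a second distinct fd is added, basic_pollset_add_fd promotes it to the platform multipoller (poll- or epoll-based, selected at init via platform_become_multipoller) - immediately if no workers are currently polling, otherwise through the deferred basic_do_promote idle job so the switch never races an in-progress poll.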
-/*******************************************************************************
- * pollset_multipoller_with_poll_posix.c
- */
-
-#ifndef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
-
-typedef struct {
- /* all polled fds */
- size_t fd_count;
- size_t fd_capacity;
- grpc_fd **fds;
- /* fds that have been removed from the pollset explicitly */
- size_t del_count;
- size_t del_capacity;
- grpc_fd **dels;
-} poll_hdr;
-
-static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_fd *fd,
- int and_unlock_pollset) {
- size_t i;
- poll_hdr *h = pollset->data.ptr;
- /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
- for (i = 0; i < h->fd_count; i++) {
- if (h->fds[i] == fd) goto exit;
- }
- if (h->fd_count == h->fd_capacity) {
- h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2);
- h->fds = gpr_realloc(h->fds, sizeof(grpc_fd *) * h->fd_capacity);
- }
- h->fds[h->fd_count++] = fd;
- GRPC_FD_REF(fd, "multipoller");
-exit:
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- }
-}
-
-static grpc_error *multipoll_with_poll_pollset_maybe_work_and_unlock(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now) {
-#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
-#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
-
- int timeout;
- int r;
- size_t i, j, fd_count;
- nfds_t pfd_count;
- poll_hdr *h;
- /* TODO(ctiller): inline some elements to avoid an allocation */
- grpc_fd_watcher *watchers;
- struct pollfd *pfds;
- grpc_error *error = GRPC_ERROR_NONE;
-
- h = pollset->data.ptr;
- timeout = poll_deadline_to_millis_timeout(deadline, now);
- /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
- pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
- watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
- fd_count = 0;
- pfd_count = 2;
- pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
- pfds[0].events = POLLIN;
- pfds[0].revents = 0;
- pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
- pfds[1].events = POLLIN;
- pfds[1].revents = 0;
- for (i = 0; i < h->fd_count; i++) {
- int remove = fd_is_orphaned(h->fds[i]);
- for (j = 0; !remove && j < h->del_count; j++) {
- if (h->fds[i] == h->dels[j]) remove = 1;
- }
- if (remove) {
- GRPC_FD_UNREF(h->fds[i], "multipoller");
- } else {
- h->fds[fd_count++] = h->fds[i];
- watchers[pfd_count].fd = h->fds[i];
- GRPC_FD_REF(watchers[pfd_count].fd, "multipoller_start");
- pfds[pfd_count].fd = h->fds[i]->fd;
- pfds[pfd_count].revents = 0;
- pfd_count++;
- }
- }
- for (j = 0; j < h->del_count; j++) {
- GRPC_FD_UNREF(h->dels[j], "multipoller_del");
- }
- h->del_count = 0;
- h->fd_count = fd_count;
- gpr_mu_unlock(&pollset->mu);
-
- for (i = 2; i < pfd_count; i++) {
- grpc_fd *fd = watchers[i].fd;
- pfds[i].events = (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT,
- &watchers[i]);
- GRPC_FD_UNREF(fd, "multipoller_start");
- }
-
- /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
- even going into the blocking annotation if possible */
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- r = grpc_poll_function(pfds, pfd_count, timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- if (r < 0) {
- if (errno != EINTR) {
- work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
- }
- for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
- }
- } else if (r == 0) {
- for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
- }
- } else {
- if (pfds[0].revents & POLLIN_CHECK) {
- work_combine_error(&error,
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
- }
- if (pfds[1].revents & POLLIN_CHECK) {
- work_combine_error(&error,
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
- }
- for (i = 2; i < pfd_count; i++) {
- if (watchers[i].fd == NULL) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
- continue;
- }
- fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
- pfds[i].revents & POLLOUT_CHECK, pollset);
- }
- }
-
- gpr_free(pfds);
- gpr_free(watchers);
-
- return error;
-}
-
-static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
- size_t i;
- poll_hdr *h = pollset->data.ptr;
- for (i = 0; i < h->fd_count; i++) {
- GRPC_FD_UNREF(h->fds[i], "multipoller");
- }
- for (i = 0; i < h->del_count; i++) {
- GRPC_FD_UNREF(h->dels[i], "multipoller_del");
- }
- h->fd_count = 0;
- h->del_count = 0;
-}
-
-static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
- poll_hdr *h = pollset->data.ptr;
- multipoll_with_poll_pollset_finish_shutdown(pollset);
- gpr_free(h->fds);
- gpr_free(h->dels);
- gpr_free(h);
-}
-
-static const grpc_pollset_vtable multipoll_with_poll_pollset = {
- multipoll_with_poll_pollset_add_fd,
- multipoll_with_poll_pollset_maybe_work_and_unlock,
- multipoll_with_poll_pollset_finish_shutdown,
- multipoll_with_poll_pollset_destroy};
-
-static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset, grpc_fd **fds,
- size_t nfds) {
- size_t i;
- poll_hdr *h = gpr_malloc(sizeof(poll_hdr));
- pollset->vtable = &multipoll_with_poll_pollset;
- pollset->data.ptr = h;
- h->fd_count = nfds;
- h->fd_capacity = nfds;
- h->fds = gpr_malloc(nfds * sizeof(grpc_fd *));
- h->del_count = 0;
- h->del_capacity = 0;
- h->dels = NULL;
- for (i = 0; i < nfds; i++) {
- h->fds[i] = fds[i];
- GRPC_FD_REF(fds[i], "multipoller");
- }
-}
-
-#endif /* !GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
-
-/*******************************************************************************
- * pollset_multipoller_with_epoll_posix.c
- */
-
-#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
-
-#include <errno.h>
-#include <poll.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-
-static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st,
- grpc_pollset *read_notifier_pollset) {
- /* only one set_ready can be active at once (but there may be a racing
- notify_on) */
- gpr_mu_lock(&fd->mu);
- set_ready_locked(exec_ctx, fd, st);
-
- /* A non-NULL read_notifier_pollset means that the fd is readable. */
- if (read_notifier_pollset != NULL) {
- /* Note: Since the fd might be a part of multiple pollsets, this might be
- * called multiple times (for each time the fd becomes readable) and it is
- * okay to set the fd's read-notifier pollset to any one of these pollsets */
- set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
- }
-
- gpr_mu_unlock(&fd->mu);
-}
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier_pollset) {
- set_ready(exec_ctx, fd, &fd->read_closure, notifier_pollset);
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->write_closure, NULL);
-}
-
-struct epoll_fd_list {
- int *epoll_fds;
- size_t count;
- size_t capacity;
-};
-
-static struct epoll_fd_list epoll_fd_global_list;
-static gpr_once init_epoll_fd_list_mu = GPR_ONCE_INIT;
-static gpr_mu epoll_fd_list_mu;
-
-static void init_mu(void) { gpr_mu_init(&epoll_fd_list_mu); }
-
-static void add_epoll_fd_to_global_list(int epoll_fd) {
- gpr_once_init(&init_epoll_fd_list_mu, init_mu);
-
- gpr_mu_lock(&epoll_fd_list_mu);
- if (epoll_fd_global_list.count == epoll_fd_global_list.capacity) {
- epoll_fd_global_list.capacity =
- GPR_MAX((size_t)8, epoll_fd_global_list.capacity * 2);
- epoll_fd_global_list.epoll_fds =
- gpr_realloc(epoll_fd_global_list.epoll_fds,
- epoll_fd_global_list.capacity * sizeof(int));
- }
- epoll_fd_global_list.epoll_fds[epoll_fd_global_list.count++] = epoll_fd;
- gpr_mu_unlock(&epoll_fd_list_mu);
-}
-
-static void remove_epoll_fd_from_global_list(int epoll_fd) {
- gpr_mu_lock(&epoll_fd_list_mu);
- GPR_ASSERT(epoll_fd_global_list.count > 0);
- for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
- if (epoll_fd == epoll_fd_global_list.epoll_fds[i]) {
- epoll_fd_global_list.epoll_fds[i] =
- epoll_fd_global_list.epoll_fds[--(epoll_fd_global_list.count)];
- break;
- }
- }
- gpr_mu_unlock(&epoll_fd_list_mu);
-}
-
-static void remove_fd_from_all_epoll_sets(int fd) {
- int err;
- gpr_once_init(&init_epoll_fd_list_mu, init_mu);
- gpr_mu_lock(&epoll_fd_list_mu);
- if (epoll_fd_global_list.count == 0) {
- gpr_mu_unlock(&epoll_fd_list_mu);
- return;
- }
- for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
- err = epoll_ctl(epoll_fd_global_list.epoll_fds[i], EPOLL_CTL_DEL, fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd,
- strerror(errno));
- }
- }
- gpr_mu_unlock(&epoll_fd_list_mu);
-}
-
-typedef struct {
- grpc_pollset *pollset;
- grpc_fd *fd;
- grpc_closure closure;
-} delayed_add;
-
-typedef struct { int epoll_fd; } epoll_hdr;
-
-static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- epoll_hdr *h = pollset->data.ptr;
- struct epoll_event ev;
- int err;
- grpc_fd_watcher watcher;
-
- /* We pretend to be polling whilst adding an fd to keep the fd from being
- closed during the add. This may result in a spurious wakeup being assigned
- to this pollset whilst adding, but that should be benign. */
- GPR_ASSERT(fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0);
- if (watcher.fd != NULL) {
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fd;
- err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
- if (err < 0) {
- /* FDs may be added to a pollset multiple times, so EEXIST is normal. */
- if (errno != EEXIST) {
- gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s", fd->fd,
- strerror(errno));
- }
- }
- }
- fd_end_poll(exec_ctx, &watcher, 0, 0, NULL);
-}
-
-static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- delayed_add *da = arg;
-
- if (!fd_is_orphaned(da->fd)) {
- finally_add_fd(exec_ctx, da->pollset, da->fd);
- }
-
- gpr_mu_lock(&da->pollset->mu);
- da->pollset->in_flight_cbs--;
- if (da->pollset->shutting_down) {
- /* We don't care about this pollset anymore. */
- if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
- da->pollset->called_shutdown = 1;
- grpc_exec_ctx_sched(exec_ctx, da->pollset->shutdown_done, GRPC_ERROR_NONE,
- NULL);
- }
- }
- gpr_mu_unlock(&da->pollset->mu);
-
- GRPC_FD_UNREF(da->fd, "delayed_add");
-
- gpr_free(da);
-}
-
-static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_fd *fd,
- int and_unlock_pollset) {
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- finally_add_fd(exec_ctx, pollset, fd);
- } else {
- delayed_add *da = gpr_malloc(sizeof(*da));
- da->pollset = pollset;
- da->fd = fd;
- GRPC_FD_REF(fd, "delayed_add");
- grpc_closure_init(&da->closure, perform_delayed_add, da);
- pollset->in_flight_cbs++;
- grpc_exec_ctx_sched(exec_ctx, &da->closure, GRPC_ERROR_NONE, NULL);
- }
-}
-
-/* TODO(klempner): We probably want to turn this down a bit */
-#define GRPC_EPOLL_MAX_EVENTS 1000
-
-static grpc_error *multipoll_with_epoll_pollset_maybe_work_and_unlock(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now) {
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- int poll_rv;
- epoll_hdr *h = pollset->data.ptr;
- int timeout_ms;
- struct pollfd pfds[2];
- grpc_error *error = GRPC_ERROR_NONE;
-
- /* If you want to ignore epoll's ability to sanely handle parallel pollers,
- * for a more apples-to-apples performance comparison with poll, add an
- * if (pollset->counter != 0) { return 0; }
- * statement here.
- */
-
- gpr_mu_unlock(&pollset->mu);
-
- timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
-
- pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
- pfds[0].events = POLLIN;
- pfds[0].revents = 0;
- pfds[1].fd = h->epoll_fd;
- pfds[1].events = POLLIN;
- pfds[1].revents = 0;
-
- /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
- even going into the blocking annotation if possible */
- GPR_TIMER_BEGIN("poll", 0);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- GPR_TIMER_END("poll", 0);
-
- if (poll_rv < 0) {
- if (errno != EINTR) {
- work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
- }
- } else if (poll_rv == 0) {
- /* do nothing */
- } else {
- if (pfds[0].revents) {
- work_combine_error(&error,
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
- }
- if (pfds[1].revents) {
- do {
- /* The following epoll_wait never blocks; it has a timeout of 0 */
- ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
- if (ep_rv < 0) {
- if (errno != EINTR) {
- work_combine_error(&error, GRPC_OS_ERROR(errno, "epoll_wait"));
- }
- } else {
- int i;
- for (i = 0; i < ep_rv; ++i) {
- grpc_fd *fd = ep_ev[i].data.ptr;
- /* TODO(klempner): We might want to consider making err and pri
- * separate events */
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (fd == NULL) {
- work_combine_error(&error, grpc_wakeup_fd_consume_wakeup(
- &grpc_global_wakeup_fd));
- } else {
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
- }
- } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
- }
- }
- return error;
-}
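[Editor's sketch] The function above uses a pattern worth isolating: block in poll() on the epoll fd itself (plus the wakeup fd), then drain the ready events with non-blocking epoll_wait() calls until a short batch indicates nothing is left. A stand-alone illustration of that pattern, independent of the gRPC types (all names here are invented):

#include <poll.h>
#include <sys/epoll.h>

#define SKETCH_MAX_EVENTS 64

/* Block in poll() on the epoll fd, then drain ready events with
   non-blocking epoll_wait() calls; a full batch may mean more are pending. */
static void drain_epoll_sketch(int epoll_fd, int timeout_ms) {
  struct pollfd pfd = {.fd = epoll_fd, .events = POLLIN, .revents = 0};
  if (poll(&pfd, 1, timeout_ms) <= 0 || !(pfd.revents & POLLIN)) return;
  struct epoll_event evs[SKETCH_MAX_EVENTS];
  int n;
  do {
    n = epoll_wait(epoll_fd, evs, SKETCH_MAX_EVENTS, 0); /* timeout 0: never blocks */
    for (int i = 0; i < n; i++) {
      /* dispatch evs[i].data.ptr to the owning fd here */
    }
  } while (n == SKETCH_MAX_EVENTS);
}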
-
-static void multipoll_with_epoll_pollset_finish_shutdown(
- grpc_pollset *pollset) {}
-
-static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
- epoll_hdr *h = pollset->data.ptr;
- close(h->epoll_fd);
- remove_epoll_fd_from_global_list(h->epoll_fd);
- gpr_free(h);
-}
-
-static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
- multipoll_with_epoll_pollset_add_fd,
- multipoll_with_epoll_pollset_maybe_work_and_unlock,
- multipoll_with_epoll_pollset_finish_shutdown,
- multipoll_with_epoll_pollset_destroy};
-
-static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset, grpc_fd **fds,
- size_t nfds) {
- size_t i;
- epoll_hdr *h = gpr_malloc(sizeof(epoll_hdr));
- struct epoll_event ev;
- int err;
-
- pollset->vtable = &multipoll_with_epoll_pollset;
- pollset->data.ptr = h;
- h->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
- if (h->epoll_fd < 0) {
- /* TODO(klempner): Fall back to poll here, especially on ENOSYS */
- gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno));
- abort();
- }
- add_epoll_fd_to_global_list(h->epoll_fd);
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = NULL;
- err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), &ev);
- if (err < 0) {
- gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s",
- GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd),
- strerror(errno));
- }
-
- for (i = 0; i < nfds; i++) {
- multipoll_with_epoll_pollset_add_fd(exec_ctx, pollset, fds[i], 0);
- }
-}
-
-#else /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
-
-static void remove_fd_from_all_epoll_sets(int fd) {}
-
-#endif /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
-
-/*******************************************************************************
- * pollset_set_posix.c
- */
-
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
- memset(pollset_set, 0, sizeof(*pollset_set));
- gpr_mu_init(&pollset_set->mu);
- return pollset_set;
-}
-
-static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
- size_t i;
- gpr_mu_destroy(&pollset_set->mu);
- for (i = 0; i < pollset_set->fd_count; i++) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
- }
- gpr_free(pollset_set->pollsets);
- gpr_free(pollset_set->pollset_sets);
- gpr_free(pollset_set->fds);
- gpr_free(pollset_set);
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
- size_t i, j;
- gpr_mu_lock(&pollset_set->mu);
- if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
- pollset_set->pollset_capacity =
- GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets =
- gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
- sizeof(*pollset_set->pollsets));
- }
- pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
- for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
- if (fd_is_orphaned(pollset_set->fds[i])) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
- } else {
- pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
- pollset_set->fds[j++] = pollset_set->fds[i];
- }
- }
- pollset_set->fd_count = j;
- gpr_mu_unlock(&pollset_set->mu);
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- for (i = 0; i < pollset_set->pollset_count; i++) {
- if (pollset_set->pollsets[i] == pollset) {
- pollset_set->pollset_count--;
- GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
- pollset_set->pollsets[pollset_set->pollset_count]);
- break;
- }
- }
- gpr_mu_unlock(&pollset_set->mu);
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- size_t i, j;
- gpr_mu_lock(&bag->mu);
- if (bag->pollset_set_count == bag->pollset_set_capacity) {
- bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets =
- gpr_realloc(bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
- }
- bag->pollset_sets[bag->pollset_set_count++] = item;
- for (i = 0, j = 0; i < bag->fd_count; i++) {
- if (fd_is_orphaned(bag->fds[i])) {
- GRPC_FD_UNREF(bag->fds[i], "pollset_set");
- } else {
- pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
- bag->fds[j++] = bag->fds[i];
- }
- }
- bag->fd_count = j;
- gpr_mu_unlock(&bag->mu);
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- size_t i;
- gpr_mu_lock(&bag->mu);
- for (i = 0; i < bag->pollset_set_count; i++) {
- if (bag->pollset_sets[i] == item) {
- bag->pollset_set_count--;
- GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
- bag->pollset_sets[bag->pollset_set_count]);
- break;
- }
- }
- gpr_mu_unlock(&bag->mu);
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- if (pollset_set->fd_count == pollset_set->fd_capacity) {
- pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = gpr_realloc(
- pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
- }
- GRPC_FD_REF(fd, "pollset_set");
- pollset_set->fds[pollset_set->fd_count++] = fd;
- for (i = 0; i < pollset_set->pollset_count; i++) {
- pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
- }
- for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
- }
- gpr_mu_unlock(&pollset_set->mu);
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
- size_t i;
- gpr_mu_lock(&pollset_set->mu);
- for (i = 0; i < pollset_set->fd_count; i++) {
- if (pollset_set->fds[i] == fd) {
- pollset_set->fd_count--;
- GPR_SWAP(grpc_fd *, pollset_set->fds[i],
- pollset_set->fds[pollset_set->fd_count]);
- GRPC_FD_UNREF(fd, "pollset_set");
- break;
- }
- }
- for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
- }
- gpr_mu_unlock(&pollset_set->mu);
-}
-
-/*******************************************************************************
- * workqueue stubs
- */
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {}
-#endif
-
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue, grpc_closure *closure,
- grpc_error *error) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
-}
-
-/*******************************************************************************
- * event engine binding
- */
-
-static void shutdown_engine(void) {
- fd_global_shutdown();
- pollset_global_shutdown();
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_reset = pollset_reset,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .kick_poller = kick_poller,
-
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_enqueue = workqueue_enqueue,
-
- .shutdown_engine = shutdown_engine,
-};
-
-const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void) {
-#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
- platform_become_multipoller = epoll_become_multipoller;
-#else
- platform_become_multipoller = poll_become_multipoller;
-#endif
- fd_global_init();
- pollset_global_init();
- return &vtable;
-}
-
-#endif
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index e1d620cfff..21b28e5554 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -120,6 +120,8 @@ struct grpc_fd {
grpc_pollset *read_notifier_pollset;
};
+static grpc_wakeup_fd global_wakeup_fd;
+
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
or writability interest changes, the pollset can be kicked to pick up that
@@ -769,17 +771,17 @@ static grpc_error *pollset_kick(grpc_pollset *p,
static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
gpr_tls_init(&g_current_thread_worker);
- return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
+ return grpc_wakeup_fd_init(&global_wakeup_fd);
}
static void pollset_global_shutdown(void) {
- grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
+ grpc_wakeup_fd_destroy(&global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
}
static grpc_error *kick_poller(void) {
- return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
+ return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
/* main interface */
@@ -947,7 +949,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
fd_count = 0;
pfd_count = 2;
- pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&global_wakeup_fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker.wakeup_fd->fd);
@@ -1001,8 +1003,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
- work_combine_error(
- &error, grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd));
}
if (pfds[1].revents & POLLIN_CHECK) {
work_combine_error(
@@ -1343,6 +1345,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
int res, idx;
gpr_cv *pollcv;
cv_node *cvn, *prev;
+ int skip_poll = 0;
nfds_t nsockfds = 0;
gpr_thd_id t_id;
gpr_thd_options opt;
@@ -1358,17 +1361,17 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
cvn->cv = pollcv;
cvn->next = g_cvfds.cvfds[idx].cvs;
g_cvfds.cvfds[idx].cvs = cvn;
- // We should return immediately if there are pending events,
- // but we still need to call poll() to check for socket events
+ // Don't bother polling if a wakeup fd is ready
if (g_cvfds.cvfds[idx].is_set) {
- timeout = 0;
+ skip_poll = 1;
}
} else if (fds[i].fd >= 0) {
nsockfds++;
}
}
- if (nsockfds > 0) {
+ res = 0;
+ if (!skip_poll && nsockfds > 0) {
pargs = gpr_malloc(sizeof(struct poll_args));
// Both the main thread and calling thread get a reference
gpr_ref_init(&pargs->refcount, 2);
@@ -1398,16 +1401,14 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = pargs->retval;
errno = pargs->err;
} else {
- res = 0;
errno = 0;
gpr_atm_no_barrier_store(&pargs->status, CANCELLED);
}
- } else {
+ } else if (!skip_poll) {
gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
deadline =
gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
gpr_cv_wait(pollcv, &g_cvfds.mu, deadline);
- res = 0;
}
idx = 0;
@@ -1431,7 +1432,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
- } else if (fds[i].fd >= 0 &&
+ } else if (!skip_poll && fds[i].fd >= 0 &&
gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
fds[i].revents = pargs->fds[idx].revents;
idx++;
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index ef36ba89b2..ab139895fd 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -45,7 +45,6 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_epoll_linux.h"
-#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/support/env.h"
@@ -67,7 +66,6 @@ static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
- {"legacy", grpc_init_poll_and_epoll_posix},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 2fdef06838..cb5832539d 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -183,6 +183,5 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
-extern grpc_wakeup_fd grpc_global_wakeup_fd;
#endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 4fd83e0b22..3470b5ac81 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -108,6 +108,7 @@ void grpc_iomgr_shutdown(void) {
NULL)) {
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(&exec_ctx);
+ grpc_iomgr_platform_flush();
gpr_mu_lock(&g_mu);
continue;
}
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index b62ecbc534..217bc5da59 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -44,10 +44,10 @@
#include "src/core/lib/support/string.h"
grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
- gpr_slice *output) {
+ grpc_slice *output) {
unsigned char *contents = NULL;
size_t contents_size = 0;
- gpr_slice result = gpr_empty_slice();
+ grpc_slice result = gpr_empty_slice();
FILE *file;
size_t bytes_read = 0;
grpc_error *error = GRPC_ERROR_NONE;
@@ -72,7 +72,7 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
if (add_null_terminator) {
contents[contents_size++] = 0;
}
- result = gpr_slice_new(contents, contents_size, gpr_free);
+ result = grpc_slice_new(contents, contents_size, gpr_free);
end:
*output = result;
diff --git a/src/core/lib/iomgr/load_file.h b/src/core/lib/iomgr/load_file.h
index 9aac2225d1..73ee8c3abf 100644
--- a/src/core/lib/iomgr/load_file.h
+++ b/src/core/lib/iomgr/load_file.h
@@ -36,7 +36,7 @@
#include <stdio.h>
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/lib/iomgr/error.h"
@@ -47,7 +47,7 @@ extern "C" {
/* Loads the content of a file into a slice. add_null_terminator will add
a NULL terminator if non-zero. */
grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
- gpr_slice *slice);
+ grpc_slice *slice);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c
index b4bb7e3cf7..a5ca9ed2c3 100644
--- a/src/core/lib/iomgr/network_status_tracker.c
+++ b/src/core/lib/iomgr/network_status_tracker.c
@@ -46,7 +46,7 @@ static gpr_mu g_endpoint_mutex;
void grpc_network_status_shutdown(void) {
if (head != NULL) {
gpr_log(GPR_ERROR,
- "Memory leaked as all network endpoints were not shut down");
+ "Memory leaked as not all network endpoints were shut down");
}
gpr_mu_destroy(&g_endpoint_mutex);
}
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index c0bb3b5a23..f1897bb91f 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -90,7 +90,6 @@
#define GRPC_POSIX_SOCKETUTILS
#endif
#elif defined(GPR_APPLE)
-#define GRPC_HAVE_IP_PKTINFO 1
#define GRPC_HAVE_SO_NOSIGPIPE 1
#define GRPC_HAVE_UNIX_SOCKET 1
#define GRPC_MSG_IOVLEN_TYPE int
@@ -102,7 +101,6 @@
#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_FREEBSD)
#define GRPC_HAVE_IPV6_RECVPKTINFO 1
-#define GRPC_HAVE_IP_PKTINFO 1
#define GRPC_HAVE_SO_NOSIGPIPE 1
#define GRPC_HAVE_UNIX_SOCKET 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 275924448a..e03d16fa4e 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -36,6 +36,7 @@
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/pollset_set.h"
#define GRPC_MAX_SOCKADDR_SIZE 128
@@ -54,6 +55,7 @@ typedef struct {
/* TODO(ctiller): add a timeout here */
extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr,
const char *default_port,
+ grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addresses);
/* Destroy resolved addresses */
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index de791b2b67..821932e562 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -181,6 +181,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port,
+ grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
request *r = gpr_malloc(sizeof(request));
@@ -192,9 +193,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
}
-void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) =
- resolve_address_impl;
+void (*grpc_resolve_address)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = resolve_address_impl;
#endif
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index b8295acfa1..3269c4f09f 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -181,6 +181,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port,
+ grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
uv_getaddrinfo_t *req;
@@ -223,9 +224,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
}
}
-void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) =
- resolve_address_impl;
+void (*grpc_resolve_address)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = resolve_address_impl;
#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index e139293c03..fada5ecbe8 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -169,6 +169,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port,
+ grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
@@ -180,9 +181,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
}
-void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port, grpc_closure *on_done,
- grpc_resolved_addresses **addresses) =
- resolve_address_impl;
+void (*grpc_resolve_address)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addresses) = resolve_address_impl;
#endif
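[Editor's sketch] All three platform implementations gain the same extra interested_parties parameter, so call sites now thread a pollset_set through grpc_resolve_address. A hypothetical caller (everything except grpc_resolve_address and the types in its signature is assumed for illustration):

static void start_resolving(grpc_exec_ctx *exec_ctx, const char *target,
                            grpc_pollset_set *interested_parties,
                            grpc_closure *on_done,
                            grpc_resolved_addresses **addrs) {
  /* The blocking resolvers above simply ignore interested_parties; a
     resolver implementation that needs to poll while resolving can use it
     to get its fds polled. */
  grpc_resolve_address(exec_ctx, target, "https", interested_parties, on_done,
                       addrs);
}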
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
new file mode 100644
index 0000000000..213d29600c
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -0,0 +1,829 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/resource_quota.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/combiner.h"
+
+int grpc_resource_quota_trace = 0;
+
+/* Internal linked list pointers for a resource user */
+typedef struct {
+ grpc_resource_user *next;
+ grpc_resource_user *prev;
+} grpc_resource_user_link;
+
+/* Resource users are kept in (potentially) several intrusive linked lists
+ at once. These are the list names. */
+typedef enum {
+ /* Resource users that are waiting for an allocation */
+ GRPC_RULIST_AWAITING_ALLOCATION,
+ /* Resource users that have free memory available for internal reclamation */
+ GRPC_RULIST_NON_EMPTY_FREE_POOL,
+ /* Resource users that have published that a benign reclamation is
+ available */
+ GRPC_RULIST_RECLAIMER_BENIGN,
+ /* Resource users that have published that a destructive reclamation is
+ available */
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE,
+ /* Number of lists: must be last */
+ GRPC_RULIST_COUNT
+} grpc_rulist;
+
+struct grpc_resource_user {
+ /* The quota this resource user consumes from */
+ grpc_resource_quota *resource_quota;
+
+ /* Closure to schedule an allocation under the resource quota combiner lock */
+ grpc_closure allocate_closure;
+ /* Closure to publish a non-empty free pool under the resource quota
+ combiner lock */
+ grpc_closure add_to_free_pool_closure;
+
+ /* one ref for each ref call (released by grpc_resource_user_unref), and one
+ ref for each byte allocated (released by grpc_resource_user_free) */
+ gpr_atm refs;
+ /* has this resource user been shut down? starts at 0, increases by one for
+ each shutdown call */
+ gpr_atm shutdown;
+
+ gpr_mu mu;
+ /* The amount of memory (in bytes) this user has cached for its own use: to
+ avoid quota contention, each resource user can keep some memory in
+ addition to what it is immediately using (e.g., for caching), and the quota
+ can pull it back under memory pressure.
+ This value can become negative if more memory has been requested than
+ existed in the free pool, at which point the quota is consulted to bring
+ this value non-negative (asynchronously). */
+ int64_t free_pool;
+ /* A list of closures to call once free_pool becomes non-negative - ie when
+ all outstanding allocations have been granted. */
+ grpc_closure_list on_allocated;
+ /* True if we are currently trying to allocate from the quota, false if not */
+ bool allocating;
+ /* True if we are currently trying to add ourselves to the non-empty free
+ pool list, false otherwise */
+ bool added_to_free_pool;
+
+ /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
+ */
+ grpc_closure *reclaimers[2];
+ /* Reclaimers just posted: once we're in the combiner lock, we'll move them
+ to the array above */
+ grpc_closure *new_reclaimers[2];
+ /* Trampoline closures to finish reclamation and re-enter the quota combiner
+ lock */
+ grpc_closure post_reclaimer_closure[2];
+
+ /* Closure to execute under the quota combiner to de-register and shut down
+ the resource user */
+ grpc_closure destroy_closure;
+
+ /* Links in the various grpc_rulist lists */
+ grpc_resource_user_link links[GRPC_RULIST_COUNT];
+
+ /* The name of this resource user, for debugging/tracing */
+ char *name;
+};
+
+struct grpc_resource_quota {
+ /* refcount */
+ gpr_refcount refs;
+
+ /* Master combiner lock: all activity on a quota executes under this combiner
+ * (so no mutex is needed for this data structure)
+ */
+ grpc_combiner *combiner;
+ /* Size of the resource quota */
+ int64_t size;
+ /* Amount of free memory in the resource quota */
+ int64_t free_pool;
+
+ /* Has rq_step been scheduled to occur? */
+ bool step_scheduled;
+ /* Are we currently reclaiming memory */
+ bool reclaiming;
+ /* Closure around rq_step */
+ grpc_closure rq_step_closure;
+ /* Closure around rq_reclamation_done */
+ grpc_closure rq_reclamation_done_closure;
+
+ /* This is only really usable for debugging: it's always a stale pointer, but
+ a stale pointer that might just be fresh enough to guide us to where the
+ reclamation system is stuck */
+ grpc_closure *debug_only_last_initiated_reclaimer;
+ grpc_resource_user *debug_only_last_reclaimer_resource_user;
+
+ /* Roots of all resource user lists */
+ grpc_resource_user *roots[GRPC_RULIST_COUNT];
+
+ char *name;
+};
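[Editor's sketch] For context, the structs above back the user-visible resource-quota API; a minimal usage sketch (function names assumed from the grpc/grpc.h of the same release, not shown in this diff) might look like:

#include <grpc/grpc.h>

static grpc_resource_quota *make_quota(void) {
  grpc_resource_quota *q = grpc_resource_quota_create("example_quota");
  grpc_resource_quota_resize(q, 64 * 1024 * 1024); /* cap memory at 64MB */
  return q; /* release later with grpc_resource_quota_unref(q) */
}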
+
+/*******************************************************************************
+ * list management
+ */
+
+static void rulist_add_head(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = *root;
+ resource_user->links[list].prev = (*root)->links[list].prev;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ *root = resource_user;
+ }
+}
+
+static void rulist_add_tail(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = (*root)->links[list].next;
+ resource_user->links[list].prev = *root;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ }
+}
+
+static bool rulist_empty(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ return resource_quota->roots[list] == NULL;
+}
+
+static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ grpc_resource_user **root = &resource_quota->roots[list];
+ grpc_resource_user *resource_user = *root;
+ if (resource_user == NULL) {
+ return NULL;
+ }
+ if (resource_user->links[list].next == resource_user) {
+ *root = NULL;
+ } else {
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+ *root = resource_user->links[list].next;
+ }
+ resource_user->links[list].next = resource_user->links[list].prev = NULL;
+ return resource_user;
+}
+
+static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) {
+ if (resource_user->links[list].next == NULL) return;
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = resource_user->links[list].next;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = NULL;
+ }
+ }
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+ resource_user->links[list].next = resource_user->links[list].prev = NULL;
+}
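[Editor's sketch] Illustrative only: how the intrusive-list helpers above compose for a single user. Here u is assumed to have been created against q, and the combiner lock that normally guards these calls is elided.

static void rulist_usage_sketch(grpc_resource_quota *q,
                                grpc_resource_user *u) {
  GPR_ASSERT(rulist_empty(q, GRPC_RULIST_AWAITING_ALLOCATION));
  rulist_add_tail(u, GRPC_RULIST_AWAITING_ALLOCATION);
  GPR_ASSERT(!rulist_empty(q, GRPC_RULIST_AWAITING_ALLOCATION));
  GPR_ASSERT(rulist_pop_head(q, GRPC_RULIST_AWAITING_ALLOCATION) == u);
  /* pop_head cleared u's links, so a later remove is a harmless no-op */
  rulist_remove(u, GRPC_RULIST_AWAITING_ALLOCATION);
  GPR_ASSERT(rulist_empty(q, GRPC_RULIST_AWAITING_ALLOCATION));
}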
+
+/*******************************************************************************
+ * resource quota state machine
+ */
+
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive);
+
+static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->step_scheduled = false;
+ do {
+ if (rq_alloc(exec_ctx, resource_quota)) goto done;
+ } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota));
+
+ if (!rq_reclaim(exec_ctx, resource_quota, false)) {
+ rq_reclaim(exec_ctx, resource_quota, true);
+ }
+
+done:
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+static void rq_step_sched(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (resource_quota->step_scheduled) return;
+ resource_quota->step_scheduled = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
+ &resource_quota->rq_step_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+/* returns true if all allocations are completed */
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool < 0 &&
+ -resource_user->free_pool <= resource_quota->free_pool) {
+ int64_t amt = -resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool -= amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ } else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
+ resource_quota->name, resource_user->name);
+ }
+ if (resource_user->free_pool >= 0) {
+ resource_user->allocating = false;
+ grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+ gpr_mu_unlock(&resource_user->mu);
+ } else {
+ rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+ gpr_mu_unlock(&resource_user->mu);
+ return false;
+ }
+ }
+ return true;
+}
+
+/* returns true if any memory could be reclaimed from a per-user free pool */
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool > 0) {
+ int64_t amt = resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool += amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+ return true;
+ } else {
+ gpr_mu_unlock(&resource_user->mu);
+ }
+ }
+ return false;
+}
+
+/* returns true if reclamation is proceeding */
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive) {
+ if (resource_quota->reclaiming) return true;
+ grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
+ : GRPC_RULIST_RECLAIMER_BENIGN;
+ grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
+ if (resource_user == NULL) return false;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
+ resource_quota->name, resource_user->name,
+ destructive ? "destructive" : "benign");
+ }
+ resource_quota->reclaiming = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure *c = resource_user->reclaimers[destructive];
+ GPR_ASSERT(c);
+ resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
+ resource_quota->debug_only_last_initiated_reclaimer = c;
+ resource_user->reclaimers[destructive] = NULL;
+ grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
+ return true;
+}
+
+/*******************************************************************************
+ * ru_slice: a slice implementation that is backed by a grpc_resource_user
+ */
+
+typedef struct {
+ grpc_slice_refcount base;
+ gpr_refcount refs;
+ grpc_resource_user *resource_user;
+ size_t size;
+} ru_slice_refcount;
+
+static void ru_slice_ref(void *p) {
+ ru_slice_refcount *rc = p;
+ gpr_ref(&rc->refs);
+}
+
+static void ru_slice_unref(void *p) {
+ ru_slice_refcount *rc = p;
+ if (gpr_unref(&rc->refs)) {
+ /* TODO(ctiller): this is dangerous, but I think safe for now:
+ we have no guarantee here that we're at a safe point for creating an
+ execution context, but we have no way of writing this code otherwise.
+ In the future: consider lifting grpc_slice to grpc, and offering an
+ internal_{ref,unref} pair that is execution context aware.
+ Alternatively, make exec_ctx thread-local and 'do the right thing'
+ (whatever that is) if it is NULL */
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_user_free(&exec_ctx, rc->resource_user, rc->size);
+ grpc_exec_ctx_finish(&exec_ctx);
+ gpr_free(rc);
+ }
+}
+
+static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
+ size_t size) {
+ ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ rc->base.ref = ru_slice_ref;
+ rc->base.unref = ru_slice_unref;
+ gpr_ref_init(&rc->refs, 1);
+ rc->resource_user = resource_user;
+ rc->size = size;
+ grpc_slice slice;
+ slice.refcount = &rc->base;
+ slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
+ slice.data.refcounted.length = size;
+ return slice;
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: resource user manipulation under
+ * the combiner
+ */
+
+static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+}
+
+static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
+}
+
+static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive) {
+ grpc_closure *closure = resource_user->new_reclaimers[destructive];
+ GPR_ASSERT(closure != NULL);
+ resource_user->new_reclaimers[destructive] = NULL;
+ GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
+ if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+ return false;
+ }
+ resource_user->reclaimers[destructive] = closure;
+ return true;
+}
+
+static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
+}
+
+static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+}
+
+static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED, NULL);
+ resource_user->reclaimers[0] = NULL;
+ resource_user->reclaimers[1] = NULL;
+ rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
+ rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+}
+
+static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ rulist_remove(resource_user, (grpc_rulist)i);
+ }
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED, NULL);
+ if (resource_user->free_pool != 0) {
+ resource_user->resource_quota->free_pool += resource_user->free_pool;
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
+ gpr_mu_destroy(&resource_user->mu);
+ gpr_free(resource_user->name);
+ gpr_free(resource_user);
+}
+
+static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resource_user_slice_allocator *slice_allocator = arg;
+ if (error == GRPC_ERROR_NONE) {
+ for (size_t i = 0; i < slice_allocator->count; i++) {
+ grpc_slice_buffer_add_indexed(
+ slice_allocator->dest, ru_slice_create(slice_allocator->resource_user,
+ slice_allocator->length));
+ }
+ }
+ grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: quota manipulation under the
+ * combiner
+ */
+
+typedef struct {
+ int64_t size;
+ grpc_resource_quota *resource_quota;
+ grpc_closure closure;
+} rq_resize_args;
+
+static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
+ rq_resize_args *a = args;
+ int64_t delta = a->size - a->resource_quota->size;
+ a->resource_quota->size += delta;
+ a->resource_quota->free_pool += delta;
+ rq_step_sched(exec_ctx, a->resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, a->resource_quota);
+ gpr_free(a);
+}
+
+static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
+ grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->reclaiming = false;
+ rq_step_sched(exec_ctx, resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+/*******************************************************************************
+ * grpc_resource_quota api
+ */
+
+/* Public API */
+grpc_resource_quota *grpc_resource_quota_create(const char *name) {
+ grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ gpr_ref_init(&resource_quota->refs, 1);
+ resource_quota->combiner = grpc_combiner_create(NULL);
+ resource_quota->free_pool = INT64_MAX;
+ resource_quota->size = INT64_MAX;
+ resource_quota->step_scheduled = false;
+ resource_quota->reclaiming = false;
+ if (name != NULL) {
+ resource_quota->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
+ (intptr_t)resource_quota);
+ }
+ grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+ grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
+ rq_reclamation_done, resource_quota);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_quota->roots[i] = NULL;
+ }
+ return resource_quota;
+}
+
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (gpr_unref(&resource_quota->refs)) {
+ grpc_combiner_destroy(exec_ctx, resource_quota->combiner);
+ gpr_free(resource_quota->name);
+ gpr_free(resource_quota);
+ }
+}
+
+/* Public API */
+void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota) {
+ gpr_ref(&resource_quota->refs);
+ return resource_quota;
+}
+
+/* Public API */
+void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
+ grpc_resource_quota_internal_ref(resource_quota);
+}
+
+/* Public API */
+void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
+ size_t size) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ rq_resize_args *a = gpr_malloc(sizeof(*a));
+ a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
+ a->size = (int64_t)size;
+ grpc_closure_init(&a->closure, rq_resize, a);
+ grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
+ GRPC_ERROR_NONE, false);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+/*******************************************************************************
+ * grpc_resource_user channel args api
+ */
+
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ if (channel_args->args[i].type == GRPC_ARG_POINTER) {
+ return grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ } else {
+ gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
+ }
+ }
+ }
+ return grpc_resource_quota_create(NULL);
+}
+
+static void *rq_copy(void *rq) {
+ grpc_resource_quota_ref(rq);
+ return rq;
+}
+
+static void rq_destroy(void *rq) { grpc_resource_quota_unref(rq); }
+
+static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
+ static const grpc_arg_pointer_vtable vtable = {rq_copy, rq_destroy, rq_cmp};
+ return &vtable;
+}
+
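
The channel-arg plumbing above is what callers use to attach a quota to a channel. A minimal caller-side sketch; the helper name resource_quota_arg and the 64MiB figure are illustrative, not part of this patch:

    static grpc_arg resource_quota_arg(grpc_resource_quota *resource_quota) {
      grpc_arg arg;
      arg.type = GRPC_ARG_POINTER;
      arg.key = GRPC_ARG_RESOURCE_QUOTA;
      arg.value.pointer.p = resource_quota;
      arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
      return arg;
    }

    /* usage sketch:
         grpc_resource_quota *rq = grpc_resource_quota_create("my_pool");
         grpc_resource_quota_resize(rq, 64 * 1024 * 1024);
         grpc_arg arg = resource_quota_arg(rq);
         ... place arg in the grpc_channel_args used to build the channel,
         then grpc_resource_quota_unref(rq) once the args hold their own ref ... */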
+/*******************************************************************************
+ * grpc_resource_user api
+ */
+
+grpc_resource_user *grpc_resource_user_create(
+ grpc_resource_quota *resource_quota, const char *name) {
+ grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
+ resource_user->resource_quota =
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
+ resource_user);
+ grpc_closure_init(&resource_user->add_to_free_pool_closure,
+ &ru_add_to_free_pool, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[0],
+ &ru_post_benign_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[1],
+ &ru_post_destructive_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
+ resource_user);
+ gpr_mu_init(&resource_user->mu);
+ gpr_atm_rel_store(&resource_user->refs, 1);
+ gpr_atm_rel_store(&resource_user->shutdown, 0);
+ resource_user->free_pool = 0;
+ grpc_closure_list_init(&resource_user->on_allocated);
+ resource_user->allocating = false;
+ resource_user->added_to_free_pool = false;
+ resource_user->reclaimers[0] = NULL;
+ resource_user->reclaimers[1] = NULL;
+ resource_user->new_reclaimers[0] = NULL;
+ resource_user->new_reclaimers[1] = NULL;
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_user->links[i].next = resource_user->links[i].prev = NULL;
+ }
+ if (name != NULL) {
+ resource_user->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR,
+ (intptr_t)resource_user);
+ }
+ return resource_user;
+}
+
+static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) {
+ GPR_ASSERT(amount > 0);
+ GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
+}
+
+static void ru_unref_by(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, gpr_atm amount) {
+ GPR_ASSERT(amount > 0);
+ gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
+ GPR_ASSERT(old >= amount);
+ if (old == amount) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->destroy_closure, GRPC_ERROR_NONE,
+ false);
+ }
+}
+
+void grpc_resource_user_ref(grpc_resource_user *resource_user) {
+ ru_ref_by(resource_user, 1);
+}
+
+void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ ru_unref_by(exec_ctx, resource_user, 1);
+}
+
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ grpc_closure_create(ru_shutdown, resource_user),
+ GRPC_ERROR_NONE, false);
+ }
+}
+
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done) {
+ gpr_mu_lock(&resource_user->mu);
+ ru_ref_by(resource_user, (gpr_atm)size);
+ resource_user->free_pool -= (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->free_pool);
+ }
+ if (resource_user->free_pool < 0) {
+ grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
+ GRPC_ERROR_NONE);
+ if (!resource_user->allocating) {
+ resource_user->allocating = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->allocate_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ } else {
+ grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size) {
+ gpr_mu_lock(&resource_user->mu);
+ bool was_zero_or_negative = resource_user->free_pool <= 0;
+ resource_user->free_pool += (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->free_pool);
+ }
+ bool is_bigger_than_zero = resource_user->free_pool > 0;
+ if (is_bigger_than_zero && was_zero_or_negative &&
+ !resource_user->added_to_free_pool) {
+ resource_user->added_to_free_pool = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->add_to_free_pool_closure,
+ GRPC_ERROR_NONE, false);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+ ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
+}
+
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive,
+ grpc_closure *closure) {
+ GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
+ resource_user->new_reclaimers[destructive] = closure;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->post_reclaimer_closure[destructive],
+ GRPC_ERROR_NONE, false);
+}
+
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
+ resource_user->resource_quota->name, resource_user->name);
+ }
+ grpc_combiner_execute(
+ exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->resource_quota->rq_reclamation_done_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
+ grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
+ slice_allocator);
+ grpc_closure_init(&slice_allocator->on_done, cb, p);
+ slice_allocator->resource_user = resource_user;
+}
+
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, grpc_slice_buffer *dest) {
+ slice_allocator->length = length;
+ slice_allocator->count = count;
+ slice_allocator->dest = dest;
+ grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
+ count * length, &slice_allocator->on_allocated);
+}
+
+grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size) {
+ grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL);
+ return ru_slice_create(resource_user, size);
+}
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
new file mode 100644
index 0000000000..0181fd978b
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+#define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+
+#include <grpc/grpc.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+/** \file Tracks resource usage against a pool.
+
+ The current implementation tracks only memory usage, but in the future
+ this may be extended to (for example) threads and file descriptors.
+
+ A grpc_resource_quota represents the pooled resources, and
+ grpc_resource_user instances attach to the quota and consume those
+ resources. They also offer a vector for reclamation: if we become
+ resource constrained, grpc_resource_user instances are asked (in turn) to
+ free up whatever they can so that the system as a whole can make progress.
+
+ There are three kinds of reclamation that take place, in order of increasing
+ invasiveness:
+ - an internal reclamation, where cached resources held at the resource user
+ level are returned to the quota
+ - a benign reclamation phase, whereby resources that are in use but are not
+ helping anything make progress are reclaimed
+ - a destructive reclamation, whereby resources that are helping something
+ make progress may be destroyed so that at least one part of the system can
+ complete.
+
+ Only one reclamation will be outstanding for a given quota at a given time.
+ On each reclamation attempt, the kinds of reclamation are tried in order of
+ increasing invasiveness, stopping at the first one that succeeds. Thus, on a
+ given reclamation attempt, if internal and benign reclamation both fail, it
+ will wind up doing a destructive reclamation. However, the next reclamation
+ attempt may then be able to get what it needs via internal or benign
+ reclamation, due to resources that may have been freed up by the destructive
+ reclamation in the previous attempt.
+
+ Future work will be to expose the current resource pressure so that back
+ pressure can be applied to avoid reclamation phases starting.
+
+ Resource users own references to resource quotas, and resource quotas
+ maintain lists of users (which users arrange to leave before they are
+ destroyed) */
+
+extern int grpc_resource_quota_trace;
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota);
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args);
+
+typedef struct grpc_resource_user grpc_resource_user;
+
+grpc_resource_user *grpc_resource_user_create(
+ grpc_resource_quota *resource_quota, const char *name);
+void grpc_resource_user_ref(grpc_resource_user *resource_user);
+void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+
+/* Allocate from the resource user (and its quota).
+ If optional_on_done is NULL, then allocate immediately. This may push the
+ quota over-limit, at which point reclamation will kick in.
+ If optional_on_done is non-NULL, it will be scheduled when the allocation has
+ been granted by the quota. */
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done);
+/* Release memory back to the quota */
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size);
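
A minimal sketch of the asynchronous allocation contract described above; on_big_alloc, alloc_example and release_example are illustrative names and the 64KiB size is arbitrary:

    static void on_big_alloc(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
      /* the 64KiB is now accounted against the quota and may be used */
    }

    static void alloc_example(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user,
                              grpc_closure *on_done) {
      grpc_closure_init(on_done, on_big_alloc, NULL);
      grpc_resource_user_alloc(exec_ctx, resource_user, 64 * 1024, on_done);
    }

    static void release_example(grpc_exec_ctx *exec_ctx,
                                grpc_resource_user *resource_user) {
      /* once the memory is no longer needed, hand it back to the quota */
      grpc_resource_user_free(exec_ctx, resource_user, 64 * 1024);
    }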
+/* Post a memory reclaimer to the resource user. Only one benign and one
+ destructive reclaimer can be posted at once. When executed, the reclaimer
+ MUST call grpc_resource_user_finish_reclamation before it completes, to
+ return control to the resource quota. */
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive, grpc_closure *closure);
+/* Finish a reclamation step */
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
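
The reclaimer contract is easiest to see in a sketch. Here a hypothetical cache (my_cache, benign_reclaim and offer_benign_reclaimer are illustrative names) offers spare bytes back to the quota; grpc_resource_user_finish_reclamation is called only when the reclaimer actually ran (error == GRPC_ERROR_NONE), since a reclaimer cancelled at shutdown was never part of a reclamation pass:

    typedef struct {
      grpc_resource_user *resource_user;
      size_t cached_bytes;
      grpc_closure reclaimer;
    } my_cache;

    static void benign_reclaim(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
      my_cache *cache = arg;
      if (error == GRPC_ERROR_NONE) {
        if (cache->cached_bytes > 0) {
          /* return whatever we can spare to the quota */
          grpc_resource_user_free(exec_ctx, cache->resource_user,
                                  cache->cached_bytes);
          cache->cached_bytes = 0;
        }
        /* hand control back so the quota can continue its reclamation pass */
        grpc_resource_user_finish_reclamation(exec_ctx, cache->resource_user);
      }
      /* on GRPC_ERROR_CANCELLED the resource user is shutting down: do nothing */
    }

    static void offer_benign_reclaimer(grpc_exec_ctx *exec_ctx, my_cache *cache) {
      grpc_closure_init(&cache->reclaimer, benign_reclaim, cache);
      grpc_resource_user_post_reclaimer(exec_ctx, cache->resource_user,
                                        /* destructive= */ false,
                                        &cache->reclaimer);
    }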
+
+/* Helper to allocate slices from a resource user */
+typedef struct grpc_resource_user_slice_allocator {
+ /* Closure for when a resource user allocation completes */
+ grpc_closure on_allocated;
+ /* Closure to call when slices have been allocated */
+ grpc_closure on_done;
+ /* Length of slices to allocate on the current request */
+ size_t length;
+ /* Number of slices to allocate on the current request */
+ size_t count;
+ /* Destination for slices to allocate on the current request */
+ grpc_slice_buffer *dest;
+ /* Parent resource user */
+ grpc_resource_user *resource_user;
+} grpc_resource_user_slice_allocator;
+
+/* Initialize a slice allocator.
+ When an allocation is completed, calls \a cb with arg \p. */
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);
+
+/* Allocate \a count slices of length \a length into \a dest. Only one request
+ can be outstanding at a time. */
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, grpc_slice_buffer *dest);
+
+/* Allocate one slice of length \a size synchronously. */
+grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size);
+
+#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
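
A usage sketch of the slice allocator declared above; my_reader, on_slices_ready and start_read are illustrative names (a transport read path is the intended consumer):

    typedef struct {
      grpc_resource_user_slice_allocator slice_allocator;
      grpc_slice_buffer incoming;
    } my_reader;

    static void on_slices_ready(grpc_exec_ctx *exec_ctx, void *arg,
                                grpc_error *error) {
      my_reader *r = arg;
      (void)r;
      if (error == GRPC_ERROR_NONE) {
        /* r->incoming now holds the quota-accounted slices; start reading */
      }
    }

    static void start_read(grpc_exec_ctx *exec_ctx, my_reader *r,
                           grpc_resource_user *resource_user) {
      grpc_slice_buffer_init(&r->incoming);
      grpc_resource_user_slice_allocator_init(&r->slice_allocator, resource_user,
                                              on_slices_ready, r);
      /* request 4 slices of 8KiB each; on_slices_ready fires once granted */
      grpc_resource_user_alloc_slices(exec_ctx, &r->slice_allocator, 8192, 4,
                                      &r->incoming);
    }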
diff --git a/src/core/lib/iomgr/socket_mutator.c b/src/core/lib/iomgr/socket_mutator.c
new file mode 100644
index 0000000000..8b1efb6bab
--- /dev/null
+++ b/src/core/lib/iomgr/socket_mutator.c
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/socket_mutator.h"
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+
+void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
+ const grpc_socket_mutator_vtable *vtable) {
+ mutator->vtable = vtable;
+ gpr_ref_init(&mutator->refcount, 1);
+}
+
+grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator) {
+ gpr_ref(&mutator->refcount);
+ return mutator;
+}
+
+bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd) {
+ return mutator->vtable->mutate_fd(fd, mutator);
+}
+
+int grpc_socket_mutator_compare(grpc_socket_mutator *a,
+ grpc_socket_mutator *b) {
+ int c = GPR_ICMP(a, b);
+ if (c != 0) {
+ grpc_socket_mutator *sma = a;
+ grpc_socket_mutator *smb = b;
+ c = GPR_ICMP(sma->vtable, smb->vtable);
+ if (c == 0) {
+ c = sma->vtable->compare(sma, smb);
+ }
+ }
+ return c;
+}
+
+void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
+ if (gpr_unref(&mutator->refcount)) {
+ mutator->vtable->destory(mutator);
+ }
+}
+
+static void *socket_mutator_arg_copy(void *p) {
+ return grpc_socket_mutator_ref(p);
+}
+
+static void socket_mutator_arg_destroy(void *p) {
+ grpc_socket_mutator_unref(p);
+}
+
+static int socket_mutator_cmp(void *a, void *b) {
+ return grpc_socket_mutator_compare((grpc_socket_mutator *)a,
+ (grpc_socket_mutator *)b);
+}
+
+static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
+ socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
+
+grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_SOCKET_MUTATOR;
+ arg.value.pointer.vtable = &socket_mutator_arg_vtable;
+ arg.value.pointer.p = mutator;
+ return arg;
+}
diff --git a/src/core/lib/iomgr/socket_mutator.h b/src/core/lib/iomgr/socket_mutator.h
new file mode 100644
index 0000000000..2f5b6c248e
--- /dev/null
+++ b/src/core/lib/iomgr/socket_mutator.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H
+#define GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/sync.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The virtual table of grpc_socket_mutator */
+typedef struct {
+ /** Mutates the socket options of \a fd */
+ bool (*mutate_fd)(int fd, grpc_socket_mutator *mutator);
+ /** Compare socket mutator \a a and \a b */
+ int (*compare)(grpc_socket_mutator *a, grpc_socket_mutator *b);
+ /** Destroys the socket mutator instance */
+ void (*destory)(grpc_socket_mutator *mutator);
+} grpc_socket_mutator_vtable;
+
+/** The Socket Mutator interface allows changes on socket options */
+struct grpc_socket_mutator {
+ const grpc_socket_mutator_vtable *vtable;
+ gpr_refcount refcount;
+};
+
+/** called by concrete implementations to initialize the base struct */
+void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
+ const grpc_socket_mutator_vtable *vtable);
+
+/** Wrap \a mutator as a grpc_arg */
+grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator);
+
+/** Perform the file descriptor mutation operation of \a mutator on \a fd */
+bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd);
+
+/** Compare if \a a and \a b are the same mutator or have same settings */
+int grpc_socket_mutator_compare(grpc_socket_mutator *a, grpc_socket_mutator *b);
+
+grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator);
+void grpc_socket_mutator_unref(grpc_socket_mutator *mutator);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H */
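
A concrete mutator is straightforward to write against this interface. The sketch below (keepalive_mutator and its helpers are illustrative names) enables SO_KEEPALIVE on every fd the channel creates and wraps itself as a channel arg; the vtable is initialized positionally in the slot order declared above (mutate_fd, compare, destroy):

    #include <sys/socket.h>

    #include <grpc/support/alloc.h>
    #include <grpc/support/useful.h>

    #include "src/core/lib/iomgr/socket_mutator.h"

    typedef struct {
      grpc_socket_mutator base; /* must be first so the pointer can be cast */
      int enable_keepalive;
    } keepalive_mutator;

    static bool keepalive_mutate_fd(int fd, grpc_socket_mutator *m) {
      keepalive_mutator *km = (keepalive_mutator *)m;
      return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &km->enable_keepalive,
                        sizeof(km->enable_keepalive)) == 0;
    }

    static int keepalive_compare(grpc_socket_mutator *a, grpc_socket_mutator *b) {
      return GPR_ICMP(((keepalive_mutator *)a)->enable_keepalive,
                      ((keepalive_mutator *)b)->enable_keepalive);
    }

    static void keepalive_destroy(grpc_socket_mutator *m) { gpr_free(m); }

    /* slots in declaration order: mutate_fd, compare, destroy */
    static const grpc_socket_mutator_vtable keepalive_vtable = {
        keepalive_mutate_fd, keepalive_compare, keepalive_destroy};

    static grpc_arg make_keepalive_mutator_arg(void) {
      keepalive_mutator *km = gpr_malloc(sizeof(*km));
      km->enable_keepalive = 1;
      grpc_socket_mutator_init(&km->base, &keepalive_vtable);
      return grpc_socket_mutator_to_arg(&km->base);
    }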
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index bc28bbe316..88e9ade253 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -209,6 +209,15 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
return GRPC_ERROR_NONE;
}
+/* set a socket using a grpc_socket_mutator */
+grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator) {
+ GPR_ASSERT(mutator);
+ if (!grpc_socket_mutator_mutate_fd(mutator, fd)) {
+ return GRPC_ERROR_CREATE("grpc_socket_mutator failed.");
+ }
+ return GRPC_ERROR_NONE;
+}
+
static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT;
static int g_ipv6_loopback_available;
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 175fb2b717..e84d3781a1 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -39,7 +39,9 @@
#include <sys/socket.h>
#include <unistd.h>
+#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/socket_mutator.h"
/* a wrapper for accept or accept4 */
int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
@@ -88,6 +90,9 @@ grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes);
/* Tries to set the socket's receive buffer to given size. */
grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes);
+/* Tries to set the socket using a grpc_socket_mutator */
+grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator);
+
/* An enum to keep track of IPv4/IPv6 socket modes.
Currently, this information is only used when a socket is first created, but
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 35f23300dc..54911e0e31 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -76,6 +76,14 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
LPFN_DISCONNECTEX DisconnectEx;
DWORD ioctl_num_bytes;
+ gpr_mu_lock(&winsocket->state_mu);
+ if (winsocket->shutdown_called) {
+ gpr_mu_unlock(&winsocket->state_mu);
+ return;
+ }
+ winsocket->shutdown_called = true;
+ gpr_mu_unlock(&winsocket->state_mu);
+
status = WSAIoctl(winsocket->socket, SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid, sizeof(guid), &DisconnectEx, sizeof(DisconnectEx),
&ioctl_num_bytes, NULL, NULL);
diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h
index 490d0e0a06..a3875ce16c 100644
--- a/src/core/lib/iomgr/socket_windows.h
+++ b/src/core/lib/iomgr/socket_windows.h
@@ -87,6 +87,7 @@ typedef struct grpc_winsocket {
grpc_winsocket_callback_info read_info;
gpr_mu state_mu;
+ bool shutdown_called;
/* You can't add the same socket twice to the same IO Completion Port.
This prevents that. */
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index fe1e7f4d3e..0485661316 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -34,11 +34,16 @@
#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H
#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H
+#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
+/* Channel arg (integer) setting how large a slice to try and read from the wire
+ each time recvmsg (or equivalent) is called */
+#define GRPC_ARG_TCP_READ_CHUNK_SIZE "grpc.experimental.tcp_read_chunk_size"
+
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure).
@@ -47,6 +52,7 @@
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline);
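
For reference, the new GRPC_ARG_TCP_READ_CHUNK_SIZE knob introduced above is an ordinary integer channel arg; a caller-side sketch (the helper name is illustrative):

    static grpc_arg tcp_read_chunk_size_arg(int bytes) {
      grpc_arg arg;
      arg.type = GRPC_ARG_INTEGER;
      arg.key = GRPC_ARG_TCP_READ_CHUNK_SIZE;
      arg.value.integer = bytes;
      return arg;
    }

    /* e.g. tcp_read_chunk_size_arg(64 * 1024) requests 64KiB read slices;
       values are clamped to [1, 8MiB] by the connector code further below. */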
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index e7ae1ef695..a3a70a8ed7 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -35,7 +35,7 @@
#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include <errno.h>
#include <netinet/in.h>
@@ -47,9 +47,11 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/socket_mutator.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/iomgr/timer.h"
@@ -69,9 +71,11 @@ typedef struct {
char *addr_str;
grpc_endpoint **ep;
grpc_closure *closure;
+ grpc_channel_args *channel_args;
} async_connect;
-static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd) {
+static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
+ const grpc_channel_args *channel_args) {
grpc_error *err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -86,6 +90,16 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd) {
}
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (err != GRPC_ERROR_NONE) goto error;
+ if (channel_args) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
+ GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
+ grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
+ err = grpc_set_socket_with_mutator(fd, mutator);
+ if (err != GRPC_ERROR_NONE) goto error;
+ }
+ }
+ }
goto done;
error:
@@ -114,10 +128,39 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
}
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str) {
+ size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ 8 * 1024 * 1024};
+ tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
+ &channel_args->args[i], options);
+ } else if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
+ grpc_endpoint *ep =
+ grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ return ep;
+}
+
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect *ac = acp;
int so_error = 0;
@@ -165,7 +208,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
switch (so_error) {
case 0:
grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
- *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+ *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
+ ac->addr_str);
fd = NULL;
break;
case ENOBUFS:
@@ -207,14 +251,18 @@ finish:
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (error != GRPC_ERROR_NONE) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION,
- "Failed to connect to remote host");
+ char *error_descr;
+ gpr_asprintf(&error_descr, "Failed to connect to remote host: %s",
+ grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION));
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION, error_descr);
+ gpr_free(error_descr);
error =
grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, ac->addr_str);
}
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
@@ -223,6 +271,7 @@ finish:
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
int fd;
@@ -253,7 +302,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
addr = &addr4_copy;
}
- if ((error = prepare_socket(addr, fd)) != GRPC_ERROR_NONE) {
+ if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
return;
}
@@ -270,7 +319,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+ *ep =
+ grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
goto done;
}
@@ -295,6 +345,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
ac->refs = 2;
ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac;
+ ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
@@ -316,16 +367,18 @@ done:
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_resolved_address *addr,
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
- grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
- deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index 06d6dbf29d..efc5fcd5bb 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -31,11 +31,15 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
-#define GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/tcp_client.h"
-const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void);
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str);
-#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H */
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index 6274667042..b07f9ceffa 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -54,9 +54,12 @@ typedef struct grpc_uv_tcp_connect {
grpc_endpoint **endpoint;
int refs;
char *addr_name;
+ grpc_resource_quota *resource_quota;
} grpc_uv_tcp_connect;
-static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) {
+static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
+ grpc_uv_tcp_connect *connect) {
+ grpc_resource_quota_internal_unref(exec_ctx, connect->resource_quota);
gpr_free(connect);
}
@@ -74,7 +77,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
}
done = (--connect->refs == 0);
if (done) {
- uv_tcp_connect_cleanup(connect);
+ uv_tcp_connect_cleanup(exec_ctx, connect);
}
}
@@ -86,8 +89,8 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
grpc_closure *closure = connect->closure;
grpc_timer_cancel(&exec_ctx, &connect->alarm);
if (status == 0) {
- *connect->endpoint =
- grpc_tcp_create(connect->tcp_handle, connect->addr_name);
+ *connect->endpoint = grpc_tcp_create(
+ connect->tcp_handle, connect->resource_quota, connect->addr_name);
} else {
error = GRPC_ERROR_CREATE("Failed to connect to remote host");
error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
@@ -105,7 +108,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
}
done = (--connect->refs == 0);
if (done) {
- uv_tcp_connect_cleanup(connect);
+ uv_tcp_connect_cleanup(&exec_ctx, connect);
}
grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
@@ -114,16 +117,31 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *resolved_addr,
gpr_timespec deadline) {
grpc_uv_tcp_connect *connect;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ (void)channel_args;
(void)interested_parties;
+
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
memset(connect, 0, sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
+ connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
// TODO(murgatroid99): figure out what the return value here means
@@ -138,16 +156,18 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_resolved_address *addr,
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
- grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
- deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index fdd8c1a1f8..1127588ebc 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -37,12 +37,13 @@
#include "src/core/lib/iomgr/sockaddr_windows.h"
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/log_windows.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -61,13 +62,16 @@ typedef struct {
int refs;
grpc_closure on_connect;
grpc_endpoint **endpoint;
+ grpc_resource_quota *resource_quota;
} async_connect;
-static void async_connect_unlock_and_cleanup(async_connect *ac,
+static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
+ async_connect *ac,
grpc_winsocket *socket) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
+ grpc_resource_quota_internal_unref(exec_ctx, ac->resource_quota);
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac);
@@ -83,7 +87,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (socket != NULL) {
grpc_winsocket_shutdown(socket);
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
}
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
@@ -103,22 +107,26 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
gpr_mu_lock(&ac->mu);
- if (error == GRPC_ERROR_NONE && socket != NULL) {
- DWORD transfered_bytes = 0;
- DWORD flags;
- BOOL wsa_success =
- WSAGetOverlappedResult(socket->socket, &socket->write_info.overlapped,
- &transfered_bytes, FALSE, &flags);
- GPR_ASSERT(transfered_bytes == 0);
- if (!wsa_success) {
- error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
+ if (error == GRPC_ERROR_NONE) {
+ if (socket != NULL) {
+ DWORD transfered_bytes = 0;
+ DWORD flags;
+ BOOL wsa_success =
+ WSAGetOverlappedResult(socket->socket, &socket->write_info.overlapped,
+ &transfered_bytes, FALSE, &flags);
+ GPR_ASSERT(transfered_bytes == 0);
+ if (!wsa_success) {
+ error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
+ } else {
+ *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
+ socket = NULL;
+ }
} else {
- *ep = grpc_tcp_create(socket, ac->addr_name);
- socket = NULL;
+ error = GRPC_ERROR_CREATE("socket is null");
}
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
@@ -129,6 +137,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
@@ -144,6 +153,17 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_winsocket_callback_info *info;
grpc_error *error = GRPC_ERROR_NONE;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
*endpoint = NULL;
/* Use dualstack sockets where available. */
@@ -177,8 +197,8 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_sockaddr_make_wildcard6(0, &local_address);
- status =
- bind(sock, (struct sockaddr *)&local_address.addr, local_address.len);
+ status = bind(sock, (struct sockaddr *)&local_address.addr,
+ (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -206,6 +226,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
+ ac->resource_quota = resource_quota;
grpc_closure_init(&ac->on_connect, on_connect, ac);
grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
@@ -225,6 +246,7 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
}
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 18c63cfbae..540305e4fa 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -46,9 +46,9 @@
#include <sys/types.h>
#include <unistd.h>
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -56,6 +56,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#ifdef GRPC_HAVE_MSG_NOSIGNAL
@@ -80,12 +81,13 @@ typedef struct {
msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
size_t slice_size;
gpr_refcount refcount;
+ gpr_atm shutdown_count;
/* garbage after the last read */
- gpr_slice_buffer last_read_buffer;
+ grpc_slice_buffer last_read_buffer;
- gpr_slice_buffer *incoming_buffer;
- gpr_slice_buffer *outgoing_buffer;
+ grpc_slice_buffer *incoming_buffer;
+ grpc_slice_buffer *outgoing_buffer;
/** slice within outgoing_buffer to write next */
size_t outgoing_slice_idx;
/** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
@@ -100,8 +102,17 @@ typedef struct {
grpc_closure write_closure;
char *peer_string;
+
+ grpc_resource_user *resource_user;
+ grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) {
+ return grpc_error_set_str(
+ grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
+ GRPC_ERROR_STR_TARGET_ADDRESS, tcp->peer_string);
+}
+
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
@@ -110,12 +121,14 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_fd_shutdown(exec_ctx, tcp->em_fd);
+ grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
"tcp_unref_orphan");
- gpr_slice_buffer_destroy(&tcp->last_read_buffer);
+ grpc_slice_buffer_destroy(&tcp->last_read_buffer);
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
@@ -155,6 +168,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
@@ -168,8 +182,8 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
gpr_log(GPR_DEBUG, "read: error=%s", str);
grpc_error_free_string(str);
for (i = 0; i < tcp->incoming_buffer->count; i++) {
- char *dump = gpr_dump_slice(tcp->incoming_buffer->slices[i],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ char *dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
}
@@ -181,7 +195,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
}
#define MAX_READ_IOVEC 4
-static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -192,13 +206,9 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
- while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
- gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
- gpr_slice_malloc(tcp->slice_size));
- }
for (i = 0; i < tcp->incoming_buffer->count; i++) {
- iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
- iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
+ iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
+ iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
}
msg.msg_name = NULL;
@@ -225,19 +235,21 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
/* We've consumed the edge, request a new one */
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(exec_ctx, tcp, GRPC_OS_ERROR(errno, "recvmsg"));
+ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp,
+ tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
}
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
- gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("EOF"));
+ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp,
+ tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
- gpr_slice_buffer_trim_end(
+ grpc_slice_buffer_trim_end(
tcp->incoming_buffer,
tcp->incoming_buffer->length - (size_t)read_bytes,
&tcp->last_read_buffer);
@@ -252,13 +264,38 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_TIMER_END("tcp_continue_read", 0);
}
+static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
+ grpc_error *error) {
+ grpc_tcp *tcp = tcpp;
+ if (error != GRPC_ERROR_NONE) {
+ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+ grpc_resource_user_alloc_slices(
+ exec_ctx, &tcp->slice_allocator, tcp->slice_size,
+ (size_t)tcp->iov_size - tcp->incoming_buffer->count,
+ tcp->incoming_buffer);
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
if (error != GRPC_ERROR_NONE) {
- gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
@@ -267,13 +304,13 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *incoming_buffer, grpc_closure *cb) {
+ grpc_slice_buffer *incoming_buffer, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->incoming_buffer = incoming_buffer;
- gpr_slice_buffer_reset_and_unref(incoming_buffer);
- gpr_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
+ grpc_slice_buffer_reset_and_unref(incoming_buffer);
+ grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
@@ -303,11 +340,11 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
iov_size != MAX_WRITE_IOVEC;
iov_size++) {
iov[iov_size].iov_base =
- GPR_SLICE_START_PTR(
+ GRPC_SLICE_START_PTR(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) +
tcp->outgoing_byte_idx;
iov[iov_size].iov_len =
- GPR_SLICE_LENGTH(
+ GRPC_SLICE_LENGTH(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) -
tcp->outgoing_byte_idx;
sending_length += iov[iov_size].iov_len;
@@ -336,8 +373,13 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
tcp->outgoing_slice_idx = unwind_slice_idx;
tcp->outgoing_byte_idx = unwind_byte_idx;
return false;
+ } else if (errno == EPIPE) {
+ *error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE);
+ return true;
} else {
- *error = GRPC_OS_ERROR(errno, "sendmsg");
+ *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
return true;
}
}
@@ -348,7 +390,7 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
size_t slice_length;
tcp->outgoing_slice_idx--;
- slice_length = GPR_SLICE_LENGTH(
+ slice_length = GRPC_SLICE_LENGTH(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]);
if (slice_length > trailing) {
tcp->outgoing_byte_idx = slice_length - trailing;
@@ -398,7 +440,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *buf, grpc_closure *cb) {
+ grpc_slice_buffer *buf, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_error *error = GRPC_ERROR_NONE;
@@ -407,7 +449,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
for (i = 0; i < buf->count; i++) {
char *data =
- gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
}
@@ -418,9 +460,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
- grpc_exec_ctx_sched(exec_ctx, cb, grpc_fd_is_shutdown(tcp->em_fd)
- ? GRPC_ERROR_CREATE("EOF")
- : GRPC_ERROR_NONE,
+ grpc_exec_ctx_sched(exec_ctx, cb,
+ grpc_fd_is_shutdown(tcp->em_fd)
+ ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
+ : GRPC_ERROR_NONE,
NULL);
return;
}
@@ -464,11 +507,21 @@ static char *tcp_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
+static int tcp_get_fd(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return tcp->fd;
+}
+
static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return grpc_fd_get_workqueue(tcp->em_fd);
}
+static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return tcp->resource_user;
+}
+
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_get_workqueue,
@@ -476,10 +529,13 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_add_to_pollset_set,
tcp_shutdown,
tcp_destroy,
- tcp_get_peer};
+ tcp_get_resource_user,
+ tcp_get_peer,
+ tcp_get_fd};
-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
- const char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
+ grpc_resource_quota *resource_quota,
+ size_t slice_size, const char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
@@ -494,12 +550,16 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->finished_edge = true;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
+ gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
tcp->read_closure.cb = tcp_handle_read;
tcp->read_closure.cb_arg = tcp;
tcp->write_closure.cb = tcp_handle_write;
tcp->write_closure.cb_arg = tcp;
- gpr_slice_buffer_init(&tcp->last_read_buffer);
+ grpc_slice_buffer_init(&tcp->last_read_buffer);
+ tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
+ grpc_resource_user_slice_allocator_init(
+ &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
/* Tell network status tracker about new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
@@ -514,10 +574,12 @@ int grpc_tcp_fd(grpc_endpoint *ep) {
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done) {
+ grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
+ grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 99125836d6..1c0d13f96e 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -53,8 +53,8 @@ extern int grpc_tcp_trace;
 /* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
- const char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, grpc_resource_quota *resource_quota,
+ size_t read_slice_size, const char *peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
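A usage sketch for the widened constructor declared above. It assumes an exec_ctx is in scope, that connected_fd is an already-connected socket, and that the endpoint's resource user keeps its own reference on the quota (which is what the server shutdown paths in this change rely on); all names are placeholders:

    /* Sketch: create a posix TCP endpoint charged against a named quota. */
    grpc_resource_quota *rq = grpc_resource_quota_create("example-quota");
    grpc_fd *fdobj = grpc_fd_create(connected_fd, "example-connection");
    grpc_endpoint *ep =
        grpc_tcp_create(fdobj, rq, GRPC_TCP_DEFAULT_READ_SLICE_SIZE,
                        "ipv4:127.0.0.1:50051");
    /* The endpoint holds its own ref via its resource user, so the local
       ref can be released immediately (assumption noted above). */
    grpc_resource_quota_internal_unref(exec_ctx, rq);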
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 0c2a4f41da..437a94beff 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -52,7 +52,8 @@ typedef struct grpc_tcp_server_acceptor {
unsigned fd_index;
} grpc_tcp_server_acceptor;
-/* Called for newly connected TCP connections. */
+/* Called for newly connected TCP connections.
+ Takes ownership of acceptor. */
typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep,
grpc_pollset *accepting_pollset,
@@ -61,7 +62,8 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server);
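Because the acceptor is now heap-allocated and ownership passes to the callback, every grpc_tcp_server_cb implementation has to release it. A minimal sketch under that assumption (the callback name and body are illustrative only):

    /* Sketch of an on-accept callback under the new ownership rule. */
    static void example_on_accept(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_endpoint *ep,
                                  grpc_pollset *accepting_pollset,
                                  grpc_tcp_server_acceptor *acceptor) {
      /* ... hand ep off to the transport or handshaker here ... */
      gpr_free(acceptor); /* the callback now owns the acceptor */
    }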
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 127f8595d1..179f47ef76 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -134,6 +134,8 @@ struct grpc_tcp_server {
/* next pollset to assign a channel to */
gpr_atm next_pollset_to_assign;
+
+ grpc_resource_quota *resource_quota;
};
static gpr_once check_init = GPR_ONCE_INIT;
@@ -150,23 +152,37 @@ static void init(void) {
#endif
}
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
+ s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_INTEGER) {
s->so_reuseport =
has_so_reuseport && (args->args[i].value.integer != 0);
} else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
" must be an integer");
}
+ } else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a buffer pool");
+ }
}
}
gpr_ref_init(&s->refs, 1);
@@ -203,6 +219,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+
gpr_free(s);
}
@@ -363,16 +381,12 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_tcp_listener *sp = arg;
- grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
- sp->fd_index};
- grpc_pollset *read_notifier_pollset = NULL;
- grpc_fd *fdobj;
if (err != GRPC_ERROR_NONE) {
goto error;
}
- read_notifier_pollset =
+ grpc_pollset *read_notifier_pollset =
sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
&sp->server->next_pollset_to_assign, 1) %
sp->server->pollset_count];
@@ -408,7 +422,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
}
- fdobj = grpc_fd_create(fd, name);
+ grpc_fd *fdobj = grpc_fd_create(fd, name);
if (read_notifier_pollset == NULL) {
gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
@@ -417,10 +431,17 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
+ // Create acceptor.
+ grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ acceptor->from_server = sp->server;
+ acceptor->port_index = sp->port_index;
+ acceptor->fd_index = sp->fd_index;
+
sp->server->on_accept_cb(
exec_ctx, sp->server->on_accept_cb_arg,
- grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
- read_notifier_pollset, &acceptor);
+ grpc_tcp_create(fdobj, sp->server->resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+ read_notifier_pollset, acceptor);
gpr_free(name);
gpr_free(addr_str);
@@ -638,41 +659,46 @@ done:
}
}
+/* Return listener at port_index or NULL. Should only be called with s->mu
+ locked. */
+static grpc_tcp_listener *get_port_index(grpc_tcp_server *s,
+ unsigned port_index) {
+ unsigned num_ports = 0;
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ if (!sp->is_sibling) {
+ if (++num_ports > port_index) {
+ return sp;
+ }
+ }
+ }
+ return NULL;
+}
+
unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
- grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
- for (sp = s->head; sp && port_index != 0; sp = sp->next) {
- if (!sp->is_sibling) {
- --port_index;
- }
+ grpc_tcp_listener *sp = get_port_index(s, port_index);
+ for (; sp; sp = sp->sibling) {
+ ++num_fds;
}
- for (; sp; sp = sp->sibling, ++num_fds)
- ;
gpr_mu_unlock(&s->mu);
return num_fds;
}
int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
unsigned fd_index) {
- grpc_tcp_listener *sp;
- int fd;
gpr_mu_lock(&s->mu);
- for (sp = s->head; sp && port_index != 0; sp = sp->next) {
- if (!sp->is_sibling) {
- --port_index;
+ grpc_tcp_listener *sp = get_port_index(s, port_index);
+ for (; sp; sp = sp->sibling, --fd_index) {
+ if (fd_index == 0) {
+ gpr_mu_unlock(&s->mu);
+ return sp->fd;
}
}
- for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
- ;
- if (sp) {
- fd = sp->fd;
- } else {
- fd = -1;
- }
gpr_mu_unlock(&s->mu);
- return fd;
+ return -1;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
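A small usage sketch for the two query functions refactored above; server is a placeholder for an already-bound grpc_tcp_server, and port index 0 is assumed to exist:

    /* Sketch: enumerate the fds listening on the first bound port. */
    unsigned nfds = grpc_tcp_server_port_fd_count(server, 0);
    for (unsigned i = 0; i < nfds; i++) {
      int fd = grpc_tcp_server_port_fd(server, 0, i);
      if (fd >= 0) {
        gpr_log(GPR_DEBUG, "port 0 listener %u uses fd %d", i, fd);
      }
    }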
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index 73e4db3d65..e1a174cfa2 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -76,13 +76,30 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
};
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
- (void)args;
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a buffer pool");
+ }
+ }
+ }
gpr_ref_init(&s->refs, 1);
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
@@ -119,6 +136,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp->handle);
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
@@ -170,7 +188,6 @@ static void accepted_connection_close_cb(uv_handle_t *handle) {
static void on_connect(uv_stream_t *server, int status) {
grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
- grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
uv_tcp_t *client;
grpc_endpoint *ep = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -183,6 +200,7 @@ static void on_connect(uv_stream_t *server, int status) {
uv_strerror(status));
return;
}
+
client = gpr_malloc(sizeof(uv_tcp_t));
uv_tcp_init(uv_default_loop(), client);
// UV documentation says this is guaranteed to succeed
@@ -201,9 +219,14 @@ static void on_connect(uv_stream_t *server, int status) {
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
}
- ep = grpc_tcp_create(client, peer_name_string);
+ ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
+ // Create acceptor.
+ grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ acceptor->from_server = sp->server;
+ acceptor->port_index = sp->port_index;
+ acceptor->fd_index = 0;
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
- &acceptor);
+ acceptor);
grpc_exec_ctx_finish(&exec_ctx);
}
}
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index ad6769a6ba..b0c8586bac 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -73,6 +73,7 @@ struct grpc_tcp_listener {
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
+ int outstanding_calls;
/* closure for socket notification of accept being ready */
grpc_closure on_accept;
/* linked list */
@@ -100,14 +101,32 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a buffer pool");
+ }
+ }
+ }
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
s->active_ports = 0;
@@ -122,10 +141,9 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
return GRPC_ERROR_NONE;
}
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
- if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
- }
+static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_tcp_server *s = arg;
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
@@ -137,9 +155,20 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
grpc_winsocket_destroy(sp->socket);
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
+static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_server *s) {
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ }
+
+ grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s),
+ GRPC_ERROR_NONE, NULL);
+}
+
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
gpr_ref_non_zero(&s->refs);
return s;
@@ -161,17 +190,14 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* First, shutdown all fd's. This will queue abortion calls for all
of the pending accepts due to the normal operation mechanism. */
if (s->active_ports == 0) {
- immediately_done = 1;
- }
- for (sp = s->head; sp; sp = sp->next) {
- sp->shutting_down = 1;
- grpc_winsocket_shutdown(sp->socket);
+ finish_shutdown_locked(exec_ctx, s);
+ } else {
+ for (sp = s->head; sp; sp = sp->next) {
+ sp->shutting_down = 1;
+ grpc_winsocket_shutdown(sp->socket);
+ }
}
gpr_mu_unlock(&s->mu);
-
- if (immediately_done) {
- finish_shutdown(exec_ctx, s);
- }
}
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
@@ -207,12 +233,13 @@ static grpc_error *prepare_socket(SOCKET sock,
goto failure;
}
- sockname_temp.len = sizeof(struct sockaddr_storage);
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
if (getsockname(sock, (struct sockaddr *)sockname_temp.addr,
- &sockname_temp.len) == SOCKET_ERROR) {
+ &sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
goto failure;
}
+ sockname_temp.len = sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
return GRPC_ERROR_NONE;
@@ -231,31 +258,30 @@ failure:
return error;
}
-static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
- grpc_tcp_listener *sp) {
+static void decrement_active_ports_and_notify_locked(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_listener *sp) {
int notify = 0;
sp->shutting_down = 0;
- gpr_mu_lock(&sp->server->mu);
GPR_ASSERT(sp->server->active_ports > 0);
if (0 == --sp->server->active_ports) {
- notify = 1;
- }
- gpr_mu_unlock(&sp->server->mu);
- if (notify) {
- finish_shutdown(exec_ctx, sp->server);
+ finish_shutdown_locked(exec_ctx, sp->server);
}
}
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static grpc_error *start_accept(grpc_exec_ctx *exec_ctx,
- grpc_tcp_listener *port) {
+static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_listener *port) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
DWORD addrlen = sizeof(struct sockaddr_in6) + 16;
DWORD bytes_received = 0;
grpc_error *error = GRPC_ERROR_NONE;
+ if (port->shutting_down) {
+ return GRPC_ERROR_NONE;
+ }
+
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
if (sock == INVALID_SOCKET) {
@@ -285,20 +311,11 @@ static grpc_error *start_accept(grpc_exec_ctx *exec_ctx,
immediately process an accept that happened in the meantime. */
port->new_socket = sock;
grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept);
+ port->outstanding_calls++;
return error;
failure:
GPR_ASSERT(error != GRPC_ERROR_NONE);
- if (port->shutting_down) {
- /* We are abandoning the listener port, take that into account to prevent
- occasional hangs on shutdown. The hang happens when sp->shutting_down
- change is not seen by on_accept and we proceed to trying new accept,
- but we fail there because the listening port has been closed in the
- meantime. */
- decrement_active_ports_and_notify(exec_ctx, port);
- GRPC_ERROR_UNREF(error);
- return GRPC_ERROR_NONE;
- }
if (sock != INVALID_SOCKET) closesocket(sock);
return error;
}
@@ -306,7 +323,6 @@ failure:
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_tcp_listener *sp = arg;
- grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
@@ -318,6 +334,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
BOOL wsa_success;
int err;
+ gpr_mu_lock(&sp->server->mu);
+
peer_name.len = sizeof(struct sockaddr_storage);
/* The general mechanism for shutting down is to queue abortion calls. While
@@ -327,6 +345,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Skipping on_accept due to error: %s", msg);
grpc_error_free_string(msg);
+ gpr_mu_unlock(&sp->server->mu);
return;
}
@@ -336,17 +355,12 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
&transfered_bytes, FALSE, &flags);
if (!wsa_success) {
- if (sp->shutting_down) {
- /* During the shutdown case, we ARE expecting an error. So that's well,
- and we can wake up the shutdown thread. */
- decrement_active_ports_and_notify(exec_ctx, sp);
- return;
- } else {
+ if (!sp->shutting_down) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
gpr_free(utf8_message);
- closesocket(sock);
}
+ closesocket(sock);
} else {
if (!sp->shutting_down) {
peer_name_string = NULL;
@@ -357,8 +371,10 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
gpr_free(utf8_message);
}
+ int peer_name_len = (int)peer_name.len;
err =
- getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name.len);
+ getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
+ peer_name.len = peer_name_len;
if (!err) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
@@ -368,7 +384,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
- peer_name_string);
+ sp->server->resource_quota, peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} else {
@@ -379,14 +395,24 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 /* The only time we should call our callback is when we successfully
    managed to accept a connection and created an endpoint. */
if (ep) {
+ // Create acceptor.
+ grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ acceptor->from_server = sp->server;
+ acceptor->port_index = sp->port_index;
+ acceptor->fd_index = 0;
sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
- &acceptor);
+ acceptor);
}
/* As we were notified from the IOCP of one and exactly one accept,
 the former socket we created has now either been destroyed or assigned
to the new connection. We need to create a new one for the next
connection. */
- GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept(exec_ctx, sp)));
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
+ if (0 == --sp->outstanding_calls) {
+ decrement_active_ports_and_notify_locked(exec_ctx, sp);
+ }
+ gpr_mu_unlock(&sp->server->mu);
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
@@ -434,6 +460,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
sp->server = s;
sp->socket = grpc_winsocket_create(sock, "listener");
sp->shutting_down = 0;
+ sp->outstanding_calls = 0;
sp->AcceptEx = AcceptEx;
sp->new_socket = INVALID_SOCKET;
sp->port = port;
@@ -466,10 +493,11 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
- sockname_temp.len = sizeof(struct sockaddr_storage);
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
if (0 == getsockname(sp->socket->socket,
(struct sockaddr *)sockname_temp.addr,
- &sockname_temp.len)) {
+ &sockname_temp_len)) {
+ sockname_temp.len = sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
@@ -530,7 +558,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
for (sp = s->head; sp; sp = sp->next) {
- GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept(exec_ctx, sp)));
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 05d6eeb240..6e2ad1dbe9 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -38,14 +38,17 @@
#include <limits.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
int grpc_tcp_trace = 0;
@@ -54,35 +57,46 @@ typedef struct {
grpc_endpoint base;
gpr_refcount refcount;
+ uv_write_t write_req;
+ uv_shutdown_t shutdown_req;
+
uv_tcp_t *handle;
grpc_closure *read_cb;
grpc_closure *write_cb;
- gpr_slice read_slice;
- gpr_slice_buffer *read_slices;
- gpr_slice_buffer *write_slices;
+ grpc_slice read_slice;
+ grpc_slice_buffer *read_slices;
+ grpc_slice_buffer *write_slices;
uv_buf_t *write_buffers;
+ grpc_resource_user *resource_user;
+
bool shutting_down;
+
char *peer_string;
grpc_pollset *pollset;
} grpc_tcp;
static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
-static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); }
+static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
+ gpr_free(tcp);
+}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
-#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
- int line) {
+#define TCP_UNREF(exec_ctx, tcp, reason) \
+ tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) \
+ tcp_ref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
+ const char *reason, const char *file, int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
reason, tcp->refcount.count, tcp->refcount.count - 1);
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -93,11 +107,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -106,16 +120,19 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = handle->data;
(void)suggested_size;
- tcp->read_slice = gpr_slice_malloc(GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
- buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
- buf->len = GPR_SLICE_LENGTH(tcp->read_slice);
+ tcp->read_slice = grpc_resource_user_slice_malloc(
+ &exec_ctx, tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+ buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
+ buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void read_callback(uv_stream_t *stream, ssize_t nread,
const uv_buf_t *buf) {
- gpr_slice sub;
+ grpc_slice sub;
grpc_error *error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = stream->data;
@@ -124,7 +141,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// Nothing happened. Wait for the next callback
return;
}
- TCP_UNREF(tcp, "read");
+ TCP_UNREF(&exec_ctx, tcp, "read");
tcp->read_cb = NULL;
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
@@ -132,8 +149,8 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
error = GRPC_ERROR_CREATE("EOF");
} else if (nread > 0) {
// Successful read
- sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
- gpr_slice_buffer_add(tcp->read_slices, sub);
+ sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
+ grpc_slice_buffer_add(tcp->read_slices, sub);
error = GRPC_ERROR_NONE;
if (grpc_tcp_trace) {
size_t i;
@@ -141,8 +158,8 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
gpr_log(GPR_DEBUG, "read: error=%s", str);
grpc_error_free_string(str);
for (i = 0; i < tcp->read_slices->count; i++) {
- char *dump = gpr_dump_slice(tcp->read_slices->slices[i],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
dump);
gpr_free(dump);
@@ -157,14 +174,14 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
}
static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *read_slices, grpc_closure *cb) {
+ grpc_slice_buffer *read_slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
int status;
grpc_error *error = GRPC_ERROR_NONE;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->read_slices = read_slices;
- gpr_slice_buffer_reset_and_unref(read_slices);
+ grpc_slice_buffer_reset_and_unref(read_slices);
TCP_REF(tcp, "read");
// TODO(murgatroid99): figure out what the return value here means
status =
@@ -187,7 +204,7 @@ static void write_callback(uv_write_t *req, int status) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure *cb = tcp->write_cb;
tcp->write_cb = NULL;
- TCP_UNREF(tcp, "write");
+ TCP_UNREF(&exec_ctx, tcp, "write");
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
@@ -198,27 +215,28 @@ static void write_callback(uv_write_t *req, int status) {
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
- gpr_free(req);
+ grpc_resource_user_free(&exec_ctx, tcp->resource_user,
+ sizeof(uv_buf_t) * tcp->write_slices->count);
grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *write_slices,
+ grpc_slice_buffer *write_slices,
grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_buf_t *buffers;
unsigned int buffer_count;
unsigned int i;
- gpr_slice *slice;
+ grpc_slice *slice;
uv_write_t *write_req;
if (grpc_tcp_trace) {
size_t j;
for (j = 0; j < write_slices->count; j++) {
- char *data = gpr_dump_slice(write_slices->slices[j],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ char *data = grpc_dump_slice(write_slices->slices[j],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
}
@@ -243,12 +261,15 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
+ sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
slice = &tcp->write_slices->slices[i];
- buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice);
- buffers[i].len = GPR_SLICE_LENGTH(*slice);
+ buffers[i].base = (char *)GRPC_SLICE_START_PTR(*slice);
+ buffers[i].len = GRPC_SLICE_LENGTH(*slice);
}
- write_req = gpr_malloc(sizeof(uv_write_t));
+ tcp->write_buffers = buffers;
+ write_req = &tcp->write_req;
write_req->data = tcp;
TCP_REF(tcp, "write");
// TODO(murgatroid99): figure out what the return value here means
@@ -274,13 +295,13 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
(void)pollset;
}
-static void shutdown_callback(uv_shutdown_t *req, int status) { gpr_free(req); }
+static void shutdown_callback(uv_shutdown_t *req, int status) {}
static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
tcp->shutting_down = true;
- uv_shutdown_t *req = gpr_malloc(sizeof(uv_shutdown_t));
+ uv_shutdown_t *req = &tcp->shutdown_req;
uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
}
}
@@ -289,7 +310,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
- TCP_UNREF(tcp, "destroy");
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char *uv_get_peer(grpc_endpoint *ep) {
@@ -297,24 +318,33 @@ static char *uv_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
+static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return tcp->resource_user;
+}
+
static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
-static grpc_endpoint_vtable vtable = {uv_endpoint_read,
- uv_endpoint_write,
- uv_get_workqueue,
- uv_add_to_pollset,
- uv_add_to_pollset_set,
- uv_endpoint_shutdown,
- uv_destroy,
- uv_get_peer};
+static int uv_get_fd(grpc_endpoint *ep) { return -1; }
+
+static grpc_endpoint_vtable vtable = {
+ uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
+ uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
+ uv_destroy, uv_get_resource_user, uv_get_peer,
+ uv_get_fd};
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
}
+ /* Disable Nagle's Algorithm */
+ uv_tcp_nodelay(handle, 1);
+
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->handle = handle;
@@ -322,6 +352,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
gpr_ref_init(&tcp->refcount, 1);
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
+ tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index eed41151ea..970fcafe4a 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -52,6 +52,8 @@ extern int grpc_tcp_trace;
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string);
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index a5f508a2c3..d4613b674e 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -40,10 +40,10 @@
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/sockaddr_windows.h"
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/log_windows.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -105,9 +105,11 @@ typedef struct grpc_tcp {
grpc_closure *read_cb;
grpc_closure *write_cb;
- gpr_slice read_slice;
- gpr_slice_buffer *write_slices;
- gpr_slice_buffer *read_slices;
+ grpc_slice read_slice;
+ grpc_slice_buffer *write_slices;
+ grpc_slice_buffer *read_slices;
+
+ grpc_resource_user *resource_user;
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
@@ -117,23 +119,25 @@ typedef struct grpc_tcp {
char *peer_string;
} grpc_tcp;
-static void tcp_free(grpc_tcp *tcp) {
+static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp);
}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(exec_ctx, tcp, reason) \
+ tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
- int line) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
+ const char *reason, const char *file, int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
reason, tcp->refcount.count, tcp->refcount.count - 1);
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -144,11 +148,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -160,7 +164,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
grpc_tcp *tcp = tcpp;
grpc_closure *cb = tcp->read_cb;
grpc_winsocket *socket = tcp->socket;
- gpr_slice sub;
+ grpc_slice sub;
grpc_winsocket_callback_info *info = &socket->read_info;
GRPC_ERROR_REF(error);
@@ -170,25 +174,25 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
char *utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE(utf8_message);
gpr_free(utf8_message);
- gpr_slice_unref(tcp->read_slice);
+ grpc_slice_unref(tcp->read_slice);
} else {
if (info->bytes_transfered != 0 && !tcp->shutting_down) {
- sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
- gpr_slice_buffer_add(tcp->read_slices, sub);
+ sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
+ grpc_slice_buffer_add(tcp->read_slices, sub);
} else {
- gpr_slice_unref(tcp->read_slice);
+ grpc_slice_unref(tcp->read_slice);
error = GRPC_ERROR_CREATE("End of TCP stream");
}
}
}
tcp->read_cb = NULL;
- TCP_UNREF(tcp, "read");
+ TCP_UNREF(exec_ctx, tcp, "read");
grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *read_slices, grpc_closure *cb) {
+ grpc_slice_buffer *read_slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->read_info;
@@ -205,13 +209,13 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->read_cb = cb;
tcp->read_slices = read_slices;
- gpr_slice_buffer_reset_and_unref(read_slices);
+ grpc_slice_buffer_reset_and_unref(read_slices);
- tcp->read_slice = gpr_slice_malloc(8192);
+ tcp->read_slice = grpc_slice_malloc(8192);
- buffer.len = (ULONG)GPR_SLICE_LENGTH(
+ buffer.len = (ULONG)GRPC_SLICE_LENGTH(
tcp->read_slice); // we know slice size fits in 32bit.
- buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
+ buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
TCP_REF(tcp, "read");
@@ -267,13 +271,13 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
}
}
- TCP_UNREF(tcp, "write");
+ TCP_UNREF(exec_ctx, tcp, "write");
grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
/* Initiates a write. */
static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- gpr_slice_buffer *slices, grpc_closure *cb) {
+ grpc_slice_buffer *slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info;
@@ -300,10 +304,10 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
for (i = 0; i < tcp->write_slices->count; i++) {
- len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
+ len = GRPC_SLICE_LENGTH(tcp->write_slices->slices[i]);
GPR_ASSERT(len <= ULONG_MAX);
buffers[i].len = (ULONG)len;
- buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
+ buffers[i].buf = (char *)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
@@ -335,7 +339,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
- TCP_UNREF(tcp, "write");
+ TCP_UNREF(exec_ctx, tcp, "write");
grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
NULL);
return;
@@ -377,12 +381,13 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
tcp->shutting_down = 1;
grpc_winsocket_shutdown(tcp->socket);
gpr_mu_unlock(&tcp->mu);
+ grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
- TCP_UNREF(tcp, "destroy");
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char *win_get_peer(grpc_endpoint *ep) {
@@ -392,6 +397,13 @@ static char *win_get_peer(grpc_endpoint *ep) {
static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
+static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return tcp->resource_user;
+}
+
+static int win_get_fd(grpc_endpoint *ep) { return -1; }
+
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_get_workqueue,
@@ -399,9 +411,13 @@ static grpc_endpoint_vtable vtable = {win_read,
win_add_to_pollset_set,
win_shutdown,
win_destroy,
- win_get_peer};
+ win_get_resource_user,
+ win_get_peer,
+ win_get_fd};
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
@@ -411,6 +427,7 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
grpc_closure_init(&tcp->on_read, on_read, tcp);
grpc_closure_init(&tcp->on_write, on_write, tcp);
tcp->peer_string = gpr_strdup(peer_string);
+ tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 86d777235e..4402de1c38 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -50,7 +50,9 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index fd0c7a0f9d..3c24ea9afa 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -388,7 +388,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
/* Try listening on IPv6 first. */
addr = &wild6;
// TODO(rjshade): Test and propagate the returned grpc_error*:
- grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd);
+ GRPC_ERROR_UNREF(grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP,
+ &dsmode, &fd));
allocated_port1 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
@@ -402,7 +403,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
}
// TODO(rjshade): Test and propagate the returned grpc_error*:
- grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd);
+ GRPC_ERROR_UNREF(grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP,
+ &dsmode, &fd));
if (fd < 0) {
gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
}
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.c b/src/core/lib/iomgr/wakeup_fd_pipe.c
index 183f0eb930..e186d63683 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.c
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.c
@@ -95,6 +95,8 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) {
static int pipe_check_availability(void) {
grpc_wakeup_fd fd;
+ fd.read_fd = fd.write_fd = -1;
+
if (pipe_init(&fd) == GRPC_ERROR_NONE) {
pipe_destroy(&fd);
return 1;
diff --git a/src/core/lib/json/json.c b/src/core/lib/json/json.c
index 5b583a1f2e..48b13686d7 100644
--- a/src/core/lib/json/json.c
+++ b/src/core/lib/json/json.c
@@ -37,15 +37,15 @@
#include "src/core/lib/json/json.h"
-grpc_json *grpc_json_create(grpc_json_type type) {
- grpc_json *json = gpr_malloc(sizeof(*json));
+grpc_json* grpc_json_create(grpc_json_type type) {
+ grpc_json* json = gpr_malloc(sizeof(*json));
memset(json, 0, sizeof(*json));
json->type = type;
return json;
}
-void grpc_json_destroy(grpc_json *json) {
+void grpc_json_destroy(grpc_json* json) {
while (json->child) {
grpc_json_destroy(json->child);
}
diff --git a/src/core/lib/json/json.h b/src/core/lib/json/json.h
index 681df4bb77..7111db0b52 100644
--- a/src/core/lib/json/json.h
+++ b/src/core/lib/json/json.h
@@ -42,14 +42,14 @@
* are not owned by it.
*/
typedef struct grpc_json {
- struct grpc_json *next;
- struct grpc_json *prev;
- struct grpc_json *child;
- struct grpc_json *parent;
+ struct grpc_json* next;
+ struct grpc_json* prev;
+ struct grpc_json* child;
+ struct grpc_json* parent;
grpc_json_type type;
- const char *key;
- const char *value;
+ const char* key;
+ const char* value;
} grpc_json;
/* The next two functions are going to parse the input string, and
@@ -65,8 +65,8 @@ typedef struct grpc_json {
*
* Delete the allocated tree afterward using grpc_json_destroy().
*/
-grpc_json *grpc_json_parse_string_with_len(char *input, size_t size);
-grpc_json *grpc_json_parse_string(char *input);
+grpc_json* grpc_json_parse_string_with_len(char* input, size_t size);
+grpc_json* grpc_json_parse_string(char* input);
/* This function will create a new string using gpr_realloc, and will
* deserialize the grpc_json tree into it. It'll be zero-terminated,
@@ -76,13 +76,13 @@ grpc_json *grpc_json_parse_string(char *input);
* If indent is 0, then newlines will be suppressed as well, and the
* output will be condensed at its maximum.
*/
-char *grpc_json_dump_to_string(grpc_json *json, int indent);
+char* grpc_json_dump_to_string(grpc_json* json, int indent);
/* Use these to create or delete a grpc_json object.
* Deletion is recursive. We will not attempt to free any of the strings
* in any of the objects of that tree.
*/
-grpc_json *grpc_json_create(grpc_json_type type);
-void grpc_json_destroy(grpc_json *json);
+grpc_json* grpc_json_create(grpc_json_type type);
+void grpc_json_destroy(grpc_json* json);
#endif /* GRPC_CORE_LIB_JSON_JSON_H */
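A short usage sketch for the API declared above. The parser takes a writable buffer (its signature is char*, not const char*), and the dumped string is allocated with gpr_realloc, so it is released with gpr_free; the sample input is a placeholder:

    /* Sketch: parse, re-serialize, and free a JSON document. */
    char buf[] = "{\"name\": \"grpc\", \"core\": true}";
    grpc_json* json = grpc_json_parse_string(buf);
    if (json != NULL) {
      char* out = grpc_json_dump_to_string(json, 2 /* indent */);
      gpr_log(GPR_DEBUG, "round-tripped: %s", out);
      gpr_free(out);
      grpc_json_destroy(json);
    }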
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 6fb5b5b15a..85b3bc5350 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -141,8 +141,8 @@ grpc_channel_credentials_duplicate_without_call_credentials(
/* --- grpc_credentials_md. --- */
typedef struct {
- gpr_slice key;
- gpr_slice value;
+ grpc_slice key;
+ grpc_slice value;
} grpc_credentials_md;
typedef struct {
@@ -157,7 +157,7 @@ grpc_credentials_md_store *grpc_credentials_md_store_create(
/* Will ref key and value. */
void grpc_credentials_md_store_add(grpc_credentials_md_store *store,
- gpr_slice key, gpr_slice value);
+ grpc_slice key, grpc_slice value);
void grpc_credentials_md_store_add_cstrings(grpc_credentials_md_store *store,
const char *key, const char *value);
grpc_credentials_md_store *grpc_credentials_md_store_ref(
diff --git a/src/core/lib/security/credentials/credentials_metadata.c b/src/core/lib/security/credentials/credentials_metadata.c
index 6a352aab3a..e6cb567734 100644
--- a/src/core/lib/security/credentials/credentials_metadata.c
+++ b/src/core/lib/security/credentials/credentials_metadata.c
@@ -59,11 +59,11 @@ grpc_credentials_md_store *grpc_credentials_md_store_create(
}
void grpc_credentials_md_store_add(grpc_credentials_md_store *store,
- gpr_slice key, gpr_slice value) {
+ grpc_slice key, grpc_slice value) {
if (store == NULL) return;
store_ensure_capacity(store);
- store->entries[store->num_entries].key = gpr_slice_ref(key);
- store->entries[store->num_entries].value = gpr_slice_ref(value);
+ store->entries[store->num_entries].key = grpc_slice_ref(key);
+ store->entries[store->num_entries].value = grpc_slice_ref(value);
store->num_entries++;
}
@@ -72,9 +72,9 @@ void grpc_credentials_md_store_add_cstrings(grpc_credentials_md_store *store,
const char *value) {
if (store == NULL) return;
store_ensure_capacity(store);
- store->entries[store->num_entries].key = gpr_slice_from_copied_string(key);
+ store->entries[store->num_entries].key = grpc_slice_from_copied_string(key);
store->entries[store->num_entries].value =
- gpr_slice_from_copied_string(value);
+ grpc_slice_from_copied_string(value);
store->num_entries++;
}
@@ -91,8 +91,8 @@ void grpc_credentials_md_store_unref(grpc_credentials_md_store *store) {
if (store->entries != NULL) {
size_t i;
for (i = 0; i < store->num_entries; i++) {
- gpr_slice_unref(store->entries[i].key);
- gpr_slice_unref(store->entries[i].value);
+ grpc_slice_unref(store->entries[i].key);
+ grpc_slice_unref(store->entries[i].value);
}
gpr_free(store->entries);
}
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index 312a3d4f90..afe0e3d357 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -45,6 +45,7 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/credentials/jwt/jwt_credentials.h"
#include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
@@ -124,11 +125,14 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
- &exec_ctx, &context, &detector.pollent, &request,
+ &exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector),
&detector.response);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
grpc_exec_ctx_flush(&exec_ctx);
@@ -171,7 +175,7 @@ static grpc_error *create_default_creds_from_path(
grpc_auth_json_key key;
grpc_auth_refresh_token token;
grpc_call_credentials *result = NULL;
- gpr_slice creds_data = gpr_empty_slice();
+ grpc_slice creds_data = gpr_empty_slice();
grpc_error *error = GRPC_ERROR_NONE;
if (creds_path == NULL) {
error = GRPC_ERROR_CREATE("creds_path unset");
@@ -182,9 +186,9 @@ static grpc_error *create_default_creds_from_path(
goto end;
}
json = grpc_json_parse_string_with_len(
- (char *)GPR_SLICE_START_PTR(creds_data), GPR_SLICE_LENGTH(creds_data));
+ (char *)GRPC_SLICE_START_PTR(creds_data), GRPC_SLICE_LENGTH(creds_data));
if (json == NULL) {
- char *dump = gpr_dump_slice(creds_data, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ char *dump = grpc_dump_slice(creds_data, GPR_DUMP_HEX | GPR_DUMP_ASCII);
error = grpc_error_set_str(GRPC_ERROR_CREATE("Failed to parse JSON"),
GRPC_ERROR_STR_RAW_BYTES, dump);
gpr_free(dump);
@@ -221,7 +225,7 @@ static grpc_error *create_default_creds_from_path(
end:
GPR_ASSERT((result == NULL) + (error == GRPC_ERROR_NONE) == 1);
if (creds_path != NULL) gpr_free(creds_path);
- gpr_slice_unref(creds_data);
+ grpc_slice_unref(creds_data);
if (json != NULL) grpc_json_destroy(json);
*creds = result;
return error;
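The resource-quota plumbing added here, and repeated below for the OAuth2 and JWT-verifier fetches, always has the same shape: create a short-lived named quota, hand it to grpc_httpcli_get or grpc_httpcli_post, then drop the caller's reference immediately (the immediate unref at these call sites implies the HTTP client takes its own reference for the lifetime of the request). A sketch of that shape, with hypothetical names and parameter types mirroring the call sites in this commit:

    #include "src/core/lib/http/httpcli.h"
    #include "src/core/lib/iomgr/resource_quota.h"

    /* Hypothetical helper illustrating the quota-per-request pattern. */
    static void fetch_with_private_quota(grpc_exec_ctx *exec_ctx,
                                         grpc_httpcli_context *http_ctx,
                                         grpc_polling_entity *pollent,
                                         grpc_httpcli_request *request,
                                         gpr_timespec deadline,
                                         grpc_closure *on_response,
                                         grpc_http_response *response) {
      grpc_resource_quota *quota = grpc_resource_quota_create("example_fetch");
      grpc_httpcli_get(exec_ctx, http_ctx, pollent, quota, request, deadline,
                       on_response, response);
      /* The client keeps whatever reference it needs; release ours right away. */
      grpc_resource_quota_internal_unref(exec_ctx, quota);
    }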
diff --git a/src/core/lib/security/credentials/jwt/json_token.h b/src/core/lib/security/credentials/jwt/json_token.h
index 07fc5bf0e0..c13eb55803 100644
--- a/src/core/lib/security/credentials/jwt/json_token.h
+++ b/src/core/lib/security/credentials/jwt/json_token.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include <openssl/rsa.h>
#include "src/core/lib/json/json.h"
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.c b/src/core/lib/security/credentials/jwt/jwt_credentials.c
index f87ba0ce8d..3daf0f4ef7 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.c
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.c
@@ -144,17 +144,44 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
return &c->base;
}
+static char *redact_private_key(const char *json_key) {
+ char *json_copy = gpr_strdup(json_key);
+ grpc_json *json = grpc_json_parse_string(json_copy);
+ if (!json) {
+ gpr_free(json_copy);
+ return gpr_strdup("<Json failed to parse.>");
+ }
+ const char *redacted = "<redacted>";
+ grpc_json *current = json->child;
+ while (current) {
+ if (current->type == GRPC_JSON_STRING &&
+ strcmp(current->key, "private_key") == 0) {
+ current->value = (char *)redacted;
+ break;
+ }
+ current = current->next;
+ }
+ char *clean_json = grpc_json_dump_to_string(json, 2);
+ gpr_free(json_copy);
+ grpc_json_destroy(json);
+ return clean_json;
+}
+
grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
const char *json_key, gpr_timespec token_lifetime, void *reserved) {
- GRPC_API_TRACE(
- "grpc_service_account_jwt_access_credentials_create("
- "json_key=%s, "
- "token_lifetime="
- "gpr_timespec { tv_sec: %" PRId64
- ", tv_nsec: %d, clock_type: %d }, "
- "reserved=%p)",
- 5, (json_key, token_lifetime.tv_sec, token_lifetime.tv_nsec,
- (int)token_lifetime.clock_type, reserved));
+ if (grpc_api_trace) {
+ char *clean_json = redact_private_key(json_key);
+ gpr_log(GPR_INFO,
+ "grpc_service_account_jwt_access_credentials_create("
+ "json_key=%s, "
+ "token_lifetime="
+ "gpr_timespec { tv_sec: %" PRId64
+ ", tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ clean_json, token_lifetime.tv_sec, token_lifetime.tv_nsec,
+ (int)token_lifetime.clock_type, reserved);
+ gpr_free(clean_json);
+ }
GPR_ASSERT(reserved == NULL);
return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_auth_json_key_create_from_string(json_key), token_lifetime);
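redact_private_key relies on two properties of the JSON helpers: grpc_json_destroy never frees key or value strings, so a node's value can safely be pointed at a string literal, and grpc_json_dump_to_string copies everything into a fresh buffer, so the parsed copy can be released right after dumping. A stripped-down sketch of the same redact-then-log technique for an arbitrary field (function and variable names are hypothetical):

    #include <string.h>

    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>
    #include "src/core/lib/json/json.h"

    /* Hypothetical generic variant of the redaction trick used above. */
    static char *dump_with_field_redacted(const char *json_text,
                                          const char *field) {
      char *copy = gpr_strdup(json_text); /* the parser mutates its input */
      grpc_json *json = grpc_json_parse_string(copy);
      if (json == NULL) {
        gpr_free(copy);
        return gpr_strdup("<invalid JSON>");
      }
      for (grpc_json *cur = json->child; cur != NULL; cur = cur->next) {
        if (cur->type == GRPC_JSON_STRING && cur->key != NULL &&
            strcmp(cur->key, field) == 0) {
          cur->value = "<redacted>"; /* safe: destroy() never frees values */
          break;
        }
      }
      char *result = grpc_json_dump_to_string(json, 2);
      grpc_json_destroy(json);
      gpr_free(copy); /* only now: the tree pointed into this buffer */
      return result;
    }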
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 73eb2e3258..03097a57c0 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -39,6 +39,7 @@
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/util/b64.h"
+#include "src/core/lib/support/string.h"
#include "src/core/lib/tsi/ssl_types.h"
#include <grpc/support/alloc.h>
@@ -85,18 +86,18 @@ static const EVP_MD *evp_md_from_alg(const char *alg) {
}
static grpc_json *parse_json_part_from_jwt(const char *str, size_t len,
- gpr_slice *buffer) {
+ grpc_slice *buffer) {
grpc_json *json;
*buffer = grpc_base64_decode_with_len(str, len, 1);
- if (GPR_SLICE_IS_EMPTY(*buffer)) {
+ if (GRPC_SLICE_IS_EMPTY(*buffer)) {
gpr_log(GPR_ERROR, "Invalid base64.");
return NULL;
}
- json = grpc_json_parse_string_with_len((char *)GPR_SLICE_START_PTR(*buffer),
- GPR_SLICE_LENGTH(*buffer));
+ json = grpc_json_parse_string_with_len((char *)GRPC_SLICE_START_PTR(*buffer),
+ GRPC_SLICE_LENGTH(*buffer));
if (json == NULL) {
- gpr_slice_unref(*buffer);
+ grpc_slice_unref(*buffer);
gpr_log(GPR_ERROR, "JSON parsing error.");
}
return json;
@@ -129,16 +130,16 @@ typedef struct {
const char *kid;
const char *typ;
/* TODO(jboeuf): Add others as needed (jku, jwk, x5u, x5c and so on...). */
- gpr_slice buffer;
+ grpc_slice buffer;
} jose_header;
static void jose_header_destroy(jose_header *h) {
- gpr_slice_unref(h->buffer);
+ grpc_slice_unref(h->buffer);
gpr_free(h);
}
/* Takes ownership of json and buffer. */
-static jose_header *jose_header_from_json(grpc_json *json, gpr_slice buffer) {
+static jose_header *jose_header_from_json(grpc_json *json, grpc_slice buffer) {
grpc_json *cur;
jose_header *h = gpr_malloc(sizeof(jose_header));
memset(h, 0, sizeof(jose_header));
@@ -190,12 +191,12 @@ struct grpc_jwt_claims {
gpr_timespec nbf;
grpc_json *json;
- gpr_slice buffer;
+ grpc_slice buffer;
};
void grpc_jwt_claims_destroy(grpc_jwt_claims *claims) {
grpc_json_destroy(claims->json);
- gpr_slice_unref(claims->buffer);
+ grpc_slice_unref(claims->buffer);
gpr_free(claims);
}
@@ -240,7 +241,7 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
}
/* Takes ownership of json and buffer even in case of failure. */
-grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
+grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer) {
grpc_json *cur;
grpc_jwt_claims *claims = gpr_malloc(sizeof(grpc_jwt_claims));
memset(claims, 0, sizeof(grpc_jwt_claims));
@@ -305,6 +306,17 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
return GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE;
}
+ /* This should probably be up to the upper layer to decide, but let's hardcode
+ the 99% use case here for email issuers, where the JWT must be
+ self-issued. */
+ if (grpc_jwt_issuer_email_domain(claims->iss) != NULL &&
+ claims->sub != NULL && strcmp(claims->iss, claims->sub) != 0) {
+ gpr_log(GPR_ERROR,
+ "Email issuer (%s) cannot assert a subject (%s) other than itself.",
+ claims->iss, claims->sub);
+ return GRPC_JWT_VERIFIER_BAD_SUBJECT;
+ }
+
if (audience == NULL) {
audience_ok = claims->aud == NULL;
} else {
@@ -333,8 +345,8 @@ typedef struct {
jose_header *header;
grpc_jwt_claims *claims;
char *audience;
- gpr_slice signature;
- gpr_slice signed_data;
+ grpc_slice signature;
+ grpc_slice signed_data;
void *user_data;
grpc_jwt_verification_done_cb user_cb;
grpc_http_response responses[HTTP_RESPONSE_COUNT];
@@ -343,7 +355,7 @@ typedef struct {
/* Takes ownership of the header, claims and signature. */
static verifier_cb_ctx *verifier_cb_ctx_create(
grpc_jwt_verifier *verifier, grpc_pollset *pollset, jose_header *header,
- grpc_jwt_claims *claims, const char *audience, gpr_slice signature,
+ grpc_jwt_claims *claims, const char *audience, grpc_slice signature,
const char *signed_jwt, size_t signed_jwt_len, void *user_data,
grpc_jwt_verification_done_cb cb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -355,7 +367,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
ctx->audience = gpr_strdup(audience);
ctx->claims = claims;
ctx->signature = signature;
- ctx->signed_data = gpr_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
+ ctx->signed_data = grpc_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
ctx->user_data = user_data;
ctx->user_cb = cb;
grpc_exec_ctx_finish(&exec_ctx);
@@ -365,8 +377,8 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) {
if (ctx->audience != NULL) gpr_free(ctx->audience);
if (ctx->claims != NULL) grpc_jwt_claims_destroy(ctx->claims);
- gpr_slice_unref(ctx->signature);
- gpr_slice_unref(ctx->signed_data);
+ grpc_slice_unref(ctx->signature);
+ grpc_slice_unref(ctx->signed_data);
jose_header_destroy(ctx->header);
for (size_t i = 0; i < HTTP_RESPONSE_COUNT; i++) {
grpc_http_response_destroy(&ctx->responses[i]);
@@ -449,17 +461,17 @@ end:
static BIGNUM *bignum_from_base64(const char *b64) {
BIGNUM *result = NULL;
- gpr_slice bin;
+ grpc_slice bin;
if (b64 == NULL) return NULL;
bin = grpc_base64_decode(b64, 1);
- if (GPR_SLICE_IS_EMPTY(bin)) {
+ if (GRPC_SLICE_IS_EMPTY(bin)) {
gpr_log(GPR_ERROR, "Invalid base64 for big num.");
return NULL;
}
- result = BN_bin2bn(GPR_SLICE_START_PTR(bin),
- TSI_SIZE_AS_SIZE(GPR_SLICE_LENGTH(bin)), NULL);
- gpr_slice_unref(bin);
+ result = BN_bin2bn(GRPC_SLICE_START_PTR(bin),
+ TSI_SIZE_AS_SIZE(GRPC_SLICE_LENGTH(bin)), NULL);
+ grpc_slice_unref(bin);
return result;
}
@@ -553,7 +565,7 @@ static EVP_PKEY *find_verification_key(const grpc_json *json,
}
static int verify_jwt_signature(EVP_PKEY *key, const char *alg,
- gpr_slice signature, gpr_slice signed_data) {
+ grpc_slice signature, grpc_slice signed_data) {
EVP_MD_CTX *md_ctx = EVP_MD_CTX_create();
const EVP_MD *md = evp_md_from_alg(alg);
int result = 0;
@@ -567,13 +579,13 @@ static int verify_jwt_signature(EVP_PKEY *key, const char *alg,
gpr_log(GPR_ERROR, "EVP_DigestVerifyInit failed.");
goto end;
}
- if (EVP_DigestVerifyUpdate(md_ctx, GPR_SLICE_START_PTR(signed_data),
- GPR_SLICE_LENGTH(signed_data)) != 1) {
+ if (EVP_DigestVerifyUpdate(md_ctx, GRPC_SLICE_START_PTR(signed_data),
+ GRPC_SLICE_LENGTH(signed_data)) != 1) {
gpr_log(GPR_ERROR, "EVP_DigestVerifyUpdate failed.");
goto end;
}
- if (EVP_DigestVerifyFinal(md_ctx, GPR_SLICE_START_PTR(signature),
- GPR_SLICE_LENGTH(signature)) != 1) {
+ if (EVP_DigestVerifyFinal(md_ctx, GRPC_SLICE_START_PTR(signature),
+ GRPC_SLICE_LENGTH(signature)) != 1) {
gpr_log(GPR_ERROR, "JWT signature verification failed.");
goto end;
}
@@ -657,11 +669,17 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
grpc_closure_create(on_keys_retrieved, ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_json_destroy(json);
gpr_free(req.host);
return;
@@ -699,10 +717,26 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain,
GPR_ASSERT(v->num_mappings <= v->allocated_mappings);
}
+/* Very non-sophisticated way to detect an email address. Should be good
+ enough for now... */
+const char *grpc_jwt_issuer_email_domain(const char *issuer) {
+ const char *at_sign = strchr(issuer, '@');
+ if (at_sign == NULL) return NULL;
+ const char *email_domain = at_sign + 1;
+ if (*email_domain == '\0') return NULL;
+ const char *dot = strrchr(email_domain, '.');
+ if (dot == NULL || dot == email_domain) return email_domain;
+ GPR_ASSERT(dot > email_domain);
+ /* There may be a subdomain, we just want the domain. */
+ dot = gpr_memrchr(email_domain, '.', (size_t)(dot - email_domain));
+ if (dot == NULL) return email_domain;
+ return dot + 1;
+}
+
/* Takes ownership of ctx. */
static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
verifier_cb_ctx *ctx) {
- const char *at_sign;
+ const char *email_domain;
grpc_closure *http_cb;
char *path_prefix = NULL;
const char *iss;
@@ -727,13 +761,9 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
Nobody seems to implement the account/email/webfinger part 2. of the spec
so we will rely instead on email/url mappings if we detect such an issuer.
Part 4, on the other hand is implemented by both google and salesforce. */
-
- /* Very non-sophisticated way to detect an email address. Should be good
- enough for now... */
- at_sign = strchr(iss, '@');
- if (at_sign != NULL) {
+ email_domain = grpc_jwt_issuer_email_domain(iss);
+ if (email_domain != NULL) {
email_key_mapping *mapping;
- const char *email_domain = at_sign + 1;
GPR_ASSERT(ctx->verifier != NULL);
mapping = verifier_get_mapping(ctx->verifier, email_domain);
if (mapping == NULL) {
@@ -764,10 +794,16 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
rsp_idx = HTTP_RESPONSE_OPENID;
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
http_cb, &ctx->responses[rsp_idx]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
return;
@@ -787,9 +823,9 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
grpc_json *json;
jose_header *header = NULL;
grpc_jwt_claims *claims = NULL;
- gpr_slice header_buffer;
- gpr_slice claims_buffer;
- gpr_slice signature;
+ grpc_slice header_buffer;
+ grpc_slice claims_buffer;
+ grpc_slice signature;
size_t signed_jwt_len;
const char *cur = jwt;
@@ -812,7 +848,7 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
signed_jwt_len = (size_t)(dot - jwt);
cur = dot + 1;
signature = grpc_base64_decode(cur, 1);
- if (GPR_SLICE_IS_EMPTY(signature)) goto error;
+ if (GRPC_SLICE_IS_EMPTY(signature)) goto error;
retrieve_key_and_verify(
exec_ctx,
verifier_cb_ctx_create(verifier, pollset, header, claims, audience,
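The new grpc_jwt_issuer_email_domain helper keeps only the last two labels of the part after '@', which is also why GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN below shrinks from developer.gserviceaccount.com to gserviceaccount.com; together with the new check in grpc_jwt_claims_check, an email issuer whose sub differs from iss now fails with GRPC_JWT_VERIFIER_BAD_SUBJECT. Its expected behavior, written as a hypothetical test body with illustrative inputs:

    #include <string.h>

    #include <grpc/support/log.h>
    #include "src/core/lib/security/credentials/jwt/jwt_verifier.h"

    static void issuer_email_domain_expectations(void) {
      GPR_ASSERT(grpc_jwt_issuer_email_domain("https://accounts.example.com") ==
                 NULL); /* not an email issuer */
      GPR_ASSERT(strcmp(grpc_jwt_issuer_email_domain("admin@example.com"),
                        "example.com") == 0);
      GPR_ASSERT(strcmp(grpc_jwt_issuer_email_domain(
                            "svc@project.iam.gserviceaccount.com"),
                        "gserviceaccount.com") == 0); /* subdomains dropped */
      GPR_ASSERT(strcmp(grpc_jwt_issuer_email_domain("someone@localhost"),
                        "localhost") == 0); /* no dot: whole host is returned */
    }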
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index b0f6d1c240..54ff9b05e5 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -37,14 +37,13 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/json/json.h"
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include <grpc/support/time.h>
/* --- Constants. --- */
#define GRPC_OPENID_CONFIG_URL_SUFFIX "/.well-known/openid-configuration"
-#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN \
- "developer.gserviceaccount.com"
+#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN "gserviceaccount.com"
#define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \
"www.googleapis.com/robot/v1/metadata/x509"
@@ -57,6 +56,7 @@ typedef enum {
GRPC_JWT_VERIFIER_BAD_AUDIENCE,
GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR,
GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE,
+ GRPC_JWT_VERIFIER_BAD_SUBJECT,
GRPC_JWT_VERIFIER_GENERIC_ERROR
} grpc_jwt_verifier_status;
@@ -129,8 +129,9 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
/* --- TESTING ONLY exposed functions. --- */
-grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer);
+grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer);
grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
const char *audience);
+const char *grpc_jwt_issuer_email_domain(const char *issuer);
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index c22ea5c468..b3625b22c0 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -307,9 +307,15 @@ static void compute_engine_fetch_oauth2(
request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
- grpc_httpcli_get(exec_ctx, httpcli_context, pollent, &request, deadline,
- grpc_closure_create(response_cb, metadata_req),
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials");
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
+ deadline, grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
grpc_call_credentials *grpc_google_compute_engine_credentials_create(
@@ -357,10 +363,16 @@ static void refresh_token_fetch_oauth2(
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
- grpc_httpcli_post(exec_ctx, httpcli_context, pollent, &request, body,
- strlen(body), deadline,
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials_refresh");
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
+ &request, body, strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(body);
}
@@ -380,15 +392,32 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
return &c->base.base;
}
+static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) {
+ if (strcmp(token->type, GRPC_AUTH_JSON_TYPE_INVALID) == 0) {
+ return gpr_strdup("<Invalid json token>");
+ }
+ char *loggable_token = NULL;
+ gpr_asprintf(&loggable_token,
+ "{\n type: %s\n client_id: %s\n client_secret: "
+ "<redacted>\n refresh_token: <redacted>\n}",
+ token->type, token->client_id);
+ return loggable_token;
+}
+
grpc_call_credentials *grpc_google_refresh_token_credentials_create(
const char *json_refresh_token, void *reserved) {
- GRPC_API_TRACE(
- "grpc_refresh_token_credentials_create(json_refresh_token=%s, "
- "reserved=%p)",
- 2, (json_refresh_token, reserved));
+ grpc_auth_refresh_token token =
+ grpc_auth_refresh_token_create_from_string(json_refresh_token);
+ if (grpc_api_trace) {
+ char *loggable_token = create_loggable_refresh_token(&token);
+ gpr_log(GPR_INFO,
+ "grpc_refresh_token_credentials_create(json_refresh_token=%s, "
+ "reserved=%p)",
+ loggable_token, reserved);
+ gpr_free(loggable_token);
+ }
GPR_ASSERT(reserved == NULL);
- return grpc_refresh_token_credentials_create_from_auth_refresh_token(
- grpc_auth_refresh_token_create_from_string(json_refresh_token));
+ return grpc_refresh_token_credentials_create_from_auth_refresh_token(token);
}
//
@@ -418,9 +447,9 @@ grpc_call_credentials *grpc_access_token_credentials_create(
gpr_malloc(sizeof(grpc_access_token_credentials));
char *token_md_value;
GRPC_API_TRACE(
- "grpc_access_token_credentials_create(access_token=%s, "
+ "grpc_access_token_credentials_create(access_token=<redacted>, "
"reserved=%p)",
- 2, (access_token, reserved));
+ 1, (reserved));
GPR_ASSERT(reserved == NULL);
memset(c, 0, sizeof(grpc_access_token_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
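With API tracing enabled, the refresh-token and access-token entry points now log a sanitized view instead of echoing raw secrets. An illustrative call follows; the token blob and the exact log text in the comment are made up, and the real formatting comes from create_loggable_refresh_token above.

    #include <grpc/grpc_security.h>

    static void tracing_example(void) {
      /* Made-up refresh-token JSON, for illustration only. */
      grpc_call_credentials *creds = grpc_google_refresh_token_credentials_create(
          "{\"type\": \"authorized_user\", \"client_id\": \"example-client\","
          " \"client_secret\": \"hunter2\", \"refresh_token\": \"1/abcdef\"}",
          NULL);
      /* With api tracing on this logs roughly:
           grpc_refresh_token_credentials_create(json_refresh_token={
            type: authorized_user
            client_id: example-client
            client_secret: <redacted>
            refresh_token: <redacted>
           }, reserved=(nil))
         while grpc_access_token_credentials_create() now logs
         access_token=<redacted> instead of the token itself. */
      grpc_call_credentials_release(creds);
    }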
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c
index 905de3723e..5d950098a0 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c
@@ -93,17 +93,19 @@ static void plugin_md_request_metadata_ready(void *request,
} else if (num_md > 0) {
md_array = gpr_malloc(num_md * sizeof(grpc_credentials_md));
for (i = 0; i < num_md; i++) {
- md_array[i].key = gpr_slice_from_copied_string(md[i].key);
+ md_array[i].key = grpc_slice_from_copied_string(md[i].key);
md_array[i].value =
- gpr_slice_from_copied_buffer(md[i].value, md[i].value_length);
+ grpc_slice_from_copied_buffer(md[i].value, md[i].value_length);
}
r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK,
NULL);
for (i = 0; i < num_md; i++) {
- gpr_slice_unref(md_array[i].key);
- gpr_slice_unref(md_array[i].value);
+ grpc_slice_unref(md_array[i].key);
+ grpc_slice_unref(md_array[i].value);
}
gpr_free(md_array);
+ } else if (num_md == 0) {
+ r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_OK, NULL);
}
}
gpr_free(r);
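The new num_md == 0 branch matters for plugins that legitimately have nothing to attach: previously the wrapped callback was only invoked on error or when at least one metadata element was returned, so such a request never completed. A plugin like the following now works; it is a hypothetical sketch against the C-core plugin API of this vintage, with made-up names.

    #include <grpc/grpc_security.h>

    /* Hypothetical no-op metadata plugin: reports success with zero metadata. */
    static void noop_get_metadata(void *state, grpc_auth_metadata_context context,
                                  grpc_credentials_plugin_metadata_cb cb,
                                  void *user_data) {
      (void)state;
      (void)context;
      cb(user_data, NULL /* md */, 0 /* num_md */, GRPC_STATUS_OK, NULL);
    }

    static void noop_destroy(void *state) { (void)state; }

    static grpc_call_credentials *make_noop_plugin_creds(void) {
      grpc_metadata_credentials_plugin plugin = {noop_get_metadata, noop_destroy,
                                                 NULL /* state */, "noop-plugin"};
      return grpc_metadata_credentials_create_from_plugin(plugin, NULL);
    }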
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index b366d1410f..da897296e4 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -92,7 +92,7 @@ static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_status_code status, const char *error_msg) {
call_data *calld = elem->call_data;
gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
- gpr_slice error_slice = gpr_slice_from_copied_string(error_msg);
+ grpc_slice error_slice = grpc_slice_from_copied_string(error_msg);
grpc_transport_stream_op_add_close(&calld->op, status, &error_slice);
grpc_call_next_op(exec_ctx, elem, &calld->op);
}
@@ -121,8 +121,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
- grpc_mdelem_from_slices(gpr_slice_ref(md_elems[i].key),
- gpr_slice_ref(md_elems[i].value)));
+ grpc_mdelem_from_slices(grpc_slice_ref(md_elems[i].key),
+ grpc_slice_ref(md_elems[i].value)));
}
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -303,9 +303,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
grpc_security_connector *sc =
grpc_find_security_connector_in_args(args->channel_args);
grpc_auth_context *auth_context =
@@ -327,6 +327,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
sc, "client_auth_filter");
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter");
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -341,14 +342,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter");
}
-const grpc_channel_filter grpc_client_auth_filter = {auth_start_transport_op,
- grpc_channel_next_op,
- sizeof(call_data),
- init_call_elem,
- set_pollset_or_pollset_set,
- destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_call_next_get_peer,
- "client-auth"};
+const grpc_channel_filter grpc_client_auth_filter = {
+ auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ init_call_elem, set_pollset_or_pollset_set, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, grpc_channel_next_get_info, "client-auth"};
diff --git a/src/core/lib/security/transport/handshake.c b/src/core/lib/security/transport/handshake.c
deleted file mode 100644
index fbeec312b6..0000000000
--- a/src/core/lib/security/transport/handshake.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/lib/security/transport/handshake.h"
-
-#include <stdbool.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/security/context/security_context.h"
-#include "src/core/lib/security/transport/secure_endpoint.h"
-#include "src/core/lib/security/transport/tsi_error.h"
-
-#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
-
-typedef struct {
- grpc_security_connector *connector;
- tsi_handshaker *handshaker;
- bool is_client_side;
- unsigned char *handshake_buffer;
- size_t handshake_buffer_size;
- grpc_endpoint *wrapped_endpoint;
- grpc_endpoint *secure_endpoint;
- gpr_slice_buffer left_overs;
- gpr_slice_buffer incoming;
- gpr_slice_buffer outgoing;
- grpc_security_handshake_done_cb cb;
- void *user_data;
- grpc_closure on_handshake_data_sent_to_peer;
- grpc_closure on_handshake_data_received_from_peer;
- grpc_auth_context *auth_context;
- grpc_timer timer;
- gpr_refcount refs;
-} grpc_security_handshake;
-
-static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
- void *setup,
- grpc_error *error);
-
-static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
- grpc_error *error);
-
-static void security_connector_remove_handshake(grpc_security_handshake *h) {
- GPR_ASSERT(!h->is_client_side);
- grpc_security_connector_handshake_list *node;
- grpc_security_connector_handshake_list *tmp;
- grpc_server_security_connector *sc =
- (grpc_server_security_connector *)h->connector;
- gpr_mu_lock(&sc->mu);
- node = sc->handshaking_handshakes;
- if (node && node->handshake == h) {
- sc->handshaking_handshakes = node->next;
- gpr_free(node);
- gpr_mu_unlock(&sc->mu);
- return;
- }
- while (node) {
- if (node->next->handshake == h) {
- tmp = node->next;
- node->next = node->next->next;
- gpr_free(tmp);
- gpr_mu_unlock(&sc->mu);
- return;
- }
- node = node->next;
- }
- gpr_mu_unlock(&sc->mu);
-}
-
-static void unref_handshake(grpc_security_handshake *h) {
- if (gpr_unref(&h->refs)) {
- if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
- if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
- gpr_slice_buffer_destroy(&h->left_overs);
- gpr_slice_buffer_destroy(&h->outgoing);
- gpr_slice_buffer_destroy(&h->incoming);
- GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
- GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
- gpr_free(h);
- }
-}
-
-static void security_handshake_done(grpc_exec_ctx *exec_ctx,
- grpc_security_handshake *h,
- grpc_error *error) {
- grpc_timer_cancel(exec_ctx, &h->timer);
- if (!h->is_client_side) {
- security_connector_remove_handshake(h);
- }
- if (error == GRPC_ERROR_NONE) {
- h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->secure_endpoint,
- h->auth_context);
- } else {
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_ERROR, "Security handshake failed: %s", msg);
- grpc_error_free_string(msg);
-
- if (h->secure_endpoint != NULL) {
- grpc_endpoint_shutdown(exec_ctx, h->secure_endpoint);
- grpc_endpoint_destroy(exec_ctx, h->secure_endpoint);
- } else {
- grpc_endpoint_destroy(exec_ctx, h->wrapped_endpoint);
- }
- h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, NULL, NULL);
- }
- unref_handshake(h);
- GRPC_ERROR_UNREF(error);
-}
-
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_security_status status,
- grpc_auth_context *auth_context) {
- grpc_security_handshake *h = user_data;
- tsi_frame_protector *protector;
- tsi_result result;
- if (status != GRPC_SECURITY_OK) {
- security_handshake_done(
- exec_ctx, h,
- grpc_error_set_int(GRPC_ERROR_CREATE("Error checking peer."),
- GRPC_ERROR_INT_SECURITY_STATUS, status));
- return;
- }
- h->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "handshake");
- result =
- tsi_handshaker_create_frame_protector(h->handshaker, NULL, &protector);
- if (result != TSI_OK) {
- security_handshake_done(
- exec_ctx, h,
- grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Frame protector creation failed"), result));
- return;
- }
- h->secure_endpoint =
- grpc_secure_endpoint_create(protector, h->wrapped_endpoint,
- h->left_overs.slices, h->left_overs.count);
- h->left_overs.count = 0;
- h->left_overs.length = 0;
- security_handshake_done(exec_ctx, h, GRPC_ERROR_NONE);
- return;
-}
-
-static void check_peer(grpc_exec_ctx *exec_ctx, grpc_security_handshake *h) {
- tsi_peer peer;
- tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer);
-
- if (result != TSI_OK) {
- security_handshake_done(
- exec_ctx, h, grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Peer extraction failed"), result));
- return;
- }
- grpc_security_connector_check_peer(exec_ctx, h->connector, peer,
- on_peer_checked, h);
-}
-
-static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_handshake *h) {
- size_t offset = 0;
- tsi_result result = TSI_OK;
- gpr_slice to_send;
-
- do {
- size_t to_send_size = h->handshake_buffer_size - offset;
- result = tsi_handshaker_get_bytes_to_send_to_peer(
- h->handshaker, h->handshake_buffer + offset, &to_send_size);
- offset += to_send_size;
- if (result == TSI_INCOMPLETE_DATA) {
- h->handshake_buffer_size *= 2;
- h->handshake_buffer =
- gpr_realloc(h->handshake_buffer, h->handshake_buffer_size);
- }
- } while (result == TSI_INCOMPLETE_DATA);
-
- if (result != TSI_OK) {
- security_handshake_done(exec_ctx, h,
- grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Handshake failed"), result));
- return;
- }
-
- to_send =
- gpr_slice_from_copied_buffer((const char *)h->handshake_buffer, offset);
- gpr_slice_buffer_reset_and_unref(&h->outgoing);
- gpr_slice_buffer_add(&h->outgoing, to_send);
- /* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- grpc_endpoint_write(exec_ctx, h->wrapped_endpoint, &h->outgoing,
- &h->on_handshake_data_sent_to_peer);
-}
-
-static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
- void *handshake,
- grpc_error *error) {
- grpc_security_handshake *h = handshake;
- size_t consumed_slice_size = 0;
- tsi_result result = TSI_OK;
- size_t i;
- size_t num_left_overs;
- int has_left_overs_in_current_slice = 0;
-
- if (error != GRPC_ERROR_NONE) {
- security_handshake_done(
- exec_ctx, h,
- GRPC_ERROR_CREATE_REFERENCING("Handshake read failed", &error, 1));
- return;
- }
-
- for (i = 0; i < h->incoming.count; i++) {
- consumed_slice_size = GPR_SLICE_LENGTH(h->incoming.slices[i]);
- result = tsi_handshaker_process_bytes_from_peer(
- h->handshaker, GPR_SLICE_START_PTR(h->incoming.slices[i]),
- &consumed_slice_size);
- if (!tsi_handshaker_is_in_progress(h->handshaker)) break;
- }
-
- if (tsi_handshaker_is_in_progress(h->handshaker)) {
- /* We may need more data. */
- if (result == TSI_INCOMPLETE_DATA) {
- grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
- &h->on_handshake_data_received_from_peer);
- return;
- } else {
- send_handshake_bytes_to_peer(exec_ctx, h);
- return;
- }
- }
-
- if (result != TSI_OK) {
- security_handshake_done(exec_ctx, h,
- grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Handshake failed"), result));
- return;
- }
-
- /* Handshake is done and successful this point. */
- has_left_overs_in_current_slice =
- (consumed_slice_size < GPR_SLICE_LENGTH(h->incoming.slices[i]));
- num_left_overs =
- (has_left_overs_in_current_slice ? 1 : 0) + h->incoming.count - i - 1;
- if (num_left_overs == 0) {
- check_peer(exec_ctx, h);
- return;
- }
-
- /* Put the leftovers in our buffer (ownership transfered). */
- if (has_left_overs_in_current_slice) {
- gpr_slice_buffer_add(
- &h->left_overs,
- gpr_slice_split_tail(&h->incoming.slices[i], consumed_slice_size));
- gpr_slice_unref(
- h->incoming.slices[i]); /* split_tail above increments refcount. */
- }
- gpr_slice_buffer_addn(
- &h->left_overs, &h->incoming.slices[i + 1],
- num_left_overs - (size_t)has_left_overs_in_current_slice);
- check_peer(exec_ctx, h);
-}
-
-/* If handshake is NULL, the handshake is done. */
-static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx,
- void *handshake, grpc_error *error) {
- grpc_security_handshake *h = handshake;
-
- /* Make sure that write is OK. */
- if (error != GRPC_ERROR_NONE) {
- if (handshake != NULL)
- security_handshake_done(
- exec_ctx, h,
- GRPC_ERROR_CREATE_REFERENCING("Handshake write failed", &error, 1));
- return;
- }
-
- /* We may be done. */
- if (tsi_handshaker_is_in_progress(h->handshaker)) {
- /* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
- &h->on_handshake_data_received_from_peer);
- } else {
- check_peer(exec_ctx, h);
- }
-}
-
-static void on_timeout(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_security_handshake *h = arg;
- if (error == GRPC_ERROR_NONE) {
- grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
- }
- unref_handshake(h);
-}
-
-void grpc_do_security_handshake(
- grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
- grpc_security_connector *connector, bool is_client_side,
- grpc_endpoint *nonsecure_endpoint, gpr_slice_buffer *read_buffer,
- gpr_timespec deadline, grpc_security_handshake_done_cb cb,
- void *user_data) {
- grpc_security_connector_handshake_list *handshake_node;
- grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
- memset(h, 0, sizeof(grpc_security_handshake));
- h->handshaker = handshaker;
- h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
- h->is_client_side = is_client_side;
- h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
- h->wrapped_endpoint = nonsecure_endpoint;
- h->user_data = user_data;
- h->cb = cb;
- gpr_ref_init(&h->refs, 2); /* timer and handshake proper each get a ref */
- grpc_closure_init(&h->on_handshake_data_sent_to_peer,
- on_handshake_data_sent_to_peer, h);
- grpc_closure_init(&h->on_handshake_data_received_from_peer,
- on_handshake_data_received_from_peer, h);
- gpr_slice_buffer_init(&h->left_overs);
- gpr_slice_buffer_init(&h->outgoing);
- gpr_slice_buffer_init(&h->incoming);
- if (read_buffer != NULL) {
- gpr_slice_buffer_move_into(read_buffer, &h->incoming);
- gpr_free(read_buffer);
- }
- if (!is_client_side) {
- grpc_server_security_connector *server_connector =
- (grpc_server_security_connector *)connector;
- handshake_node = gpr_malloc(sizeof(grpc_security_connector_handshake_list));
- handshake_node->handshake = h;
- gpr_mu_lock(&server_connector->mu);
- handshake_node->next = server_connector->handshaking_handshakes;
- server_connector->handshaking_handshakes = handshake_node;
- gpr_mu_unlock(&server_connector->mu);
- }
- send_handshake_bytes_to_peer(exec_ctx, h);
- grpc_timer_init(exec_ctx, &h->timer,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- on_timeout, h, gpr_now(GPR_CLOCK_MONOTONIC));
-}
-
-void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx,
- void *handshake) {
- grpc_security_handshake *h = handshake;
- grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
-}
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index acb0113ea8..331a8f1835 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -31,15 +31,22 @@
*
*/
-#include "src/core/lib/security/transport/secure_endpoint.h"
+/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+ using that endpoint. Because of various transitive includes in uv.h,
+ including windows.h on Windows, uv.h must be included before other system
+ headers. Therefore, sockaddr.h must always be included first */
+#include "src/core/lib/iomgr/sockaddr.h"
+
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/sync.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/tsi/transport_security_interface.h"
@@ -54,15 +61,15 @@ typedef struct {
grpc_closure *read_cb;
grpc_closure *write_cb;
grpc_closure on_read;
- gpr_slice_buffer *read_buffer;
- gpr_slice_buffer source_buffer;
+ grpc_slice_buffer *read_buffer;
+ grpc_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
- gpr_slice_buffer leftover_bytes;
+ grpc_slice_buffer leftover_bytes;
/* buffers for read and write */
- gpr_slice read_staging_buffer;
+ grpc_slice read_staging_buffer;
- gpr_slice write_staging_buffer;
- gpr_slice_buffer output_buffer;
+ grpc_slice write_staging_buffer;
+ grpc_slice_buffer output_buffer;
gpr_refcount ref;
} secure_endpoint;
@@ -73,11 +80,11 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint *ep = secure_ep;
grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
- gpr_slice_buffer_destroy(&ep->leftover_bytes);
- gpr_slice_unref(ep->read_staging_buffer);
- gpr_slice_unref(ep->write_staging_buffer);
- gpr_slice_buffer_destroy(&ep->output_buffer);
- gpr_slice_buffer_destroy(&ep->source_buffer);
+ grpc_slice_buffer_destroy(&ep->leftover_bytes);
+ grpc_slice_unref(ep->read_staging_buffer);
+ grpc_slice_unref(ep->write_staging_buffer);
+ grpc_slice_buffer_destroy(&ep->output_buffer);
+ grpc_slice_buffer_destroy(&ep->source_buffer);
gpr_mu_destroy(&ep->protector_mu);
gpr_free(ep);
}
@@ -121,10 +128,10 @@ static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
uint8_t **end) {
- gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
- ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
- *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
- *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
+ grpc_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
+ ep->read_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
+ *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
+ *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
}
static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
@@ -132,8 +139,8 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
if (grpc_trace_secure_endpoint) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
- char *data = gpr_dump_slice(ep->read_buffer->slices[i],
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ char *data = grpc_dump_slice(ep->read_buffer->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
gpr_free(data);
}
@@ -149,11 +156,11 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
uint8_t keep_looping = 0;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)user_data;
- uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
- uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
+ uint8_t *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
+ uint8_t *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
if (error != GRPC_ERROR_NONE) {
- gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ grpc_slice_buffer_reset_and_unref(ep->read_buffer);
call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
"Secure read failed", &error, 1));
return;
@@ -161,9 +168,9 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
/* TODO(yangg) check error, maybe bail out early */
for (i = 0; i < ep->source_buffer.count; i++) {
- gpr_slice encrypted = ep->source_buffer.slices[i];
- uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
- size_t message_size = GPR_SLICE_LENGTH(encrypted);
+ grpc_slice encrypted = ep->source_buffer.slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
+ size_t message_size = GRPC_SLICE_LENGTH(encrypted);
while (message_size > 0 || keep_looping) {
size_t unprotected_buffer_size_written = (size_t)(end - cur);
@@ -198,20 +205,20 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (result != TSI_OK) break;
}
- if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
- gpr_slice_buffer_add(
+ if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
+ grpc_slice_buffer_add(
ep->read_buffer,
- gpr_slice_split_head(
+ grpc_slice_split_head(
&ep->read_staging_buffer,
- (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
+ (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
}
/* TODO(yangg) experiment with moving this block after read_cb to see if it
helps latency */
- gpr_slice_buffer_reset_and_unref(&ep->source_buffer);
+ grpc_slice_buffer_reset_and_unref(&ep->source_buffer);
if (result != TSI_OK) {
- gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ grpc_slice_buffer_reset_and_unref(ep->read_buffer);
call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
GRPC_ERROR_CREATE("Unwrap failed"), result));
return;
@@ -221,15 +228,15 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
}
static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
- gpr_slice_buffer *slices, grpc_closure *cb) {
+ grpc_slice_buffer *slices, grpc_closure *cb) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
ep->read_cb = cb;
ep->read_buffer = slices;
- gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ grpc_slice_buffer_reset_and_unref(ep->read_buffer);
SECURE_ENDPOINT_REF(ep, "read");
if (ep->leftover_bytes.count) {
- gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
+ grpc_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
GPR_ASSERT(ep->leftover_bytes.count == 0);
on_read(exec_ctx, ep, GRPC_ERROR_NONE);
return;
@@ -241,37 +248,37 @@ static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur,
uint8_t **end) {
- gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
- ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
- *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
- *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
+ grpc_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
+ ep->write_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
+ *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
+ *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
}
static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
- gpr_slice_buffer *slices, grpc_closure *cb) {
+ grpc_slice_buffer *slices, grpc_closure *cb) {
GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0);
unsigned i;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
- uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
+ uint8_t *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
+ uint8_t *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
- gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
+ grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
if (grpc_trace_secure_endpoint) {
for (i = 0; i < slices->count; i++) {
char *data =
- gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
gpr_free(data);
}
}
for (i = 0; i < slices->count; i++) {
- gpr_slice plain = slices->slices[i];
- uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
- size_t message_size = GPR_SLICE_LENGTH(plain);
+ grpc_slice plain = slices->slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
+ size_t message_size = GRPC_SLICE_LENGTH(plain);
while (message_size > 0) {
size_t protected_buffer_size_to_send = (size_t)(end - cur);
size_t processed_message_size = message_size;
@@ -310,18 +317,18 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
flush_write_staging_buffer(ep, &cur, &end);
}
} while (still_pending_size > 0);
- if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
- gpr_slice_buffer_add(
+ if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
+ grpc_slice_buffer_add(
&ep->output_buffer,
- gpr_slice_split_head(
+ grpc_slice_split_head(
&ep->write_staging_buffer,
- (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
+ (size_t)(cur - GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
}
}
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
- gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
+ grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
grpc_exec_ctx_sched(
exec_ctx, cb,
grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
@@ -365,11 +372,22 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_peer(ep->wrapped_ep);
}
+static int endpoint_get_fd(grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_fd(ep->wrapped_ep);
+}
+
static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
}
+static grpc_resource_user *endpoint_get_resource_user(
+ grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_resource_user(ep->wrapped_ep);
+}
+
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_get_workqueue,
@@ -377,25 +395,27 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_add_to_pollset_set,
endpoint_shutdown,
endpoint_destroy,
- endpoint_get_peer};
+ endpoint_get_resource_user,
+ endpoint_get_peer,
+ endpoint_get_fd};
grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *transport,
- gpr_slice *leftover_slices, size_t leftover_nslices) {
+ grpc_slice *leftover_slices, size_t leftover_nslices) {
size_t i;
secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
ep->base.vtable = &vtable;
ep->wrapped_ep = transport;
ep->protector = protector;
- gpr_slice_buffer_init(&ep->leftover_bytes);
+ grpc_slice_buffer_init(&ep->leftover_bytes);
for (i = 0; i < leftover_nslices; i++) {
- gpr_slice_buffer_add(&ep->leftover_bytes,
- gpr_slice_ref(leftover_slices[i]));
+ grpc_slice_buffer_add(&ep->leftover_bytes,
+ grpc_slice_ref(leftover_slices[i]));
}
- ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
- ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
- gpr_slice_buffer_init(&ep->output_buffer);
- gpr_slice_buffer_init(&ep->source_buffer);
+ ep->write_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
+ ep->read_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
+ grpc_slice_buffer_init(&ep->output_buffer);
+ grpc_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL;
grpc_closure_init(&ep->on_read, on_read, ep);
gpr_mu_init(&ep->protector_mu);
diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h
index d00075b769..a61f40a4fa 100644
--- a/src/core/lib/security/transport/secure_endpoint.h
+++ b/src/core/lib/security/transport/secure_endpoint.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H
#define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include "src/core/lib/iomgr/endpoint.h"
struct tsi_frame_protector;
@@ -44,6 +44,6 @@ extern int grpc_trace_secure_endpoint;
/* Takes ownership of protector and to_wrap, and refs leftover_slices. */
grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *to_wrap,
- gpr_slice *leftover_slices, size_t leftover_nslices);
+ grpc_slice *leftover_slices, size_t leftover_nslices);
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 0eca46eb52..5b088aa58d 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -36,18 +36,19 @@
#include <stdbool.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/alpn/alpn.h"
+#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h"
-#include "src/core/lib/security/transport/handshake.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
+#include "src/core/lib/security/transport/security_handshaker.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/tsi/fake_transport_security.h"
@@ -111,58 +112,34 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
return NULL;
}
-void grpc_server_security_connector_shutdown(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector) {
- grpc_security_connector_handshake_list *tmp;
- gpr_mu_lock(&connector->mu);
- while (connector->handshaking_handshakes) {
- tmp = connector->handshaking_handshakes;
- grpc_security_handshake_shutdown(
- exec_ctx, connector->handshaking_handshakes->handshake);
- connector->handshaking_handshakes = tmp->next;
- gpr_free(tmp);
+void grpc_channel_security_connector_add_handshakers(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
+ grpc_handshake_manager *handshake_mgr) {
+ if (connector != NULL) {
+ connector->add_handshakers(exec_ctx, connector, handshake_mgr);
}
- gpr_mu_unlock(&connector->mu);
}
-void grpc_channel_security_connector_do_handshake(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint, gpr_slice_buffer *read_buffer,
- gpr_timespec deadline, grpc_security_handshake_done_cb cb,
- void *user_data) {
- if (sc == NULL || nonsecure_endpoint == NULL) {
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
- } else {
- sc->do_handshake(exec_ctx, sc, nonsecure_endpoint, read_buffer, deadline,
- cb, user_data);
- }
-}
-
-void grpc_server_security_connector_do_handshake(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
- grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data) {
- if (sc == NULL || nonsecure_endpoint == NULL) {
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
- } else {
- sc->do_handshake(exec_ctx, sc, acceptor, nonsecure_endpoint, read_buffer,
- deadline, cb, user_data);
+void grpc_server_security_connector_add_handshakers(
+ grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector,
+ grpc_handshake_manager *handshake_mgr) {
+ if (connector != NULL) {
+ connector->add_handshakers(exec_ctx, connector, handshake_mgr);
}
}
void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
tsi_peer peer,
- grpc_security_peer_check_cb cb,
- void *user_data) {
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked) {
if (sc == NULL) {
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
+ grpc_exec_ctx_sched(
+ exec_ctx, on_peer_checked,
+ GRPC_ERROR_CREATE("cannot check peer -- no security connector"), NULL);
tsi_peer_destruct(&peer);
} else {
- sc->vtable->check_peer(exec_ctx, sc, peer, cb, user_data);
+ sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
}
}
@@ -210,11 +187,11 @@ void grpc_security_connector_unref(grpc_security_connector *sc) {
}
static void connector_pointer_arg_destroy(void *p) {
- GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg");
+ GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg_destroy");
}
static void *connector_pointer_arg_copy(void *p) {
- return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
+ return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy");
}
static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
@@ -262,45 +239,41 @@ static void fake_channel_destroy(grpc_security_connector *sc) {
gpr_free(sc);
}
-static void fake_server_destroy(grpc_security_connector *sc) {
- grpc_server_security_connector *c = (grpc_server_security_connector *)sc;
- gpr_mu_destroy(&c->mu);
- gpr_free(sc);
-}
+static void fake_server_destroy(grpc_security_connector *sc) { gpr_free(sc); }
static void fake_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
- grpc_security_peer_check_cb cb, void *user_data) {
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked) {
const char *prop_name;
- grpc_security_status status = GRPC_SECURITY_OK;
- grpc_auth_context *auth_context = NULL;
+ grpc_error *error = GRPC_ERROR_NONE;
+ *auth_context = NULL;
if (peer.property_count != 1) {
- gpr_log(GPR_ERROR, "Fake peers should only have 1 property.");
- status = GRPC_SECURITY_ERROR;
+ error = GRPC_ERROR_CREATE("Fake peers should only have 1 property.");
goto end;
}
prop_name = peer.properties[0].name;
if (prop_name == NULL ||
strcmp(prop_name, TSI_CERTIFICATE_TYPE_PEER_PROPERTY)) {
- gpr_log(GPR_ERROR, "Unexpected property in fake peer: %s.",
- prop_name == NULL ? "<EMPTY>" : prop_name);
- status = GRPC_SECURITY_ERROR;
+ char *msg;
+ gpr_asprintf(&msg, "Unexpected property in fake peer: %s.",
+ prop_name == NULL ? "<EMPTY>" : prop_name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
goto end;
}
if (strncmp(peer.properties[0].value.data, TSI_FAKE_CERTIFICATE_TYPE,
peer.properties[0].value.length)) {
- gpr_log(GPR_ERROR, "Invalid value for cert type property.");
- status = GRPC_SECURITY_ERROR;
+ error = GRPC_ERROR_CREATE("Invalid value for cert type property.");
goto end;
}
- auth_context = grpc_auth_context_create(NULL);
+ *auth_context = grpc_auth_context_create(NULL);
grpc_auth_context_add_cstring_property(
- auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
+ *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end:
- cb(exec_ctx, user_data, status, auth_context);
- grpc_auth_context_unref(auth_context);
+ grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
tsi_peer_destruct(&peer);
}
@@ -313,26 +286,24 @@ static void fake_channel_check_call_host(grpc_exec_ctx *exec_ctx,
cb(exec_ctx, user_data, GRPC_SECURITY_OK);
}
-static void fake_channel_do_handshake(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer,
- gpr_timespec deadline,
- grpc_security_handshake_done_cb cb,
- void *user_data) {
- grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(1), &sc->base,
- true, nonsecure_endpoint, read_buffer, deadline,
- cb, user_data);
+static void fake_channel_add_handshakers(
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr) {
+ grpc_handshake_manager_add(
+ handshake_mgr,
+ grpc_security_handshaker_create(
+ exec_ctx, tsi_create_fake_handshaker(true /* is_client */),
+ &sc->base));
}
-static void fake_server_do_handshake(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
- grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data) {
- grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(0), &sc->base,
- false, nonsecure_endpoint, read_buffer, deadline,
- cb, user_data);
+static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr) {
+ grpc_handshake_manager_add(
+ handshake_mgr,
+ grpc_security_handshaker_create(
+ exec_ctx, tsi_create_fake_handshaker(false /* is_client */),
+ &sc->base));
}
static grpc_security_connector_vtable fake_channel_vtable = {
@@ -350,7 +321,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
c->base.vtable = &fake_channel_vtable;
c->request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds);
c->check_call_host = fake_channel_check_call_host;
- c->do_handshake = fake_channel_do_handshake;
+ c->add_handshakers = fake_channel_add_handshakers;
return c;
}
@@ -362,8 +333,7 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &fake_server_vtable;
c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
- c->do_handshake = fake_server_do_handshake;
- gpr_mu_init(&c->mu);
+ c->add_handshakers = fake_server_add_handshakers;
return c;
}
@@ -396,11 +366,9 @@ static void ssl_channel_destroy(grpc_security_connector *sc) {
static void ssl_server_destroy(grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
-
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
- gpr_mu_destroy(&c->base.mu);
gpr_free(sc);
}
@@ -419,49 +387,35 @@ static grpc_security_status ssl_create_handshaker(
return GRPC_SECURITY_OK;
}
-static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer,
- gpr_timespec deadline,
- grpc_security_handshake_done_cb cb,
- void *user_data) {
+static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
- tsi_handshaker *handshaker;
- grpc_security_status status = ssl_create_handshaker(
- c->handshaker_factory, true,
- c->overridden_target_name != NULL ? c->overridden_target_name
- : c->target_name,
- &handshaker);
- if (status != GRPC_SECURITY_OK) {
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, status, NULL, NULL);
- } else {
- grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
- nonsecure_endpoint, read_buffer, deadline, cb,
- user_data);
- }
-}
-
-static void ssl_server_do_handshake(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
- grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data) {
+ // Instantiate TSI handshaker.
+ tsi_handshaker *tsi_hs = NULL;
+ ssl_create_handshaker(c->handshaker_factory, true /* is_client */,
+ c->overridden_target_name != NULL
+ ? c->overridden_target_name
+ : c->target_name,
+ &tsi_hs);
+ // Create handshakers.
+ grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create(
+ exec_ctx, tsi_hs, &sc->base));
+}
+
+static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- tsi_handshaker *handshaker;
- grpc_security_status status =
- ssl_create_handshaker(c->handshaker_factory, false, NULL, &handshaker);
- if (status != GRPC_SECURITY_OK) {
- gpr_free(read_buffer);
- cb(exec_ctx, user_data, status, NULL, NULL);
- } else {
- grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, false,
- nonsecure_endpoint, read_buffer, deadline, cb,
- user_data);
- }
+ // Instantiate TSI handshaker.
+ tsi_handshaker *tsi_hs = NULL;
+ ssl_create_handshaker(c->handshaker_factory, false /* is_client */,
+ NULL /* peer_name */, &tsi_hs);
+ // Create handshakers.
+ grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create(
+ exec_ctx, tsi_hs, &sc->base));
}
static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
@@ -518,57 +472,53 @@ grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
return ctx;
}
-static grpc_security_status ssl_check_peer(grpc_security_connector *sc,
- const char *peer_name,
- const tsi_peer *peer,
- grpc_auth_context **auth_context) {
+static grpc_error *ssl_check_peer(grpc_security_connector *sc,
+ const char *peer_name, const tsi_peer *peer,
+ grpc_auth_context **auth_context) {
/* Check the ALPN. */
const tsi_peer_property *p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == NULL) {
- gpr_log(GPR_ERROR, "Missing selected ALPN property.");
- return GRPC_SECURITY_ERROR;
+ return GRPC_ERROR_CREATE(
+ "Cannot check peer: missing selected ALPN property.");
}
if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) {
- gpr_log(GPR_ERROR, "Invalid ALPN value.");
- return GRPC_SECURITY_ERROR;
+ return GRPC_ERROR_CREATE("Cannot check peer: invalid ALPN value.");
}
/* Check the peer name if specified. */
if (peer_name != NULL && !ssl_host_matches_name(peer, peer_name)) {
- gpr_log(GPR_ERROR, "Peer name %s is not in peer certificate", peer_name);
- return GRPC_SECURITY_ERROR;
+ char *msg;
+ gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
+ grpc_error *error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return error;
}
*auth_context = tsi_ssl_peer_to_auth_context(peer);
- return GRPC_SECURITY_OK;
+ return GRPC_ERROR_NONE;
}
static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
- grpc_security_peer_check_cb cb,
- void *user_data) {
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
- grpc_security_status status;
- grpc_auth_context *auth_context = NULL;
- status = ssl_check_peer(sc, c->overridden_target_name != NULL
- ? c->overridden_target_name
- : c->target_name,
- &peer, &auth_context);
- cb(exec_ctx, user_data, status, auth_context);
- grpc_auth_context_unref(auth_context);
+ grpc_error *error = ssl_check_peer(sc, c->overridden_target_name != NULL
+ ? c->overridden_target_name
+ : c->target_name,
+ &peer, auth_context);
+ grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
tsi_peer_destruct(&peer);
}
static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
- grpc_security_peer_check_cb cb,
- void *user_data) {
- grpc_auth_context *auth_context = NULL;
- grpc_security_status status = ssl_check_peer(sc, NULL, &peer, &auth_context);
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked) {
+ grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer);
- cb(exec_ctx, user_data, status, auth_context);
- grpc_auth_context_unref(auth_context);
+ grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
}
static void add_shallow_auth_property_to_peer(tsi_peer *peer,
@@ -642,8 +592,8 @@ static grpc_security_connector_vtable ssl_channel_vtable = {
static grpc_security_connector_vtable ssl_server_vtable = {
ssl_server_destroy, ssl_server_check_peer};
-static gpr_slice compute_default_pem_root_certs_once(void) {
- gpr_slice result = gpr_empty_slice();
+static grpc_slice compute_default_pem_root_certs_once(void) {
+ grpc_slice result = gpr_empty_slice();
/* First try to load the roots from the environment. */
char *default_root_certs_path =
@@ -656,17 +606,17 @@ static gpr_slice compute_default_pem_root_certs_once(void) {
/* Try overridden roots if needed. */
grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL;
- if (GPR_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) {
+ if (GRPC_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) {
char *pem_root_certs = NULL;
ovrd_res = ssl_roots_override_cb(&pem_root_certs);
if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) {
GPR_ASSERT(pem_root_certs != NULL);
- result = gpr_slice_new(pem_root_certs, strlen(pem_root_certs), gpr_free);
+ result = grpc_slice_new(pem_root_certs, strlen(pem_root_certs), gpr_free);
}
}
/* Fall back to installed certs if needed. */
- if (GPR_SLICE_IS_EMPTY(result) &&
+ if (GRPC_SLICE_IS_EMPTY(result) &&
ovrd_res != GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY) {
GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(installed_roots_path, 0, &result));
@@ -674,13 +624,13 @@ static gpr_slice compute_default_pem_root_certs_once(void) {
return result;
}
-static gpr_slice default_pem_root_certs;
+static grpc_slice default_pem_root_certs;
static void init_default_pem_root_certs(void) {
default_pem_root_certs = compute_default_pem_root_certs_once();
}
-gpr_slice grpc_get_default_ssl_roots_for_testing(void) {
+grpc_slice grpc_get_default_ssl_roots_for_testing(void) {
return compute_default_pem_root_certs_once();
}
@@ -714,8 +664,8 @@ size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs) {
loading all the roots once for the lifetime of the process. */
static gpr_once once = GPR_ONCE_INIT;
gpr_once_init(&once, init_default_pem_root_certs);
- *pem_root_certs = GPR_SLICE_START_PTR(default_pem_root_certs);
- return GPR_SLICE_LENGTH(default_pem_root_certs);
+ *pem_root_certs = GRPC_SLICE_START_PTR(default_pem_root_certs);
+ return GRPC_SLICE_LENGTH(default_pem_root_certs);
}
grpc_security_status grpc_ssl_channel_security_connector_create(
@@ -765,7 +715,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
c->base.request_metadata_creds =
grpc_call_credentials_ref(request_metadata_creds);
c->base.check_call_host = ssl_channel_check_call_host;
- c->base.do_handshake = ssl_channel_do_handshake;
+ c->base.add_handshakers = ssl_channel_add_handshakers;
gpr_split_host_port(target_name, &c->target_name, &port);
gpr_free(port);
if (overridden_target_name != NULL) {
@@ -840,8 +790,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
*sc = NULL;
goto error;
}
- gpr_mu_init(&c->base.mu);
- c->base.do_handshake = ssl_server_do_handshake;
+ c->base.add_handshakers = ssl_server_add_handshakers;
*sc = &c->base;
gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
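With do_handshake gone, callers of the security connectors (the chttp2 client and server setup code elsewhere in this change) drive the handshake through a grpc_handshake_manager instead. A rough client-side sketch follows, assuming the handshake-manager API declared in src/core/lib/channel/handshaker.h at this revision; the exact grpc_handshake_manager_do_handshake argument order is an assumption, not confirmed by this diff:

  /* Sketch only: run the security handshake via a handshake manager. */
  static void start_security_handshake(grpc_exec_ctx *exec_ctx,
                                       grpc_channel_security_connector *sc,
                                       grpc_endpoint *endpoint,
                                       const grpc_channel_args *channel_args,
                                       gpr_timespec deadline,
                                       grpc_iomgr_cb_func on_handshake_done,
                                       void *user_data) {
    grpc_handshake_manager *mgr = grpc_handshake_manager_create();
    /* The connector contributes its security handshaker(s). */
    grpc_channel_security_connector_add_handshakers(exec_ctx, sc, mgr);
    /* Assumed signature: endpoint, channel args, deadline, acceptor
       (NULL on the client side), completion callback and its argument. */
    grpc_handshake_manager_do_handshake(exec_ctx, mgr, endpoint, channel_args,
                                        deadline, NULL /* acceptor */,
                                        on_handshake_done, user_data);
    /* In real code the manager pointer is saved so that on_handshake_done can
       later call grpc_handshake_manager_destroy(exec_ctx, mgr). */
  }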
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 0b5b44bf1a..a84b359051 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -35,6 +35,8 @@
#define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H
#include <grpc/grpc_security.h>
+
+#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/tsi/transport_security_interface.h"
@@ -57,21 +59,11 @@ typedef struct grpc_security_connector grpc_security_connector;
#define GRPC_SECURITY_CONNECTOR_ARG "grpc.security_connector"
-typedef void (*grpc_security_peer_check_cb)(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_security_status status,
- grpc_auth_context *auth_context);
-
-/* Ownership of the secure_endpoint is transfered. */
-typedef void (*grpc_security_handshake_done_cb)(
- grpc_exec_ctx *exec_ctx, void *user_data, grpc_security_status status,
- grpc_endpoint *secure_endpoint, grpc_auth_context *auth_context);
-
typedef struct {
void (*destroy)(grpc_security_connector *sc);
void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc,
- tsi_peer peer, grpc_security_peer_check_cb cb,
- void *user_data);
+ tsi_peer peer, grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked);
} grpc_security_connector_vtable;
typedef struct grpc_security_connector_handshake_list {
@@ -106,12 +98,12 @@ void grpc_security_connector_unref(grpc_security_connector *policy);
#endif
/* Check the peer. Callee takes ownership of the peer object.
- The callback will include the resulting auth_context. */
+ When done, sets *auth_context and invokes on_peer_checked. */
void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
tsi_peer peer,
- grpc_security_peer_check_cb cb,
- void *user_data);
+ grpc_auth_context **auth_context,
+ grpc_closure *on_peer_checked);
/* Util to encapsulate the connector in a channel arg. */
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);
@@ -141,11 +133,9 @@ struct grpc_channel_security_connector {
grpc_channel_security_connector *sc, const char *host,
grpc_auth_context *auth_context,
grpc_security_call_host_check_cb cb, void *user_data);
- void (*do_handshake)(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data);
+ void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr);
};
/* Checks that the host that will be set for a call is acceptable. */
@@ -154,11 +144,10 @@ void grpc_channel_security_connector_check_call_host(
const char *host, grpc_auth_context *auth_context,
grpc_security_call_host_check_cb cb, void *user_data);
-/* Handshake. */
-void grpc_channel_security_connector_do_handshake(
+/* Registers handshakers with \a handshake_mgr. */
+void grpc_channel_security_connector_add_handshakers(
grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
- grpc_endpoint *nonsecure_endpoint, gpr_slice_buffer *read_buffer,
- gpr_timespec deadline, grpc_security_handshake_done_cb cb, void *user_data);
+ grpc_handshake_manager *handshake_mgr);
/* --- server_security_connector object. ---
@@ -169,25 +158,14 @@ typedef struct grpc_server_security_connector grpc_server_security_connector;
struct grpc_server_security_connector {
grpc_security_connector base;
- gpr_mu mu;
- grpc_security_connector_handshake_list *handshaking_handshakes;
- const grpc_channel_args *channel_args;
- void (*do_handshake)(grpc_exec_ctx *exec_ctx,
- grpc_server_security_connector *sc,
- grpc_tcp_server_acceptor *acceptor,
- grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data);
+ void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
+ grpc_server_security_connector *sc,
+ grpc_handshake_manager *handshake_mgr);
};
-void grpc_server_security_connector_do_handshake(
+void grpc_server_security_connector_add_handshakers(
grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
- grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- gpr_slice_buffer *read_buffer, gpr_timespec deadline,
- grpc_security_handshake_done_cb cb, void *user_data);
-
-void grpc_server_security_connector_shutdown(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector);
+ grpc_handshake_manager *handshake_mgr);
/* --- Creation security connectors. --- */
@@ -233,7 +211,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs);
/* Exposed for TESTING ONLY!. */
-gpr_slice grpc_get_default_ssl_roots_for_testing(void);
+grpc_slice grpc_get_default_ssl_roots_for_testing(void);
/* Config for ssl servers. */
typedef struct {
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
new file mode 100644
index 0000000000..41a775db85
--- /dev/null
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -0,0 +1,450 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/security/transport/security_handshaker.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <grpc/slice_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/security/context/security_context.h"
+#include "src/core/lib/security/transport/secure_endpoint.h"
+#include "src/core/lib/security/transport/tsi_error.h"
+
+#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
+
+typedef struct {
+ grpc_handshaker base;
+
+ // State set at creation time.
+ tsi_handshaker *handshaker;
+ grpc_security_connector *connector;
+
+ gpr_mu mu;
+ gpr_refcount refs;
+
+ bool shutdown;
+ // Endpoint and read buffer to destroy after a shutdown.
+ grpc_endpoint *endpoint_to_destroy;
+ grpc_slice_buffer *read_buffer_to_destroy;
+
+ // State saved while performing the handshake.
+ grpc_handshaker_args *args;
+ grpc_closure *on_handshake_done;
+
+ unsigned char *handshake_buffer;
+ size_t handshake_buffer_size;
+ grpc_slice_buffer left_overs;
+ grpc_slice_buffer outgoing;
+ grpc_closure on_handshake_data_sent_to_peer;
+ grpc_closure on_handshake_data_received_from_peer;
+ grpc_closure on_peer_checked;
+ grpc_auth_context *auth_context;
+} security_handshaker;
+
+static void security_handshaker_unref(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h) {
+ if (gpr_unref(&h->refs)) {
+ gpr_mu_destroy(&h->mu);
+ tsi_handshaker_destroy(h->handshaker);
+ if (h->endpoint_to_destroy != NULL) {
+ grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy);
+ }
+ if (h->read_buffer_to_destroy != NULL) {
+ grpc_slice_buffer_destroy(h->read_buffer_to_destroy);
+ gpr_free(h->read_buffer_to_destroy);
+ }
+ gpr_free(h->handshake_buffer);
+ grpc_slice_buffer_destroy(&h->left_overs);
+ grpc_slice_buffer_destroy(&h->outgoing);
+ GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
+ GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
+ gpr_free(h);
+ }
+}
+
+// Set args fields to NULL, saving the endpoint and read buffer for
+// later destruction.
+static void cleanup_args_for_failure_locked(security_handshaker *h) {
+ h->endpoint_to_destroy = h->args->endpoint;
+ h->args->endpoint = NULL;
+ h->read_buffer_to_destroy = h->args->read_buffer;
+ h->args->read_buffer = NULL;
+ grpc_channel_args_destroy(h->args->args);
+ h->args->args = NULL;
+}
+
+// If the handshake failed or we're shutting down, clean up and invoke the
+// callback with the error.
+static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h,
+ grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) {
+ // If we were shut down after the handshake succeeded but before an
+ // endpoint callback was invoked, we need to generate our own error.
+ error = GRPC_ERROR_CREATE("Handshaker shutdown");
+ }
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
+ grpc_error_free_string(msg);
+ if (!h->shutdown) {
+ // TODO(ctiller): It is currently necessary to shutdown endpoints
+ // before destroying them, even if we know that there are no
+ // pending read/write callbacks. This should be fixed, at which
+ // point this can be removed.
+ grpc_endpoint_shutdown(exec_ctx, h->args->endpoint);
+    // Not shutting down, so the handshake failed. Clean up before
+    // invoking the callback.

+ cleanup_args_for_failure_locked(h);
+ // Set shutdown to true so that subsequent calls to
+ // security_handshaker_shutdown() do nothing.
+ h->shutdown = true;
+ }
+ // Invoke callback.
+ grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL);
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = arg;
+ gpr_mu_lock(&h->mu);
+ if (error != GRPC_ERROR_NONE || h->shutdown) {
+ security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
+ goto done;
+ }
+ // Get frame protector.
+ tsi_frame_protector *protector;
+ tsi_result result =
+ tsi_handshaker_create_frame_protector(h->handshaker, NULL, &protector);
+ if (result != TSI_OK) {
+ error = grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE("Frame protector creation failed"), result);
+ security_handshake_failed_locked(exec_ctx, h, error);
+ goto done;
+ }
+ // Success.
+ // Create secure endpoint.
+ h->args->endpoint = grpc_secure_endpoint_create(
+ protector, h->args->endpoint, h->left_overs.slices, h->left_overs.count);
+ h->left_overs.count = 0;
+ h->left_overs.length = 0;
+ // Clear out the read buffer before it gets passed to the transport,
+ // since any excess bytes were already copied to h->left_overs.
+ grpc_slice_buffer_reset_and_unref(h->args->read_buffer);
+ // Add auth context to channel args.
+ grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context);
+ grpc_channel_args *tmp_args = h->args->args;
+ h->args->args =
+ grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
+ grpc_channel_args_destroy(tmp_args);
+ // Invoke callback.
+ grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE, NULL);
+ // Set shutdown to true so that subsequent calls to
+ // security_handshaker_shutdown() do nothing.
+ h->shutdown = true;
+done:
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+}
+
+static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h) {
+ tsi_peer peer;
+ tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer);
+ if (result != TSI_OK) {
+ return grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE("Peer extraction failed"), result);
+ }
+ grpc_security_connector_check_peer(exec_ctx, h->connector, peer,
+ &h->auth_context, &h->on_peer_checked);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error *send_handshake_bytes_to_peer_locked(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h) {
+ // Get data to send.
+ tsi_result result = TSI_OK;
+ size_t offset = 0;
+ do {
+ size_t to_send_size = h->handshake_buffer_size - offset;
+ result = tsi_handshaker_get_bytes_to_send_to_peer(
+ h->handshaker, h->handshake_buffer + offset, &to_send_size);
+ offset += to_send_size;
+ if (result == TSI_INCOMPLETE_DATA) {
+ h->handshake_buffer_size *= 2;
+ h->handshake_buffer =
+ gpr_realloc(h->handshake_buffer, h->handshake_buffer_size);
+ }
+ } while (result == TSI_INCOMPLETE_DATA);
+ if (result != TSI_OK) {
+ return grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Handshake failed"),
+ result);
+ }
+ // Send data.
+ grpc_slice to_send =
+ grpc_slice_from_copied_buffer((const char *)h->handshake_buffer, offset);
+ grpc_slice_buffer_reset_and_unref(&h->outgoing);
+ grpc_slice_buffer_add(&h->outgoing, to_send);
+ grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing,
+ &h->on_handshake_data_sent_to_peer);
+ return GRPC_ERROR_NONE;
+}
+
+static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ security_handshaker *h = arg;
+ gpr_mu_lock(&h->mu);
+ if (error != GRPC_ERROR_NONE || h->shutdown) {
+ security_handshake_failed_locked(
+ exec_ctx, h,
+ GRPC_ERROR_CREATE_REFERENCING("Handshake read failed", &error, 1));
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+ // Process received data.
+ tsi_result result = TSI_OK;
+ size_t consumed_slice_size = 0;
+ size_t i;
+ for (i = 0; i < h->args->read_buffer->count; i++) {
+ consumed_slice_size = GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
+ result = tsi_handshaker_process_bytes_from_peer(
+ h->handshaker, GRPC_SLICE_START_PTR(h->args->read_buffer->slices[i]),
+ &consumed_slice_size);
+ if (!tsi_handshaker_is_in_progress(h->handshaker)) break;
+ }
+ if (tsi_handshaker_is_in_progress(h->handshaker)) {
+ /* We may need more data. */
+ if (result == TSI_INCOMPLETE_DATA) {
+ grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
+ &h->on_handshake_data_received_from_peer);
+ goto done;
+ } else {
+ error = send_handshake_bytes_to_peer_locked(exec_ctx, h);
+ if (error != GRPC_ERROR_NONE) {
+ security_handshake_failed_locked(exec_ctx, h, error);
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+ goto done;
+ }
+ }
+ if (result != TSI_OK) {
+ security_handshake_failed_locked(
+ exec_ctx, h, grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE("Handshake failed"), result));
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+  /* Handshake is done and successful at this point. */
+ bool has_left_overs_in_current_slice =
+ (consumed_slice_size <
+ GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]));
+ size_t num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) +
+ h->args->read_buffer->count - i - 1;
+ if (num_left_overs > 0) {
+    /* Put the leftovers in our buffer (ownership transferred). */
+ if (has_left_overs_in_current_slice) {
+ grpc_slice_buffer_add(
+ &h->left_overs,
+ grpc_slice_split_tail(&h->args->read_buffer->slices[i],
+ consumed_slice_size));
+ /* split_tail above increments refcount. */
+ grpc_slice_unref(h->args->read_buffer->slices[i]);
+ }
+ grpc_slice_buffer_addn(
+ &h->left_overs, &h->args->read_buffer->slices[i + 1],
+ num_left_overs - (size_t)has_left_overs_in_current_slice);
+ }
+ // Check peer.
+ error = check_peer_locked(exec_ctx, h);
+ if (error != GRPC_ERROR_NONE) {
+ security_handshake_failed_locked(exec_ctx, h, error);
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+done:
+ gpr_mu_unlock(&h->mu);
+}
+
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = arg;
+ gpr_mu_lock(&h->mu);
+ if (error != GRPC_ERROR_NONE || h->shutdown) {
+ security_handshake_failed_locked(
+ exec_ctx, h,
+ GRPC_ERROR_CREATE_REFERENCING("Handshake write failed", &error, 1));
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+ /* We may be done. */
+ if (tsi_handshaker_is_in_progress(h->handshaker)) {
+ grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
+ &h->on_handshake_data_received_from_peer);
+ } else {
+ error = check_peer_locked(exec_ctx, h);
+ if (error != GRPC_ERROR_NONE) {
+ security_handshake_failed_locked(exec_ctx, h, error);
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+ }
+ gpr_mu_unlock(&h->mu);
+}
+
+//
+// public handshaker API
+//
+
+static void security_handshaker_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker) {
+ security_handshaker *h = (security_handshaker *)handshaker;
+ security_handshaker_unref(exec_ctx, h);
+}
+
+static void security_handshaker_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker) {
+ security_handshaker *h = (security_handshaker *)handshaker;
+ gpr_mu_lock(&h->mu);
+ if (!h->shutdown) {
+ h->shutdown = true;
+ grpc_endpoint_shutdown(exec_ctx, h->args->endpoint);
+ cleanup_args_for_failure_locked(h);
+ }
+ gpr_mu_unlock(&h->mu);
+}
+
+static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker,
+ grpc_tcp_server_acceptor *acceptor,
+ grpc_closure *on_handshake_done,
+ grpc_handshaker_args *args) {
+ security_handshaker *h = (security_handshaker *)handshaker;
+ gpr_mu_lock(&h->mu);
+ h->args = args;
+ h->on_handshake_done = on_handshake_done;
+ gpr_ref(&h->refs);
+ grpc_error *error = send_handshake_bytes_to_peer_locked(exec_ctx, h);
+ if (error != GRPC_ERROR_NONE) {
+ security_handshake_failed_locked(exec_ctx, h, error);
+ gpr_mu_unlock(&h->mu);
+ security_handshaker_unref(exec_ctx, h);
+ return;
+ }
+ gpr_mu_unlock(&h->mu);
+}
+
+static const grpc_handshaker_vtable security_handshaker_vtable = {
+ security_handshaker_destroy, security_handshaker_shutdown,
+ security_handshaker_do_handshake};
+
+static grpc_handshaker *security_handshaker_create(
+ grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
+ grpc_security_connector *connector) {
+ security_handshaker *h = gpr_malloc(sizeof(security_handshaker));
+ memset(h, 0, sizeof(security_handshaker));
+ grpc_handshaker_init(&security_handshaker_vtable, &h->base);
+ h->handshaker = handshaker;
+ h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
+ gpr_mu_init(&h->mu);
+ gpr_ref_init(&h->refs, 1);
+ h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
+ h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ grpc_closure_init(&h->on_handshake_data_sent_to_peer,
+ on_handshake_data_sent_to_peer, h);
+ grpc_closure_init(&h->on_handshake_data_received_from_peer,
+ on_handshake_data_received_from_peer, h);
+ grpc_closure_init(&h->on_peer_checked, on_peer_checked, h);
+ grpc_slice_buffer_init(&h->left_overs);
+ grpc_slice_buffer_init(&h->outgoing);
+ return &h->base;
+}
+
+//
+// fail_handshaker
+//
+
+static void fail_handshaker_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker) {
+ gpr_free(handshaker);
+}
+
+static void fail_handshaker_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker) {}
+
+static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_handshaker *handshaker,
+ grpc_tcp_server_acceptor *acceptor,
+ grpc_closure *on_handshake_done,
+ grpc_handshaker_args *args) {
+ grpc_exec_ctx_sched(exec_ctx, on_handshake_done,
+ GRPC_ERROR_CREATE("Failed to create security handshaker"),
+ NULL);
+}
+
+static const grpc_handshaker_vtable fail_handshaker_vtable = {
+ fail_handshaker_destroy, fail_handshaker_shutdown,
+ fail_handshaker_do_handshake};
+
+static grpc_handshaker *fail_handshaker_create() {
+ grpc_handshaker *h = gpr_malloc(sizeof(*h));
+ grpc_handshaker_init(&fail_handshaker_vtable, h);
+ return h;
+}
+
+//
+// exported functions
+//
+
+grpc_handshaker *grpc_security_handshaker_create(
+ grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
+ grpc_security_connector *connector) {
+ // If no TSI handshaker was created, return a handshaker that always fails.
+ // Otherwise, return a real security handshaker.
+ if (handshaker == NULL) {
+ return fail_handshaker_create();
+ } else {
+ return security_handshaker_create(exec_ctx, handshaker, connector);
+ }
+}
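A design note on the file above: grpc_security_handshaker_create never returns NULL. If TSI handshaker creation failed upstream, the caller still receives a handshaker, but one that fails the handshake asynchronously (the fail_handshaker), so connector code such as ssl_channel_add_handshakers earlier in this change needs no NULL check. Illustrative sketch of that pattern; the factory call shown is hypothetical:

  static void my_add_handshakers(grpc_exec_ctx *exec_ctx,
                                 grpc_channel_security_connector *sc,
                                 grpc_handshake_manager *handshake_mgr) {
    tsi_handshaker *tsi_hs = NULL;
    /* If this fails, tsi_hs stays NULL... */
    create_my_tsi_handshaker(&tsi_hs); /* hypothetical factory call */
    /* ...and grpc_security_handshaker_create substitutes a fail_handshaker
       that reports the error when the handshake actually runs. */
    grpc_handshake_manager_add(
        handshake_mgr,
        grpc_security_handshaker_create(exec_ctx, tsi_hs, &sc->base));
  }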
diff --git a/src/core/lib/security/transport/handshake.h b/src/core/lib/security/transport/security_handshaker.h
index 53092f5421..5ddbf4b451 100644
--- a/src/core/lib/security/transport/handshake.h
+++ b/src/core/lib/security/transport/security_handshaker.h
@@ -31,20 +31,16 @@
*
*/
-#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_HANDSHAKE_H
-#define GRPC_CORE_LIB_SECURITY_TRANSPORT_HANDSHAKE_H
+#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H
+#define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H
-#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/security/transport/security_connector.h"
-/* Calls the callback upon completion. Takes owership of handshaker and
- * read_buffer. */
-void grpc_do_security_handshake(
+/// Creates a security handshaker using \a handshaker.
+grpc_handshaker *grpc_security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
- grpc_security_connector *connector, bool is_client_side,
- grpc_endpoint *nonsecure_endpoint, gpr_slice_buffer *read_buffer,
- gpr_timespec deadline, grpc_security_handshake_done_cb cb, void *user_data);
+ grpc_security_connector *connector);
-void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, void *handshake);
-
-#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_HANDSHAKE_H */
+#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H */
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index b2c6815af8..e6a242e68f 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -78,7 +78,7 @@ static grpc_metadata_array metadata_batch_to_md_array(
usr_md = &result.metadata[result.count++];
usr_md->key = grpc_mdstr_as_c_string(key);
usr_md->value = grpc_mdstr_as_c_string(value);
- usr_md->value_length = GPR_SLICE_LENGTH(value->slice);
+ usr_md->value_length = GRPC_SLICE_LENGTH(value->slice);
}
return result;
}
@@ -92,14 +92,14 @@ static grpc_mdelem *remove_consumed_md(void *user_data, grpc_mdelem *md) {
/* Maybe we could do a pointer comparison but we do not have any guarantee
that the metadata processor used the same pointers for consumed_md in the
callback. */
- if (GPR_SLICE_LENGTH(md->key->slice) != strlen(consumed_md->key) ||
- GPR_SLICE_LENGTH(md->value->slice) != consumed_md->value_length) {
+ if (GRPC_SLICE_LENGTH(md->key->slice) != strlen(consumed_md->key) ||
+ GRPC_SLICE_LENGTH(md->value->slice) != consumed_md->value_length) {
continue;
}
- if (memcmp(GPR_SLICE_START_PTR(md->key->slice), consumed_md->key,
- GPR_SLICE_LENGTH(md->key->slice)) == 0 &&
- memcmp(GPR_SLICE_START_PTR(md->value->slice), consumed_md->value,
- GPR_SLICE_LENGTH(md->value->slice)) == 0) {
+ if (memcmp(GRPC_SLICE_START_PTR(md->key->slice), consumed_md->key,
+ GRPC_SLICE_LENGTH(md->key->slice)) == 0 &&
+ memcmp(GRPC_SLICE_START_PTR(md->value->slice), consumed_md->value,
+ GRPC_SLICE_LENGTH(md->value->slice)) == 0) {
return NULL; /* Delete. */
}
}
@@ -134,14 +134,14 @@ static void on_md_processing_done(
grpc_metadata_array_destroy(&calld->md);
grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
} else {
- gpr_slice message;
+ grpc_slice message;
grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op));
memset(close_op, 0, sizeof(*close_op));
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
- message = gpr_slice_from_copied_string(error_details);
+ message = grpc_slice_from_copied_string(error_details);
calld->transport_op->send_initial_metadata = NULL;
if (calld->transport_op->send_message != NULL) {
grpc_byte_stream_destroy(&exec_ctx, calld->transport_op->send_message);
@@ -238,9 +238,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
void *ignored) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
grpc_auth_context *auth_context =
grpc_find_auth_context_in_args(args->channel_args);
grpc_server_credentials *creds =
@@ -256,6 +256,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
chand->creds = grpc_server_credentials_ref(creds);
+ return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
@@ -278,4 +279,5 @@ const grpc_channel_filter grpc_server_auth_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"server-auth"};
diff --git a/src/core/lib/security/util/b64.c b/src/core/lib/security/util/b64.c
index 9da42e4e73..4892e8e621 100644
--- a/src/core/lib/security/util/b64.c
+++ b/src/core/lib/security/util/b64.c
@@ -120,7 +120,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
return result;
}
-gpr_slice grpc_base64_decode(const char *b64, int url_safe) {
+grpc_slice grpc_base64_decode(const char *b64, int url_safe) {
return grpc_base64_decode_with_len(b64, strlen(b64), url_safe);
}
@@ -182,10 +182,10 @@ static int decode_group(const unsigned char *codes, size_t num_codes,
return 1;
}
-gpr_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
- int url_safe) {
- gpr_slice result = gpr_slice_malloc(b64_len);
- unsigned char *current = GPR_SLICE_START_PTR(result);
+grpc_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
+ int url_safe) {
+ grpc_slice result = grpc_slice_malloc(b64_len);
+ unsigned char *current = GRPC_SLICE_START_PTR(result);
size_t result_size = 0;
unsigned char codes[4];
size_t num_codes = 0;
@@ -224,10 +224,10 @@ gpr_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
!decode_group(codes, num_codes, current, &result_size)) {
goto fail;
}
- GPR_SLICE_SET_LENGTH(result, result_size);
+ GRPC_SLICE_SET_LENGTH(result, result_size);
return result;
fail:
- gpr_slice_unref(result);
+ grpc_slice_unref(result);
return gpr_empty_slice();
}
diff --git a/src/core/lib/security/util/b64.h b/src/core/lib/security/util/b64.h
index 6908095287..6ea0b5365b 100644
--- a/src/core/lib/security/util/b64.h
+++ b/src/core/lib/security/util/b64.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_LIB_SECURITY_UTIL_B64_H
#define GRPC_CORE_LIB_SECURITY_UTIL_B64_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
/* Encodes data using base64. It is the caller's responsibility to free
the returned char * using gpr_free. Returns NULL on NULL input. */
@@ -43,10 +43,10 @@ char *grpc_base64_encode(const void *data, size_t data_size, int url_safe,
/* Decodes data according to the base64 specification. Returns an empty
slice in case of failure. */
-gpr_slice grpc_base64_decode(const char *b64, int url_safe);
+grpc_slice grpc_base64_decode(const char *b64, int url_safe);
/* Same as above except that the length is provided by the caller. */
-gpr_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
- int url_safe);
+grpc_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len,
+ int url_safe);
#endif /* GRPC_CORE_LIB_SECURITY_UTIL_B64_H */
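For reference, an illustrative round trip through the renamed slice-based API (not part of this commit; the fourth encode argument, multiline, is taken from b64.h at this revision and assumes the usual <string.h> include):

  const char *input = "hello";
  char *b64 = grpc_base64_encode(input, strlen(input), 0 /* url_safe */,
                                 0 /* multiline */);
  grpc_slice decoded = grpc_base64_decode(b64, 0 /* url_safe */);
  GPR_ASSERT(GRPC_SLICE_LENGTH(decoded) == strlen(input));
  GPR_ASSERT(memcmp(GRPC_SLICE_START_PTR(decoded), input, strlen(input)) == 0);
  grpc_slice_unref(decoded); /* decode now returns a grpc_slice */
  gpr_free(b64);             /* encoder output is still freed with gpr_free */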
diff --git a/src/core/lib/support/percent_encoding.c b/src/core/lib/slice/percent_encoding.c
index 3c19f264f9..b9e35f1c71 100644
--- a/src/core/lib/support/percent_encoding.c
+++ b/src/core/lib/slice/percent_encoding.c
@@ -31,15 +31,15 @@
*
*/
-#include "src/core/lib/support/percent_encoding.h"
+#include "src/core/lib/slice/percent_encoding.h"
#include <grpc/support/log.h>
-const uint8_t gpr_url_percent_encoding_unreserved_bytes[256 / 8] = {
+const uint8_t grpc_url_percent_encoding_unreserved_bytes[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0xfe, 0xff, 0xff,
0x87, 0xfe, 0xff, 0xff, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-const uint8_t gpr_compatible_percent_encoding_unreserved_bytes[256 / 8] = {
+const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
@@ -49,14 +49,14 @@ static bool is_unreserved_character(uint8_t c,
return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0;
}
-gpr_slice gpr_percent_encode_slice(gpr_slice slice,
- const uint8_t *unreserved_bytes) {
+grpc_slice grpc_percent_encode_slice(grpc_slice slice,
+ const uint8_t *unreserved_bytes) {
static const uint8_t hex[] = "0123456789ABCDEF";
// first pass: count the number of bytes needed to output this string
size_t output_length = 0;
- const uint8_t *slice_start = GPR_SLICE_START_PTR(slice);
- const uint8_t *slice_end = GPR_SLICE_END_PTR(slice);
+ const uint8_t *slice_start = GRPC_SLICE_START_PTR(slice);
+ const uint8_t *slice_end = GRPC_SLICE_END_PTR(slice);
const uint8_t *p;
bool any_reserved_bytes = false;
for (p = slice_start; p < slice_end; p++) {
@@ -66,11 +66,11 @@ gpr_slice gpr_percent_encode_slice(gpr_slice slice,
}
// no reserved bytes: return the string unmodified
if (!any_reserved_bytes) {
- return gpr_slice_ref(slice);
+ return grpc_slice_ref(slice);
}
// second pass: actually encode
- gpr_slice out = gpr_slice_malloc(output_length);
- uint8_t *q = GPR_SLICE_START_PTR(out);
+ grpc_slice out = grpc_slice_malloc(output_length);
+ uint8_t *q = GRPC_SLICE_START_PTR(out);
for (p = slice_start; p < slice_end; p++) {
if (is_unreserved_character(*p, unreserved_bytes)) {
*q++ = *p;
@@ -80,7 +80,7 @@ gpr_slice gpr_percent_encode_slice(gpr_slice slice,
*q++ = hex[*p & 15];
}
}
- GPR_ASSERT(q == GPR_SLICE_END_PTR(out));
+ GPR_ASSERT(q == GRPC_SLICE_END_PTR(out));
return out;
}
@@ -97,11 +97,11 @@ static uint8_t dehex(uint8_t c) {
GPR_UNREACHABLE_CODE(return 255);
}
-bool gpr_strict_percent_decode_slice(gpr_slice slice_in,
- const uint8_t *unreserved_bytes,
- gpr_slice *slice_out) {
- const uint8_t *p = GPR_SLICE_START_PTR(slice_in);
- const uint8_t *in_end = GPR_SLICE_END_PTR(slice_in);
+bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
+ const uint8_t *unreserved_bytes,
+ grpc_slice *slice_out) {
+ const uint8_t *p = GRPC_SLICE_START_PTR(slice_in);
+ const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;
bool any_percent_encoded_stuff = false;
while (p != in_end) {
@@ -119,12 +119,12 @@ bool gpr_strict_percent_decode_slice(gpr_slice slice_in,
}
}
if (!any_percent_encoded_stuff) {
- *slice_out = gpr_slice_ref(slice_in);
+ *slice_out = grpc_slice_ref(slice_in);
return true;
}
- p = GPR_SLICE_START_PTR(slice_in);
- *slice_out = gpr_slice_malloc(out_length);
- uint8_t *q = GPR_SLICE_START_PTR(*slice_out);
+ p = GRPC_SLICE_START_PTR(slice_in);
+ *slice_out = grpc_slice_malloc(out_length);
+ uint8_t *q = GRPC_SLICE_START_PTR(*slice_out);
while (p != in_end) {
if (*p == '%') {
*q++ = (uint8_t)(dehex(p[1]) << 4) | (dehex(p[2]));
@@ -133,13 +133,13 @@ bool gpr_strict_percent_decode_slice(gpr_slice slice_in,
*q++ = *p++;
}
}
- GPR_ASSERT(q == GPR_SLICE_END_PTR(*slice_out));
+ GPR_ASSERT(q == GRPC_SLICE_END_PTR(*slice_out));
return true;
}
-gpr_slice gpr_permissive_percent_decode_slice(gpr_slice slice_in) {
- const uint8_t *p = GPR_SLICE_START_PTR(slice_in);
- const uint8_t *in_end = GPR_SLICE_END_PTR(slice_in);
+grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) {
+ const uint8_t *p = GRPC_SLICE_START_PTR(slice_in);
+ const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;
bool any_percent_encoded_stuff = false;
while (p != in_end) {
@@ -158,11 +158,11 @@ gpr_slice gpr_permissive_percent_decode_slice(gpr_slice slice_in) {
}
}
if (!any_percent_encoded_stuff) {
- return gpr_slice_ref(slice_in);
+ return grpc_slice_ref(slice_in);
}
- p = GPR_SLICE_START_PTR(slice_in);
- gpr_slice out = gpr_slice_malloc(out_length);
- uint8_t *q = GPR_SLICE_START_PTR(out);
+ p = GRPC_SLICE_START_PTR(slice_in);
+ grpc_slice out = grpc_slice_malloc(out_length);
+ uint8_t *q = GRPC_SLICE_START_PTR(out);
while (p != in_end) {
if (*p == '%') {
if (!valid_hex(p + 1, in_end) || !valid_hex(p + 2, in_end)) {
@@ -175,6 +175,6 @@ gpr_slice gpr_permissive_percent_decode_slice(gpr_slice slice_in) {
*q++ = *p++;
}
}
- GPR_ASSERT(q == GPR_SLICE_END_PTR(out));
+ GPR_ASSERT(q == GRPC_SLICE_END_PTR(out));
return out;
}
diff --git a/src/core/lib/support/percent_encoding.h b/src/core/lib/slice/percent_encoding.h
index 000bf14ede..189bdbe312 100644
--- a/src/core/lib/support/percent_encoding.h
+++ b/src/core/lib/slice/percent_encoding.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_PERCENT_ENCODING_H
-#define GRPC_CORE_LIB_SUPPORT_PERCENT_ENCODING_H
+#ifndef GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H
+#define GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H
/* Percent encoding and decoding of slices.
Transforms arbitrary strings into safe-for-transmission strings by using
@@ -43,36 +43,36 @@
#include <stdbool.h>
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
/* URL percent encoding spec bitfield (usable as 'unreserved_bytes' in
- gpr_percent_encode_slice, gpr_strict_percent_decode_slice).
+ grpc_percent_encode_slice, grpc_strict_percent_decode_slice).
Flags [A-Za-z0-9-_.~] as unreserved bytes for the percent encoding routines
*/
-extern const uint8_t gpr_url_percent_encoding_unreserved_bytes[256 / 8];
+extern const uint8_t grpc_url_percent_encoding_unreserved_bytes[256 / 8];
/* URL percent encoding spec bitfield (usable as 'unreserved_bytes' in
- gpr_percent_encode_slice, gpr_strict_percent_decode_slice).
+ grpc_percent_encode_slice, grpc_strict_percent_decode_slice).
Flags ascii7 non-control characters excluding '%' as unreserved bytes for the
percent encoding routines */
-extern const uint8_t gpr_compatible_percent_encoding_unreserved_bytes[256 / 8];
+extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8];
/* Percent-encode a slice, returning the new slice (this cannot fail):
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
-gpr_slice gpr_percent_encode_slice(gpr_slice slice,
- const uint8_t *unreserved_bytes);
+grpc_slice grpc_percent_encode_slice(grpc_slice slice,
+ const uint8_t *unreserved_bytes);
/* Percent-decode a slice, strictly.
If the input is legal (contains no unreserved bytes, and legal % encodings),
returns true and sets *slice_out to the decoded slice.
If the input is not legal, returns false and leaves *slice_out untouched.
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
-bool gpr_strict_percent_decode_slice(gpr_slice slice_in,
- const uint8_t *unreserved_bytes,
- gpr_slice *slice_out);
+bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
+ const uint8_t *unreserved_bytes,
+ grpc_slice *slice_out);
/* Percent-decode a slice, permissively.
If a % triplet can not be decoded, pass it through verbatim.
This cannot fail. */
-gpr_slice gpr_permissive_percent_decode_slice(gpr_slice slice_in);
+grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in);
-#endif /* GRPC_CORE_LIB_SUPPORT_PERCENT_ENCODING_H */
+#endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */
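An illustrative round trip through the renamed percent-encoding API (not part of this commit); it relies only on functions that appear in this diff:

  grpc_slice raw = grpc_slice_from_copied_string("hello, world!");
  grpc_slice enc = grpc_percent_encode_slice(
      raw, grpc_url_percent_encoding_unreserved_bytes);
  grpc_slice dec;
  if (grpc_strict_percent_decode_slice(
          enc, grpc_url_percent_encoding_unreserved_bytes, &dec)) {
    GPR_ASSERT(grpc_slice_cmp(raw, dec) == 0); /* decodes back to the input */
    grpc_slice_unref(dec);
  }
  grpc_slice_unref(enc);
  grpc_slice_unref(raw);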
diff --git a/src/core/lib/support/slice.c b/src/core/lib/slice/slice.c
index 8a2c0a9086..52977e6d9a 100644
--- a/src/core/lib/support/slice.c
+++ b/src/core/lib/slice/slice.c
@@ -31,51 +31,51 @@
*
*/
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
#include <string.h>
-gpr_slice gpr_empty_slice(void) {
- gpr_slice out;
+grpc_slice gpr_empty_slice(void) {
+ grpc_slice out;
out.refcount = 0;
out.data.inlined.length = 0;
return out;
}
-gpr_slice gpr_slice_ref(gpr_slice slice) {
+grpc_slice grpc_slice_ref(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->ref(slice.refcount);
}
return slice;
}
-void gpr_slice_unref(gpr_slice slice) {
+void grpc_slice_unref(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->unref(slice.refcount);
}
}
-/* gpr_slice_from_static_string support structure - a refcount that does
+/* grpc_slice_from_static_string support structure - a refcount that does
nothing */
static void noop_ref_or_unref(void *unused) {}
-static gpr_slice_refcount noop_refcount = {noop_ref_or_unref,
- noop_ref_or_unref};
+static grpc_slice_refcount noop_refcount = {noop_ref_or_unref,
+ noop_ref_or_unref};
-gpr_slice gpr_slice_from_static_string(const char *s) {
- gpr_slice slice;
+grpc_slice grpc_slice_from_static_string(const char *s) {
+ grpc_slice slice;
slice.refcount = &noop_refcount;
slice.data.refcounted.bytes = (uint8_t *)s;
slice.data.refcounted.length = strlen(s);
return slice;
}
-/* gpr_slice_new support structures - we create a refcount object extended
+/* grpc_slice_new support structures - we create a refcount object extended
with the user provided data pointer & destroy function */
typedef struct new_slice_refcount {
- gpr_slice_refcount rc;
+ grpc_slice_refcount rc;
gpr_refcount refs;
void (*user_destroy)(void *);
void *user_data;
@@ -94,10 +94,10 @@ static void new_slice_unref(void *p) {
}
}
-gpr_slice gpr_slice_new_with_user_data(void *p, size_t len,
- void (*destroy)(void *),
- void *user_data) {
- gpr_slice slice;
+grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
+ void (*destroy)(void *),
+ void *user_data) {
+ grpc_slice slice;
new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.ref = new_slice_ref;
@@ -111,15 +111,15 @@ gpr_slice gpr_slice_new_with_user_data(void *p, size_t len,
return slice;
}
-gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *)) {
+grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) {
/* Pass "p" to *destroy when the slice is no longer needed. */
- return gpr_slice_new_with_user_data(p, len, destroy, p);
+ return grpc_slice_new_with_user_data(p, len, destroy, p);
}
-/* gpr_slice_new_with_len support structures - we create a refcount object
+/* grpc_slice_new_with_len support structures - we create a refcount object
extended with the user provided data pointer & destroy function */
typedef struct new_with_len_slice_refcount {
- gpr_slice_refcount rc;
+ grpc_slice_refcount rc;
gpr_refcount refs;
void *user_data;
size_t user_length;
@@ -139,9 +139,9 @@ static void new_with_len_unref(void *p) {
}
}
-gpr_slice gpr_slice_new_with_len(void *p, size_t len,
- void (*destroy)(void *, size_t)) {
- gpr_slice slice;
+grpc_slice grpc_slice_new_with_len(void *p, size_t len,
+ void (*destroy)(void *, size_t)) {
+ grpc_slice slice;
new_with_len_slice_refcount *rc =
gpr_malloc(sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
@@ -157,18 +157,18 @@ gpr_slice gpr_slice_new_with_len(void *p, size_t len,
return slice;
}
-gpr_slice gpr_slice_from_copied_buffer(const char *source, size_t length) {
- gpr_slice slice = gpr_slice_malloc(length);
- memcpy(GPR_SLICE_START_PTR(slice), source, length);
+grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t length) {
+ grpc_slice slice = grpc_slice_malloc(length);
+ memcpy(GRPC_SLICE_START_PTR(slice), source, length);
return slice;
}
-gpr_slice gpr_slice_from_copied_string(const char *source) {
- return gpr_slice_from_copied_buffer(source, strlen(source));
+grpc_slice grpc_slice_from_copied_string(const char *source) {
+ return grpc_slice_from_copied_buffer(source, strlen(source));
}
typedef struct {
- gpr_slice_refcount base;
+ grpc_slice_refcount base;
gpr_refcount refs;
} malloc_refcount;
@@ -184,8 +184,8 @@ static void malloc_unref(void *p) {
}
}
-gpr_slice gpr_slice_malloc(size_t length) {
- gpr_slice slice;
+grpc_slice grpc_slice_malloc(size_t length) {
+ grpc_slice slice;
if (length > sizeof(slice.data.inlined.bytes)) {
/* Memory layout used by the slice created here:
@@ -221,8 +221,8 @@ gpr_slice gpr_slice_malloc(size_t length) {
return slice;
}
-gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
- gpr_slice subset;
+grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) {
+ grpc_slice subset;
GPR_ASSERT(end >= begin);
@@ -246,24 +246,24 @@ gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
return subset;
}
-gpr_slice gpr_slice_sub(gpr_slice source, size_t begin, size_t end) {
- gpr_slice subset;
+grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) {
+ grpc_slice subset;
if (end - begin <= sizeof(subset.data.inlined.bytes)) {
subset.refcount = NULL;
subset.data.inlined.length = (uint8_t)(end - begin);
- memcpy(subset.data.inlined.bytes, GPR_SLICE_START_PTR(source) + begin,
+ memcpy(subset.data.inlined.bytes, GRPC_SLICE_START_PTR(source) + begin,
end - begin);
} else {
- subset = gpr_slice_sub_no_ref(source, begin, end);
+ subset = grpc_slice_sub_no_ref(source, begin, end);
/* Bump the refcount */
subset.refcount->ref(subset.refcount);
}
return subset;
}
-gpr_slice gpr_slice_split_tail(gpr_slice *source, size_t split) {
- gpr_slice tail;
+grpc_slice grpc_slice_split_tail(grpc_slice *source, size_t split) {
+ grpc_slice tail;
if (source->refcount == NULL) {
/* inlined data, copy it out */
@@ -297,8 +297,8 @@ gpr_slice gpr_slice_split_tail(gpr_slice *source, size_t split) {
return tail;
}
-gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
- gpr_slice head;
+grpc_slice grpc_slice_split_head(grpc_slice *source, size_t split) {
+ grpc_slice head;
if (source->refcount == NULL) {
GPR_ASSERT(source->data.inlined.length >= split);
@@ -335,16 +335,24 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
return head;
}
-int gpr_slice_cmp(gpr_slice a, gpr_slice b) {
- int d = (int)(GPR_SLICE_LENGTH(a) - GPR_SLICE_LENGTH(b));
+int grpc_slice_cmp(grpc_slice a, grpc_slice b) {
+ int d = (int)(GRPC_SLICE_LENGTH(a) - GRPC_SLICE_LENGTH(b));
if (d != 0) return d;
- return memcmp(GPR_SLICE_START_PTR(a), GPR_SLICE_START_PTR(b),
- GPR_SLICE_LENGTH(a));
+ return memcmp(GRPC_SLICE_START_PTR(a), GRPC_SLICE_START_PTR(b),
+ GRPC_SLICE_LENGTH(a));
}
-int gpr_slice_str_cmp(gpr_slice a, const char *b) {
+int grpc_slice_str_cmp(grpc_slice a, const char *b) {
size_t b_length = strlen(b);
- int d = (int)(GPR_SLICE_LENGTH(a) - b_length);
+ int d = (int)(GRPC_SLICE_LENGTH(a) - b_length);
if (d != 0) return d;
- return memcmp(GPR_SLICE_START_PTR(a), b, b_length);
+ return memcmp(GRPC_SLICE_START_PTR(a), b, b_length);
+}
+
+int grpc_slice_is_equivalent(grpc_slice a, grpc_slice b) {
+ if (a.refcount == NULL || b.refcount == NULL) {
+ return grpc_slice_cmp(a, b) == 0;
+ }
+ return a.data.refcounted.length == b.data.refcounted.length &&
+ a.data.refcounted.bytes == b.data.refcounted.bytes;
}
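
The renamed comparison helpers above keep their old semantics, and the new grpc_slice_is_equivalent short-circuits to an identity check (same bytes pointer and length) when both slices are refcounted, falling back to a byte-wise grpc_slice_cmp for inlined slices. A minimal sketch of the API as declared in this diff; the test-style scaffolding around it is assumed:

    #include <grpc/slice.h>
    #include <grpc/support/log.h>

    static void slice_compare_sketch(void) {
      grpc_slice a = grpc_slice_from_copied_string("hello");
      grpc_slice b = grpc_slice_from_copied_string("hello");
      grpc_slice c = grpc_slice_ref(a);
      GPR_ASSERT(grpc_slice_cmp(a, b) == 0);           /* byte-wise equality */
      GPR_ASSERT(grpc_slice_str_cmp(a, "hello") == 0); /* compare to C string */
      GPR_ASSERT(grpc_slice_is_equivalent(a, c));      /* equal by content or identity */
      grpc_slice_unref(a);
      grpc_slice_unref(b);
      grpc_slice_unref(c);
    }
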
diff --git a/src/core/lib/support/slice_buffer.c b/src/core/lib/slice/slice_buffer.c
index 66f111d767..990ef128bd 100644
--- a/src/core/lib/support/slice_buffer.c
+++ b/src/core/lib/slice/slice_buffer.c
@@ -31,8 +31,8 @@
*
*/
+#include <grpc/slice_buffer.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice_buffer.h>
#include <string.h>
@@ -43,35 +43,35 @@
/* grow a buffer; requires GRPC_SLICE_BUFFER_INLINE_ELEMENTS > 1 */
#define GROW(x) (3 * (x) / 2)
-static void maybe_embiggen(gpr_slice_buffer *sb) {
+static void maybe_embiggen(grpc_slice_buffer *sb) {
if (sb->count == sb->capacity) {
sb->capacity = GROW(sb->capacity);
GPR_ASSERT(sb->capacity > sb->count);
if (sb->slices == sb->inlined) {
- sb->slices = gpr_malloc(sb->capacity * sizeof(gpr_slice));
- memcpy(sb->slices, sb->inlined, sb->count * sizeof(gpr_slice));
+ sb->slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ memcpy(sb->slices, sb->inlined, sb->count * sizeof(grpc_slice));
} else {
- sb->slices = gpr_realloc(sb->slices, sb->capacity * sizeof(gpr_slice));
+ sb->slices = gpr_realloc(sb->slices, sb->capacity * sizeof(grpc_slice));
}
}
}
-void gpr_slice_buffer_init(gpr_slice_buffer *sb) {
+void grpc_slice_buffer_init(grpc_slice_buffer *sb) {
sb->count = 0;
sb->length = 0;
sb->capacity = GRPC_SLICE_BUFFER_INLINE_ELEMENTS;
sb->slices = sb->inlined;
}
-void gpr_slice_buffer_destroy(gpr_slice_buffer *sb) {
- gpr_slice_buffer_reset_and_unref(sb);
+void grpc_slice_buffer_destroy(grpc_slice_buffer *sb) {
+ grpc_slice_buffer_reset_and_unref(sb);
if (sb->slices != sb->inlined) {
gpr_free(sb->slices);
}
}
-uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t n) {
- gpr_slice *back;
+uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t n) {
+ grpc_slice *back;
uint8_t *out;
sb->length += n;
@@ -94,16 +94,16 @@ add_new:
return back->data.inlined.bytes;
}
-size_t gpr_slice_buffer_add_indexed(gpr_slice_buffer *sb, gpr_slice s) {
+size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, grpc_slice s) {
size_t out = sb->count;
maybe_embiggen(sb);
sb->slices[out] = s;
- sb->length += GPR_SLICE_LENGTH(s);
+ sb->length += GRPC_SLICE_LENGTH(s);
sb->count = out + 1;
return out;
}
-void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
+void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) {
size_t n = sb->count;
/* if both the last slice in the slice buffer and the slice being added
are inlined (that is, that they carry their data inside the slice data
@@ -111,19 +111,20 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
into the back slice, preventing many small slices being passed into
writes */
if (!s.refcount && n) {
- gpr_slice *back = &sb->slices[n - 1];
- if (!back->refcount && back->data.inlined.length < GPR_SLICE_INLINED_SIZE) {
+ grpc_slice *back = &sb->slices[n - 1];
+ if (!back->refcount &&
+ back->data.inlined.length < GRPC_SLICE_INLINED_SIZE) {
if (s.data.inlined.length + back->data.inlined.length <=
- GPR_SLICE_INLINED_SIZE) {
+ GRPC_SLICE_INLINED_SIZE) {
memcpy(back->data.inlined.bytes + back->data.inlined.length,
s.data.inlined.bytes, s.data.inlined.length);
back->data.inlined.length =
(uint8_t)(back->data.inlined.length + s.data.inlined.length);
} else {
- size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length;
+ size_t cp1 = GRPC_SLICE_INLINED_SIZE - back->data.inlined.length;
memcpy(back->data.inlined.bytes + back->data.inlined.length,
s.data.inlined.bytes, cp1);
- back->data.inlined.length = GPR_SLICE_INLINED_SIZE;
+ back->data.inlined.length = GRPC_SLICE_INLINED_SIZE;
maybe_embiggen(sb);
back = &sb->slices[n];
sb->count = n + 1;
@@ -136,35 +137,35 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
return; /* early out */
}
}
- gpr_slice_buffer_add_indexed(sb, s);
+ grpc_slice_buffer_add_indexed(sb, s);
}
-void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *s, size_t n) {
+void grpc_slice_buffer_addn(grpc_slice_buffer *sb, grpc_slice *s, size_t n) {
size_t i;
for (i = 0; i < n; i++) {
- gpr_slice_buffer_add(sb, s[i]);
+ grpc_slice_buffer_add(sb, s[i]);
}
}
-void gpr_slice_buffer_pop(gpr_slice_buffer *sb) {
+void grpc_slice_buffer_pop(grpc_slice_buffer *sb) {
if (sb->count != 0) {
size_t count = --sb->count;
- sb->length -= GPR_SLICE_LENGTH(sb->slices[count]);
+ sb->length -= GRPC_SLICE_LENGTH(sb->slices[count]);
}
}
-void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb) {
+void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb) {
size_t i;
for (i = 0; i < sb->count; i++) {
- gpr_slice_unref(sb->slices[i]);
+ grpc_slice_unref(sb->slices[i]);
}
sb->count = 0;
sb->length = 0;
}
-void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b) {
+void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) {
GPR_SWAP(size_t, a->count, b->count);
GPR_SWAP(size_t, a->capacity, b->capacity);
GPR_SWAP(size_t, a->length, b->length);
@@ -172,111 +173,112 @@ void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b) {
if (a->slices == a->inlined) {
if (b->slices == b->inlined) {
/* swap contents of inlined buffer */
- gpr_slice temp[GRPC_SLICE_BUFFER_INLINE_ELEMENTS];
- memcpy(temp, a->slices, b->count * sizeof(gpr_slice));
- memcpy(a->slices, b->slices, a->count * sizeof(gpr_slice));
- memcpy(b->slices, temp, b->count * sizeof(gpr_slice));
+ grpc_slice temp[GRPC_SLICE_BUFFER_INLINE_ELEMENTS];
+ memcpy(temp, a->slices, b->count * sizeof(grpc_slice));
+ memcpy(a->slices, b->slices, a->count * sizeof(grpc_slice));
+ memcpy(b->slices, temp, b->count * sizeof(grpc_slice));
} else {
/* a is inlined, b is not - copy a inlined into b, fix pointers */
a->slices = b->slices;
b->slices = b->inlined;
- memcpy(b->slices, a->inlined, b->count * sizeof(gpr_slice));
+ memcpy(b->slices, a->inlined, b->count * sizeof(grpc_slice));
}
} else if (b->slices == b->inlined) {
/* b is inlined, a is not - copy b inlined into a, fix pointers */
b->slices = a->slices;
a->slices = a->inlined;
- memcpy(a->slices, b->inlined, a->count * sizeof(gpr_slice));
+ memcpy(a->slices, b->inlined, a->count * sizeof(grpc_slice));
} else {
/* no inlining: easy swap */
- GPR_SWAP(gpr_slice *, a->slices, b->slices);
+ GPR_SWAP(grpc_slice *, a->slices, b->slices);
}
}
-void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
+void grpc_slice_buffer_move_into(grpc_slice_buffer *src,
+ grpc_slice_buffer *dst) {
/* anything to move? */
if (src->count == 0) {
return;
}
/* anything in dst? */
if (dst->count == 0) {
- gpr_slice_buffer_swap(src, dst);
+ grpc_slice_buffer_swap(src, dst);
return;
}
/* both buffers have data - copy, and reset src */
- gpr_slice_buffer_addn(dst, src->slices, src->count);
+ grpc_slice_buffer_addn(dst, src->slices, src->count);
src->count = 0;
src->length = 0;
}
-void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
- gpr_slice_buffer *dst) {
+void grpc_slice_buffer_move_first(grpc_slice_buffer *src, size_t n,
+ grpc_slice_buffer *dst) {
size_t src_idx;
size_t output_len = dst->length + n;
size_t new_input_len = src->length - n;
GPR_ASSERT(src->length >= n);
if (src->length == n) {
- gpr_slice_buffer_move_into(src, dst);
+ grpc_slice_buffer_move_into(src, dst);
return;
}
src_idx = 0;
while (src_idx < src->capacity) {
- gpr_slice slice = src->slices[src_idx];
- size_t slice_len = GPR_SLICE_LENGTH(slice);
+ grpc_slice slice = src->slices[src_idx];
+ size_t slice_len = GRPC_SLICE_LENGTH(slice);
if (n > slice_len) {
- gpr_slice_buffer_add(dst, slice);
+ grpc_slice_buffer_add(dst, slice);
n -= slice_len;
src_idx++;
} else if (n == slice_len) {
- gpr_slice_buffer_add(dst, slice);
+ grpc_slice_buffer_add(dst, slice);
src_idx++;
break;
} else { /* n < slice_len */
- src->slices[src_idx] = gpr_slice_split_tail(&slice, n);
- GPR_ASSERT(GPR_SLICE_LENGTH(slice) == n);
- GPR_ASSERT(GPR_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
- gpr_slice_buffer_add(dst, slice);
+ src->slices[src_idx] = grpc_slice_split_tail(&slice, n);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(slice) == n);
+ GPR_ASSERT(GRPC_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
+ grpc_slice_buffer_add(dst, slice);
break;
}
}
GPR_ASSERT(dst->length == output_len);
memmove(src->slices, src->slices + src_idx,
- sizeof(gpr_slice) * (src->count - src_idx));
+ sizeof(grpc_slice) * (src->count - src_idx));
src->count -= src_idx;
src->length = new_input_len;
GPR_ASSERT(src->count > 0);
}
-void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
- gpr_slice_buffer *garbage) {
+void grpc_slice_buffer_trim_end(grpc_slice_buffer *sb, size_t n,
+ grpc_slice_buffer *garbage) {
GPR_ASSERT(n <= sb->length);
sb->length -= n;
for (;;) {
size_t idx = sb->count - 1;
- gpr_slice slice = sb->slices[idx];
- size_t slice_len = GPR_SLICE_LENGTH(slice);
+ grpc_slice slice = sb->slices[idx];
+ size_t slice_len = GRPC_SLICE_LENGTH(slice);
if (slice_len > n) {
- sb->slices[idx] = gpr_slice_split_head(&slice, slice_len - n);
- gpr_slice_buffer_add_indexed(garbage, slice);
+ sb->slices[idx] = grpc_slice_split_head(&slice, slice_len - n);
+ grpc_slice_buffer_add_indexed(garbage, slice);
return;
} else if (slice_len == n) {
- gpr_slice_buffer_add_indexed(garbage, slice);
+ grpc_slice_buffer_add_indexed(garbage, slice);
sb->count = idx;
return;
} else {
- gpr_slice_buffer_add_indexed(garbage, slice);
+ grpc_slice_buffer_add_indexed(garbage, slice);
n -= slice_len;
sb->count = idx;
}
}
}
-gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *sb) {
- gpr_slice slice;
+grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *sb) {
+ grpc_slice slice;
GPR_ASSERT(sb->count > 0);
slice = sb->slices[0];
- memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(gpr_slice));
+ memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(grpc_slice));
sb->count--;
- sb->length -= GPR_SLICE_LENGTH(slice);
+ sb->length -= GRPC_SLICE_LENGTH(slice);
return slice;
}
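
The slice-buffer rename above is mechanical, but the add path is worth noting: small inlined slices are merged into the buffer's last slice when they fit, so many tiny appends do not produce many tiny writes. A brief usage sketch of the renamed API (surrounding setup assumed):

    #include <string.h>

    #include <grpc/slice.h>
    #include <grpc/slice_buffer.h>
    #include <grpc/support/log.h>

    static void slice_buffer_sketch(void) {
      grpc_slice_buffer sb;
      grpc_slice_buffer_init(&sb);
      grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("header: "));
      grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("value"));
      /* length tracks total bytes even when small inlined slices get merged */
      GPR_ASSERT(sb.length == strlen("header: value"));
      grpc_slice first = grpc_slice_buffer_take_first(&sb); /* FIFO pop */
      grpc_slice_unref(first);
      grpc_slice_buffer_destroy(&sb); /* unrefs anything still held */
    }
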
diff --git a/src/core/lib/slice/slice_string_helpers.c b/src/core/lib/slice/slice_string_helpers.c
new file mode 100644
index 0000000000..4731762ece
--- /dev/null
+++ b/src/core/lib/slice/slice_string_helpers.c
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/slice/slice_string_helpers.h"
+
+#include <string.h>
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/support/string.h"
+
+char *grpc_dump_slice(grpc_slice s, uint32_t flags) {
+ return gpr_dump((const char *)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s),
+ flags);
+}
+
+/** Finds the initial (\a begin) and final (\a end) offsets of the next
+ * substring from \a str + \a read_offset until the next \a sep or the end of \a
+ * str.
+ *
+ * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
+static int slice_find_separator_offset(const grpc_slice str, const char *sep,
+ const size_t read_offset, size_t *begin,
+ size_t *end) {
+ size_t i;
+ const uint8_t *str_ptr = GRPC_SLICE_START_PTR(str) + read_offset;
+ const size_t str_len = GRPC_SLICE_LENGTH(str) - read_offset;
+ const size_t sep_len = strlen(sep);
+ if (str_len < sep_len) {
+ return 0;
+ }
+
+ for (i = 0; i <= str_len - sep_len; i++) {
+ if (memcmp(str_ptr + i, sep, sep_len) == 0) {
+ *begin = read_offset;
+ *end = read_offset + i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst) {
+ const size_t sep_len = strlen(sep);
+ size_t begin, end;
+
+ GPR_ASSERT(sep_len > 0);
+
+ if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) {
+ do {
+ grpc_slice_buffer_add_indexed(dst, grpc_slice_sub(str, begin, end));
+ } while (slice_find_separator_offset(str, sep, end + sep_len, &begin,
+ &end) != 0);
+ grpc_slice_buffer_add_indexed(
+ dst, grpc_slice_sub(str, end + sep_len, GRPC_SLICE_LENGTH(str)));
+ } else { /* no sep found, add whole input */
+ grpc_slice_buffer_add_indexed(dst, grpc_slice_ref(str));
+ }
+}
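
slice_string_helpers.c takes over gpr_slice_split and gpr_dump_slice from support/string.c (removed later in this diff) under their new grpc_ names. A sketch of splitting a comma-separated header value, mirroring the accept-encoding handling in call.c below; GPR_DUMP_ASCII comes from the internal support/string.h:

    #include <grpc/slice.h>
    #include <grpc/slice_buffer.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    #include "src/core/lib/slice/slice_string_helpers.h"
    #include "src/core/lib/support/string.h"

    static void split_sketch(void) {
      grpc_slice input = grpc_slice_from_copied_string("gzip,deflate,identity");
      grpc_slice_buffer parts;
      grpc_slice_buffer_init(&parts);
      grpc_slice_split(input, ",", &parts); /* parts.count == 3 */
      for (size_t i = 0; i < parts.count; i++) {
        char *text = grpc_dump_slice(parts.slices[i], GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "part[%d]: %s", (int)i, text);
        gpr_free(text);
      }
      grpc_slice_buffer_destroy(&parts);
      grpc_slice_unref(input);
    }
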
diff --git a/src/core/lib/slice/slice_string_helpers.h b/src/core/lib/slice/slice_string_helpers.h
new file mode 100644
index 0000000000..151c720777
--- /dev/null
+++ b/src/core/lib/slice/slice_string_helpers.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H
+#define GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H
+
+#include <stddef.h>
+
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
+#include <grpc/support/port_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Calls gpr_dump on a slice. */
+char *grpc_dump_slice(grpc_slice slice, uint32_t flags);
+
+/** Split \a str by the separator \a sep. Results are stored in \a dst, which
+ * should be a properly initialized instance. */
+void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H */
diff --git a/src/core/lib/support/backoff.c b/src/core/lib/support/backoff.c
index e89ef47220..0612472712 100644
--- a/src/core/lib/support/backoff.c
+++ b/src/core/lib/support/backoff.c
@@ -35,8 +35,10 @@
#include <grpc/support/useful.h>
-void gpr_backoff_init(gpr_backoff *backoff, double multiplier, double jitter,
+void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
+ double multiplier, double jitter,
int64_t min_timeout_millis, int64_t max_timeout_millis) {
+ backoff->initial_connect_timeout = initial_connect_timeout;
backoff->multiplier = multiplier;
backoff->jitter = jitter;
backoff->min_timeout_millis = min_timeout_millis;
@@ -45,9 +47,10 @@ void gpr_backoff_init(gpr_backoff *backoff, double multiplier, double jitter,
}
gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) {
- backoff->current_timeout_millis = backoff->min_timeout_millis;
- return gpr_time_add(
- now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
+ backoff->current_timeout_millis = backoff->initial_connect_timeout;
+ const int64_t first_timeout =
+ GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis);
+ return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN));
}
/* Generate a random number between 0 and 1. */
@@ -57,20 +60,28 @@ static double generate_uniform_random_number(uint32_t *rng_state) {
}
gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
- double new_timeout_millis =
+ const double new_timeout_millis =
backoff->multiplier * (double)backoff->current_timeout_millis;
- double jitter_range = backoff->jitter * new_timeout_millis;
- double jitter =
+ backoff->current_timeout_millis =
+ GPR_MIN((int64_t)new_timeout_millis, backoff->max_timeout_millis);
+
+ const double jitter_range_width = backoff->jitter * new_timeout_millis;
+ const double jitter =
(2 * generate_uniform_random_number(&backoff->rng_state) - 1) *
- jitter_range;
+ jitter_range_width;
+
backoff->current_timeout_millis =
- GPR_CLAMP((int64_t)(new_timeout_millis + jitter),
- backoff->min_timeout_millis, backoff->max_timeout_millis);
- return gpr_time_add(
+ (int64_t)((double)(backoff->current_timeout_millis) + jitter);
+
+ const gpr_timespec current_deadline = gpr_time_add(
now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
+
+ const gpr_timespec min_deadline = gpr_time_add(
+ now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN));
+
+ return gpr_time_max(current_deadline, min_deadline);
}
void gpr_backoff_reset(gpr_backoff *backoff) {
- // forces step() to return a timeout of min_timeout_millis
- backoff->current_timeout_millis = 0;
+ backoff->current_timeout_millis = backoff->initial_connect_timeout;
}
diff --git a/src/core/lib/support/backoff.h b/src/core/lib/support/backoff.h
index 6d40c15546..5e9b740824 100644
--- a/src/core/lib/support/backoff.h
+++ b/src/core/lib/support/backoff.h
@@ -37,7 +37,9 @@
#include <grpc/support/time.h>
typedef struct {
- /// const: multiplier between retry attempts
+ /// const: how long to wait after the first failure before retrying
+ int64_t initial_connect_timeout;
+ /// const: factor with which to multiply backoff after a failed retry
double multiplier;
/// const: amount to randomize backoffs
double jitter;
@@ -54,7 +56,8 @@ typedef struct {
} gpr_backoff;
/// Initialize backoff machinery - does not need to be destroyed
-void gpr_backoff_init(gpr_backoff *backoff, double multiplier, double jitter,
+void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
+ double multiplier, double jitter,
int64_t min_timeout_millis, int64_t max_timeout_millis);
/// Begin retry loop: returns a timespec for the NEXT retry
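
With the new initial_connect_timeout parameter, gpr_backoff_begin no longer assumes the first deadline equals min_timeout_millis: it uses the larger of the initial timeout and the minimum, and gpr_backoff_reset now returns to the initial timeout instead of zero. A sketch of the updated contract; the timeout constants are illustrative only:

    #include <grpc/support/time.h>

    #include "src/core/lib/support/backoff.h"

    static void backoff_sketch(void) {
      gpr_backoff backoff;
      /* illustrative values: 1s initial, 1.6x growth, 20% jitter,
         bounded to [100ms, 120s] */
      gpr_backoff_init(&backoff, 1000 /* initial_connect_timeout */,
                       1.6 /* multiplier */, 0.2 /* jitter */,
                       100 /* min_timeout_millis */,
                       120000 /* max_timeout_millis */);
      gpr_timespec deadline =
          gpr_backoff_begin(&backoff, gpr_now(GPR_CLOCK_MONOTONIC));
      /* each failed attempt multiplies the timeout, clamps it to the maximum,
         applies jitter, and never returns earlier than now + min timeout */
      deadline = gpr_backoff_step(&backoff, gpr_now(GPR_CLOCK_MONOTONIC));
      /* success: go back to the initial connect timeout */
      gpr_backoff_reset(&backoff);
      (void)deadline;
    }
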
diff --git a/src/core/lib/support/env.h b/src/core/lib/support/env.h
index ec3959bc6e..6ada5d9390 100644
--- a/src/core/lib/support/env.h
+++ b/src/core/lib/support/env.h
@@ -36,8 +36,6 @@
#include <stdio.h>
-#include <grpc/support/slice.h>
-
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index d17fb9da4b..f263f82baf 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -34,7 +34,9 @@
#include "src/core/lib/support/string.h"
#include <ctype.h>
+#include <limits.h>
#include <stddef.h>
+#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -120,11 +122,6 @@ char *gpr_dump(const char *buf, size_t len, uint32_t flags) {
return out.data;
}
-char *gpr_dump_slice(gpr_slice s, uint32_t flags) {
- return gpr_dump((const char *)GPR_SLICE_START_PTR(s), GPR_SLICE_LENGTH(s),
- flags);
-}
-
int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) {
uint32_t out = 0;
uint32_t new;
@@ -194,6 +191,13 @@ int int64_ttoa(int64_t value, char *string) {
return i;
}
+int gpr_parse_nonnegative_int(const char *value) {
+ char *end;
+ long result = strtol(value, &end, 0);
+ if (*end != '\0' || result < 0 || result > INT_MAX) return -1;
+ return (int)result;
+}
+
char *gpr_leftpad(const char *str, char flag, size_t length) {
const size_t str_length = strlen(str);
const size_t out_length = str_length > length ? str_length : length;
@@ -239,50 +243,6 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
return out;
}
-/** Finds the initial (\a begin) and final (\a end) offsets of the next
- * substring from \a str + \a read_offset until the next \a sep or the end of \a
- * str.
- *
- * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
-static int slice_find_separator_offset(const gpr_slice str, const char *sep,
- const size_t read_offset, size_t *begin,
- size_t *end) {
- size_t i;
- const uint8_t *str_ptr = GPR_SLICE_START_PTR(str) + read_offset;
- const size_t str_len = GPR_SLICE_LENGTH(str) - read_offset;
- const size_t sep_len = strlen(sep);
- if (str_len < sep_len) {
- return 0;
- }
-
- for (i = 0; i <= str_len - sep_len; i++) {
- if (memcmp(str_ptr + i, sep, sep_len) == 0) {
- *begin = read_offset;
- *end = read_offset + i;
- return 1;
- }
- }
- return 0;
-}
-
-void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst) {
- const size_t sep_len = strlen(sep);
- size_t begin, end;
-
- GPR_ASSERT(sep_len > 0);
-
- if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) {
- do {
- gpr_slice_buffer_add_indexed(dst, gpr_slice_sub(str, begin, end));
- } while (slice_find_separator_offset(str, sep, end + sep_len, &begin,
- &end) != 0);
- gpr_slice_buffer_add_indexed(
- dst, gpr_slice_sub(str, end + sep_len, GPR_SLICE_LENGTH(str)));
- } else { /* no sep found, add whole input */
- gpr_slice_buffer_add_indexed(dst, gpr_slice_ref(str));
- }
-}
-
void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); }
void gpr_strvec_destroy(gpr_strvec *sv) {
@@ -315,3 +275,14 @@ int gpr_stricmp(const char *a, const char *b) {
} while (ca == cb && ca && cb);
return ca - cb;
}
+
+void *gpr_memrchr(const void *s, int c, size_t n) {
+ if (s == NULL) return NULL;
+ char *b = (char *)s;
+ for (size_t i = 0; i < n; i++) {
+ if (b[n - i - 1] == c) {
+ return &b[n - i - 1];
+ }
+ }
+ return NULL;
+}
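
Two small helpers land in support/string.c above: gpr_parse_nonnegative_int, a strtol wrapper that rejects trailing characters, negatives, and values above INT_MAX, and gpr_memrchr, a reverse memchr. A test-style sketch of their behaviour:

    #include <string.h>

    #include <grpc/support/log.h>

    #include "src/core/lib/support/string.h"

    static void string_helpers_sketch(void) {
      GPR_ASSERT(gpr_parse_nonnegative_int("42") == 42);
      GPR_ASSERT(gpr_parse_nonnegative_int("42x") == -1); /* trailing junk */
      GPR_ASSERT(gpr_parse_nonnegative_int("-1") == -1);  /* negative */

      /* find the last ':' so IPv6 literals like [::1]:443 split correctly */
      const char *hostport = "[::1]:443";
      char *colon = gpr_memrchr(hostport, ':', strlen(hostport));
      GPR_ASSERT(colon != NULL && strcmp(colon, ":443") == 0);
    }
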
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index 9a94e9471c..987d31ca81 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -37,8 +37,6 @@
#include <stddef.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
-#include <grpc/support/slice_buffer.h>
#ifdef __cplusplus
extern "C" {
@@ -54,9 +52,6 @@ extern "C" {
Result should be freed with gpr_free() */
char *gpr_dump(const char *buf, size_t len, uint32_t flags);
-/* Calls gpr_dump on a slice. */
-char *gpr_dump_slice(gpr_slice slice, uint32_t flags);
-
/* Parses an array of bytes into an integer (base 10). Returns 1 on success,
0 on failure. */
int gpr_parse_bytes_to_uint32(const char *data, size_t length,
@@ -80,6 +75,9 @@ NOTE: This function ensures sufficient bit width even on Win x64,
where long is 32 bits in size. */
int int64_ttoa(int64_t value, char *output);
+// Parses a non-negative number from a value string. Returns -1 on error.
+int gpr_parse_nonnegative_int(const char *value);
+
/* Reverse a run of bytes */
void gpr_reverse_bytes(char *str, int len);
@@ -98,10 +96,6 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length);
char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
size_t *total_length);
-/** Split \a str by the separator \a sep. Results are stored in \a dst, which
- * should be a properly initialized instance. */
-void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst);
-
/* A vector of strings... for building up a final string one piece at a time */
typedef struct {
char **strs;
@@ -122,6 +116,8 @@ char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
lower(a)==lower(b), >0 if lower(a)>lower(b) */
int gpr_stricmp(const char *a, const char *b);
+void *gpr_memrchr(const void *s, int c, size_t n);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/support/subprocess_posix.c b/src/core/lib/support/subprocess_posix.c
index 4f4de9298e..4247a1c12b 100644
--- a/src/core/lib/support/subprocess_posix.c
+++ b/src/core/lib/support/subprocess_posix.c
@@ -40,6 +40,7 @@
#include <assert.h>
#include <errno.h>
#include <signal.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -52,7 +53,7 @@
struct gpr_subprocess {
int pid;
- int joined;
+ bool joined;
};
const char *gpr_subprocess_binary_extension() { return ""; }
@@ -97,9 +98,11 @@ retry:
if (errno == EINTR) {
goto retry;
}
- gpr_log(GPR_ERROR, "waitpid failed: %s", strerror(errno));
+ gpr_log(GPR_ERROR, "waitpid failed for pid %d: %s", p->pid,
+ strerror(errno));
return -1;
}
+ p->joined = true;
return status;
}
diff --git a/src/core/lib/support/tmpfile.h b/src/core/lib/support/tmpfile.h
index 059142ab0f..f613cf9bc8 100644
--- a/src/core/lib/support/tmpfile.h
+++ b/src/core/lib/support/tmpfile.h
@@ -36,8 +36,6 @@
#include <stdio.h>
-#include <grpc/support/slice.h>
-
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/src/core/lib/surface/byte_buffer.c b/src/core/lib/surface/byte_buffer.c
index 054a6e6c58..d646591a65 100644
--- a/src/core/lib/surface/byte_buffer.c
+++ b/src/core/lib/surface/byte_buffer.c
@@ -35,22 +35,23 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
+grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
size_t nslices) {
return grpc_raw_compressed_byte_buffer_create(slices, nslices,
GRPC_COMPRESS_NONE);
}
grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
- gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression) {
+ grpc_slice *slices, size_t nslices,
+ grpc_compression_algorithm compression) {
size_t i;
grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
- gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
+ grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
for (i = 0; i < nslices; i++) {
- gpr_slice_ref(slices[i]);
- gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slices[i]);
+ grpc_slice_ref(slices[i]);
+ grpc_slice_buffer_add(&bb->data.raw.slice_buffer, slices[i]);
}
return bb;
}
@@ -58,13 +59,13 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader) {
grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
- gpr_slice slice;
+ grpc_slice slice;
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
- gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
+ grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
while (grpc_byte_buffer_reader_next(reader, &slice)) {
- gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slice);
+ grpc_slice_buffer_add(&bb->data.raw.slice_buffer, slice);
}
return bb;
}
@@ -83,7 +84,7 @@ void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
if (!bb) return;
switch (bb->type) {
case GRPC_BB_RAW:
- gpr_slice_buffer_destroy(&bb->data.raw.slice_buffer);
+ grpc_slice_buffer_destroy(&bb->data.raw.slice_buffer);
break;
}
gpr_free(bb);
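
byte_buffer.c now speaks grpc_slice throughout; the construction helpers still take their own ref on every slice they are given, so callers keep ownership of their references. A minimal sketch:

    #include <grpc/byte_buffer.h>
    #include <grpc/slice.h>

    static grpc_byte_buffer *make_byte_buffer_sketch(void) {
      grpc_slice payload = grpc_slice_from_copied_string("request body");
      /* the byte buffer refs each slice passed in */
      grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&payload, 1);
      grpc_slice_unref(payload); /* drop the caller's ref */
      return bb;                 /* release later with grpc_byte_buffer_destroy() */
    }
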
diff --git a/src/core/lib/surface/byte_buffer_reader.c b/src/core/lib/surface/byte_buffer_reader.c
index 310bacb2c9..0089959fbb 100644
--- a/src/core/lib/surface/byte_buffer_reader.c
+++ b/src/core/lib/surface/byte_buffer_reader.c
@@ -37,9 +37,9 @@
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
#include "src/core/lib/compression/message_compress.h"
@@ -56,11 +56,11 @@ static int is_compressed(grpc_byte_buffer *buffer) {
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer) {
- gpr_slice_buffer decompressed_slices_buffer;
+ grpc_slice_buffer decompressed_slices_buffer;
reader->buffer_in = buffer;
switch (reader->buffer_in->type) {
case GRPC_BB_RAW:
- gpr_slice_buffer_init(&decompressed_slices_buffer);
+ grpc_slice_buffer_init(&decompressed_slices_buffer);
if (is_compressed(reader->buffer_in)) {
if (grpc_msg_decompress(reader->buffer_in->data.raw.compression,
&reader->buffer_in->data.raw.slice_buffer,
@@ -76,7 +76,7 @@ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
decompressed_slices_buffer.count);
}
- gpr_slice_buffer_destroy(&decompressed_slices_buffer);
+ grpc_slice_buffer_destroy(&decompressed_slices_buffer);
} else { /* not compressed, use the input buffer as output */
reader->buffer_out = reader->buffer_in;
}
@@ -98,13 +98,13 @@ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
}
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
- gpr_slice *slice) {
+ grpc_slice *slice) {
switch (reader->buffer_in->type) {
case GRPC_BB_RAW: {
- gpr_slice_buffer *slice_buffer;
+ grpc_slice_buffer *slice_buffer;
slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
if (reader->current.index < slice_buffer->count) {
- *slice = gpr_slice_ref(slice_buffer->slices[reader->current.index]);
+ *slice = grpc_slice_ref(slice_buffer->slices[reader->current.index]);
reader->current.index += 1;
return 1;
}
@@ -114,18 +114,18 @@ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
return 0;
}
-gpr_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) {
- gpr_slice in_slice;
+grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) {
+ grpc_slice in_slice;
size_t bytes_read = 0;
const size_t input_size = grpc_byte_buffer_length(reader->buffer_out);
- gpr_slice out_slice = gpr_slice_malloc(input_size);
- uint8_t *const outbuf = GPR_SLICE_START_PTR(out_slice); /* just an alias */
+ grpc_slice out_slice = grpc_slice_malloc(input_size);
+ uint8_t *const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */
while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) {
- const size_t slice_length = GPR_SLICE_LENGTH(in_slice);
- memcpy(&(outbuf[bytes_read]), GPR_SLICE_START_PTR(in_slice), slice_length);
+ const size_t slice_length = GRPC_SLICE_LENGTH(in_slice);
+ memcpy(&(outbuf[bytes_read]), GRPC_SLICE_START_PTR(in_slice), slice_length);
bytes_read += slice_length;
- gpr_slice_unref(in_slice);
+ grpc_slice_unref(in_slice);
GPR_ASSERT(bytes_read <= input_size);
}
return out_slice;
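
byte_buffer_reader.c keeps the same shape: init decompresses into buffer_out when needed, next hands out refs to individual slices, and readall concatenates everything into one freshly allocated slice. A sketch of draining a buffer in one go:

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/slice.h>
    #include <grpc/support/log.h>

    static void read_all_sketch(grpc_byte_buffer *bb) {
      grpc_byte_buffer_reader reader;
      GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, bb));
      grpc_slice all = grpc_byte_buffer_reader_readall(&reader);
      gpr_log(GPR_DEBUG, "payload: %d bytes", (int)GRPC_SLICE_LENGTH(all));
      grpc_slice_unref(all);
      grpc_byte_buffer_reader_destroy(&reader);
    }
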
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 02a5de0857..8ca3cab9d5 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -39,9 +39,9 @@
#include <grpc/compression.h>
#include <grpc/grpc.h>
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/slice.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -49,6 +49,7 @@
#include "src/core/lib/compression/algorithm_metadata.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -122,6 +123,7 @@ struct grpc_call {
grpc_channel *channel;
grpc_call *parent;
grpc_call *first_child;
+ gpr_timespec start_time;
/* TODO(ctiller): share with cq if possible? */
gpr_mu mu;
@@ -184,7 +186,7 @@ struct grpc_call {
grpc_slice_buffer_stream sending_stream;
grpc_byte_stream *receiving_stream;
grpc_byte_buffer **receiving_buffer;
- gpr_slice receiving_slice;
+ grpc_slice receiving_slice;
grpc_closure receiving_slice_ready;
grpc_closure receiving_stream_ready;
grpc_closure receiving_initial_metadata_ready;
@@ -239,6 +241,7 @@ grpc_error *grpc_call_create(const grpc_call_create_args *args,
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
+ call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = args->server_transport_data == NULL;
@@ -311,10 +314,10 @@ grpc_error *grpc_call_create(const grpc_call_create_args *args,
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
- grpc_error *error =
- grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
- call->context, args->server_transport_data, path,
- send_deadline, CALL_STACK_FROM_CALL(call));
+ grpc_error *error = grpc_call_stack_init(
+ &exec_ctx, channel_stack, 1, destroy_call, call, call->context,
+ args->server_transport_data, path, call->start_time, send_deadline,
+ CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
grpc_status_code status;
const char *error_str;
@@ -427,6 +430,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
get_final_status(call, set_status_value_directly,
&c->final_info.final_status);
+ c->final_info.stats.latency =
+ gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
@@ -493,8 +498,8 @@ static void destroy_encodings_accepted_by_peer(void *p) { return; }
static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
size_t i;
grpc_compression_algorithm algorithm;
- gpr_slice_buffer accept_encoding_parts;
- gpr_slice accept_encoding_slice;
+ grpc_slice_buffer accept_encoding_parts;
+ grpc_slice accept_encoding_slice;
void *accepted_user_data;
accepted_user_data =
@@ -506,23 +511,23 @@ static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
}
accept_encoding_slice = mdel->value->slice;
- gpr_slice_buffer_init(&accept_encoding_parts);
- gpr_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
+ grpc_slice_buffer_init(&accept_encoding_parts);
+ grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
/* No need to zero call->encodings_accepted_by_peer: grpc_call_create already
* zeroes the whole grpc_call */
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
for (i = 0; i < accept_encoding_parts.count; i++) {
- const gpr_slice *accept_encoding_entry_slice =
+ const grpc_slice *accept_encoding_entry_slice =
&accept_encoding_parts.slices[i];
if (grpc_compression_algorithm_parse(
- (const char *)GPR_SLICE_START_PTR(*accept_encoding_entry_slice),
- GPR_SLICE_LENGTH(*accept_encoding_entry_slice), &algorithm)) {
+ (const char *)GRPC_SLICE_START_PTR(*accept_encoding_entry_slice),
+ GRPC_SLICE_LENGTH(*accept_encoding_entry_slice), &algorithm)) {
GPR_BITSET(&call->encodings_accepted_by_peer, algorithm);
} else {
char *accept_encoding_entry_str =
- gpr_dump_slice(*accept_encoding_entry_slice, GPR_DUMP_ASCII);
+ grpc_dump_slice(*accept_encoding_entry_slice, GPR_DUMP_ASCII);
gpr_log(GPR_ERROR,
"Invalid entry in accept encoding metadata: '%s'. Ignoring.",
accept_encoding_entry_str);
@@ -530,7 +535,7 @@ static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
}
}
- gpr_slice_buffer_destroy(&accept_encoding_parts);
+ grpc_slice_buffer_destroy(&accept_encoding_parts);
grpc_mdelem_set_user_data(
mdel, destroy_encodings_accepted_by_peer,
@@ -551,14 +556,14 @@ static void get_final_details(grpc_call *call, char **out_details,
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
if (call->status[i].details) {
- gpr_slice details = call->status[i].details->slice;
- size_t len = GPR_SLICE_LENGTH(details);
+ grpc_slice details = call->status[i].details->slice;
+ size_t len = GRPC_SLICE_LENGTH(details);
if (len + 1 > *out_details_capacity) {
*out_details_capacity =
GPR_MAX(len + 1, *out_details_capacity * 3 / 2);
*out_details = gpr_realloc(*out_details, *out_details_capacity);
}
- memcpy(*out_details, GPR_SLICE_START_PTR(details), len);
+ memcpy(*out_details, GRPC_SLICE_START_PTR(details), len);
(*out_details)[len] = 0;
} else {
goto no_details;
@@ -632,9 +637,6 @@ static int prepare_application_metadata(grpc_call *call, int count,
if (call->send_extra_metadata_count == 0) {
prepend_extra_metadata = 0;
} else {
- for (i = 0; i < call->send_extra_metadata_count; i++) {
- GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
- }
for (i = 1; i < call->send_extra_metadata_count; i++) {
call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
}
@@ -680,6 +682,7 @@ static int prepare_application_metadata(grpc_call *call, int count,
&call->send_extra_metadata[call->send_extra_metadata_count - 1];
batch->list.head->prev = NULL;
batch->list.tail->next = NULL;
+ call->send_extra_metadata_count = 0;
break;
case 3: {
/* prepend AND md */
@@ -695,6 +698,7 @@ static int prepare_application_metadata(grpc_call *call, int count,
batch->list.tail = linked_from_md(last_md);
batch->list.head->prev = NULL;
batch->list.tail->next = NULL;
+ call->send_extra_metadata_count = 0;
break;
}
default:
@@ -900,7 +904,7 @@ static uint32_t decode_status(grpc_mdelem *md) {
status = ((uint32_t)(intptr_t)user_data) - STATUS_OFFSET;
} else {
if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
- GPR_SLICE_LENGTH(md->value->slice),
+ GRPC_SLICE_LENGTH(md->value->slice),
&status)) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
@@ -953,7 +957,7 @@ static grpc_mdelem *publish_app_metadata(grpc_call *call, grpc_mdelem *elem,
mdusr = &dest->metadata[dest->count++];
mdusr->key = grpc_mdstr_as_c_string(elem->key);
mdusr->value = grpc_mdstr_as_c_string(elem->value);
- mdusr->value_length = GPR_SLICE_LENGTH(elem->value->slice);
+ mdusr->value_length = GRPC_SLICE_LENGTH(elem->value->slice);
GPR_TIMER_END("publish_app_metadata", 0);
return elem;
}
@@ -1085,8 +1089,8 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
if (grpc_byte_stream_next(exec_ctx, call->receiving_stream,
&call->receiving_slice, remaining,
&call->receiving_slice_ready)) {
- gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
- call->receiving_slice);
+ grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
} else {
return;
}
@@ -1099,8 +1103,8 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *call = bctl->call;
if (error == GRPC_ERROR_NONE) {
- gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
- call->receiving_slice);
+ grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
continue_receiving_slices(exec_ctx, bctl);
} else {
if (grpc_trace_operation_failures) {
@@ -1461,6 +1465,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_stream_init(
&call->sending_stream,
&op->data.send_message->data.raw.slice_buffer, op->flags);
+      /* If the outgoing buffer is already compressed, mark it as such in the
flags. The compression filter will pick this up and skip further
(wasteful) attempts at compression. */
+ if (op->data.send_message->data.raw.compression > GRPC_COMPRESS_NONE) {
+ call->sending_stream.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
stream_op->send_message = &call->sending_stream.base;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
@@ -1516,8 +1526,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, STATUS_FROM_API_OVERRIDE,
GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
}
- set_status_code(call, STATUS_FROM_API_OVERRIDE,
- (uint32_t)op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (uint32_t)op->data.send_status_from_server.status);
+ }
if (!prepare_application_metadata(
call,
(int)op->data.send_status_from_server.trailing_metadata_count,
@@ -1539,6 +1551,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
+      /* If this is a server, then GRPC_OP_RECV_INITIAL_METADATA *must* come
from server.c via accept_stream, and in that case we are not
necessarily covered by a poller. */
+ stream_op->covered_by_poller = call->is_client;
call->received_initial_metadata = 1;
call->buffered_metadata[0] = op->data.recv_initial_metadata;
grpc_closure_init(&call->receiving_initial_metadata_ready,
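
The call.c changes above also let an application hand the core a payload it has compressed itself: tagging the raw byte buffer with its algorithm makes call_start_batch set GRPC_WRITE_INTERNAL_COMPRESS, and the compression filter then skips recompression. A sketch of producing such a buffer (the gzipped slice is assumed to exist already):

    #include <grpc/byte_buffer.h>
    #include <grpc/compression.h>
    #include <grpc/slice.h>

    static grpc_byte_buffer *make_precompressed_sketch(grpc_slice gzipped) {
      /* marking the buffer as GZIP-compressed is enough; the send path now
         propagates this into the stream flags */
      return grpc_raw_compressed_byte_buffer_create(&gzipped, 1,
                                                    GRPC_COMPRESS_GZIP);
    }
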
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 92d783b78d..9405015c50 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -86,87 +86,90 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *input_args,
grpc_channel_stack_type channel_stack_type,
grpc_transport *optional_transport) {
- bool is_client = grpc_channel_stack_type_is_client(channel_stack_type);
-
grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_channel_arguments(builder, input_args);
grpc_channel_stack_builder_set_target(builder, target);
grpc_channel_stack_builder_set_transport(builder, optional_transport);
- grpc_channel *channel;
- grpc_channel_args *args;
if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) {
grpc_channel_stack_builder_destroy(builder);
return NULL;
- } else {
- args = grpc_channel_args_copy(
- grpc_channel_stack_builder_get_channel_arguments(builder));
- channel = grpc_channel_stack_builder_finish(
- exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL);
+ }
+ grpc_channel_args *args = grpc_channel_args_copy(
+ grpc_channel_stack_builder_get_channel_arguments(builder));
+ grpc_channel *channel;
+ grpc_error *error = grpc_channel_stack_builder_finish(
+ exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
+ (void **)&channel);
+ if (error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_ERROR, "channel stack builder failed: %s", msg);
+ grpc_error_free_string(msg);
+ GRPC_ERROR_UNREF(error);
+ goto done;
}
memset(channel, 0, sizeof(*channel));
channel->target = gpr_strdup(target);
- channel->is_client = is_client;
+ channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
grpc_compression_options_init(&channel->compression_options);
- if (args) {
- for (size_t i = 0; i < args->num_args; i++) {
- if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
- if (args->args[i].type != GRPC_ARG_STRING) {
- gpr_log(GPR_ERROR, "%s ignored: it must be a string",
- GRPC_ARG_DEFAULT_AUTHORITY);
- } else {
- if (channel->default_authority) {
- /* setting this takes precedence over anything else */
- GRPC_MDELEM_UNREF(channel->default_authority);
- }
- channel->default_authority = grpc_mdelem_from_strings(
- ":authority", args->args[i].value.string);
+
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_ARG_DEFAULT_AUTHORITY);
+ } else {
+ if (channel->default_authority) {
+ /* setting this takes precedence over anything else */
+ GRPC_MDELEM_UNREF(channel->default_authority);
}
- } else if (0 ==
- strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
- if (args->args[i].type != GRPC_ARG_STRING) {
- gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ channel->default_authority =
+ grpc_mdelem_from_strings(":authority", args->args[i].value.string);
+ }
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
+ } else {
+ if (channel->default_authority) {
+ /* other ways of setting this (notably ssl) take precedence */
+ gpr_log(GPR_ERROR,
+ "%s ignored: default host already set some other way",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
- if (channel->default_authority) {
- /* other ways of setting this (notably ssl) take precedence */
- gpr_log(GPR_ERROR,
- "%s ignored: default host already set some other way",
- GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
- } else {
- channel->default_authority = grpc_mdelem_from_strings(
- ":authority", args->args[i].value.string);
- }
+ channel->default_authority = grpc_mdelem_from_strings(
+ ":authority", args->args[i].value.string);
}
- } else if (0 == strcmp(args->args[i].key,
- GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
- channel->compression_options.default_level.is_set = true;
- GPR_ASSERT(args->args[i].value.integer >= 0 &&
- args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT);
- channel->compression_options.default_level.level =
- (grpc_compression_level)args->args[i].value.integer;
- } else if (0 == strcmp(args->args[i].key,
- GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
- channel->compression_options.default_algorithm.is_set = true;
- GPR_ASSERT(args->args[i].value.integer >= 0 &&
- args->args[i].value.integer <
- GRPC_COMPRESS_ALGORITHMS_COUNT);
- channel->compression_options.default_algorithm.algorithm =
- (grpc_compression_algorithm)args->args[i].value.integer;
- } else if (0 ==
- strcmp(args->args[i].key,
- GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
- channel->compression_options.enabled_algorithms_bitset =
- (uint32_t)args->args[i].value.integer |
- 0x1; /* always support no compression */
}
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
+ channel->compression_options.default_level.is_set = true;
+ GPR_ASSERT(args->args[i].value.integer >= 0 &&
+ args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT);
+ channel->compression_options.default_level.level =
+ (grpc_compression_level)args->args[i].value.integer;
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
+ channel->compression_options.default_algorithm.is_set = true;
+ GPR_ASSERT(args->args[i].value.integer >= 0 &&
+ args->args[i].value.integer < GRPC_COMPRESS_ALGORITHMS_COUNT);
+ channel->compression_options.default_algorithm.algorithm =
+ (grpc_compression_algorithm)args->args[i].value.integer;
+ } else if (0 ==
+ strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
+ channel->compression_options.enabled_algorithms_bitset =
+ (uint32_t)args->args[i].value.integer |
+ 0x1; /* always support no compression */
}
- grpc_channel_args_destroy(args);
}
+done:
+ grpc_channel_args_destroy(args);
return channel;
}
@@ -175,6 +178,15 @@ char *grpc_channel_get_target(grpc_channel *channel) {
return gpr_strdup(channel->target);
}
+void grpc_channel_get_info(grpc_channel *channel,
+ const grpc_channel_info *channel_info) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_channel_element *elem =
+ grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
+ elem->filter->get_channel_info(&exec_ctx, elem, channel_info);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
static grpc_call *grpc_channel_create_call_internal(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
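
grpc_channel_get_info is new surface API: it asks the first element of the channel stack to fill in a grpc_channel_info. A usage sketch; the lb_policy_name field is an assumption based on how the struct is laid out in later releases, so treat the field name here as illustrative:

    #include <string.h>

    #include <grpc/grpc.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    static void channel_info_sketch(grpc_channel *channel) {
      char *lb_policy_name = NULL; /* filled in by the channel stack */
      grpc_channel_info info;
      memset(&info, 0, sizeof(info));
      info.lb_policy_name = &lb_policy_name; /* assumed field name */
      grpc_channel_get_info(channel, &info);
      if (lb_policy_name != NULL) {
        gpr_log(GPR_INFO, "LB policy in use: %s", lb_policy_name);
        gpr_free(lb_policy_name);
      }
    }
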
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 4e0feb56ac..184c1a1a16 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -354,11 +354,13 @@ static void dump_pending_tags(grpc_completion_queue *cc) {
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
+ gpr_mu_lock(cc->mu);
for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
char *s;
gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
gpr_strvec_add(&v, s);
}
+ gpr_mu_unlock(cc->mu);
char *out = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
gpr_log(GPR_DEBUG, "%s", out);
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 2c8f28ee45..64aa0e3361 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -51,6 +51,7 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -181,6 +182,7 @@ void grpc_init(void) {
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
+ grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
#endif
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index d32c884e8e..57da94ac1e 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -88,6 +88,10 @@ static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return NULL;
}
+static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ const grpc_channel_info *channel_info) {}
+
static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
@@ -119,11 +123,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
gpr_free(and_free_memory);
}
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
GPR_ASSERT(args->is_first);
GPR_ASSERT(args->is_last);
+ return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -140,6 +145,7 @@ const grpc_channel_filter grpc_lame_filter = {
init_channel_elem,
destroy_channel_elem,
lame_get_peer,
+ lame_get_channel_info,
"lame-client",
};
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 3a90308058..62d7afc8da 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -195,6 +195,7 @@ struct grpc_server {
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
size_t cq_count;
+ size_t pollset_count;
bool started;
/* The two following mutexes control access to server-state
@@ -264,13 +265,13 @@ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
struct shutdown_cleanup_args {
grpc_closure closure;
- gpr_slice slice;
+ grpc_slice slice;
};
static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
struct shutdown_cleanup_args *a = arg;
- gpr_slice_unref(a->slice);
+ grpc_slice_unref(a->slice);
gpr_free(a);
}
@@ -283,7 +284,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
op->send_goaway = send_goaway;
op->set_accept_stream = true;
- sc->slice = gpr_slice_from_copied_string("Server shutdown");
+ sc->slice = grpc_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
op->disconnect_with_error = send_disconnect;
@@ -459,8 +460,8 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
}
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
- gpr_slice slice = value->slice;
- size_t len = GPR_SLICE_LENGTH(slice);
+ grpc_slice slice = value->slice;
+ size_t len = GRPC_SLICE_LENGTH(slice);
if (len + 1 > *capacity) {
*capacity = GPR_MAX(len + 1, *capacity * 2);
@@ -913,9 +914,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_unref(exec_ctx, chand->server);
}
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
@@ -926,6 +927,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_closure_init(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand);
+ return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -965,6 +967,7 @@ const grpc_channel_filter grpc_server_top_filter = {
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
+ grpc_channel_next_get_info,
"server",
};
@@ -1084,7 +1087,7 @@ void grpc_server_start(grpc_server *server) {
GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
server->started = true;
- size_t pollset_count = 0;
+ server->pollset_count = 0;
server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
server->request_freelist_per_cq =
gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
@@ -1092,7 +1095,8 @@ void grpc_server_start(grpc_server *server) {
gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (!grpc_cq_is_non_listening_server_cq(server->cqs[i])) {
- server->pollsets[pollset_count++] = grpc_cq_pollset(server->cqs[i]);
+ server->pollsets[server->pollset_count++] =
+ grpc_cq_pollset(server->cqs[i]);
}
server->request_freelist_per_cq[i] =
gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
@@ -1111,7 +1115,8 @@ void grpc_server_start(grpc_server *server) {
}
for (l = server->listeners; l; l = l->next) {
- l->start(&exec_ctx, server, l->arg, server->pollsets, pollset_count);
+ l->start(&exec_ctx, server, l->arg, server->pollsets,
+ server->pollset_count);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -1119,7 +1124,7 @@ void grpc_server_start(grpc_server *server) {
void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets,
size_t *pollset_count) {
- *pollset_count = server->cq_count;
+ *pollset_count = server->pollset_count;
*pollsets = server->pollsets;
}
diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c
index 84f0a083bc..f49dd8584b 100644
--- a/src/core/lib/surface/validate_metadata.c
+++ b/src/core/lib/surface/validate_metadata.c
@@ -53,7 +53,7 @@ int grpc_header_key_is_legal(const char *key, size_t length) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00,
0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- if (length == 0) {
+ if (length == 0 || key[0] == ':') {
return 0;
}
return conforms_to(key, length, legal_header_bits);
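
The validate_metadata.c change closes a hole: keys beginning with ':' are HTTP/2 pseudo-headers reserved for the transport, so application metadata using them is now rejected up front. A quick sketch of the observable behaviour:

    #include <string.h>

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>

    static void header_key_sketch(void) {
      GPR_ASSERT(grpc_header_key_is_legal("x-trace-id", strlen("x-trace-id")));
      GPR_ASSERT(!grpc_header_key_is_legal(":authority", strlen(":authority")));
      GPR_ASSERT(!grpc_header_key_is_legal("", 0)); /* empty keys stay illegal */
    }
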
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index 41242684da..0db8b41aa9 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -36,6 +36,6 @@
#include <grpc/grpc.h>
-const char *grpc_version_string(void) { return "1.1.0-dev"; }
+const char *grpc_version_string(void) { return "2.0.0-dev"; }
const char *grpc_g_stands_for(void) { return "good"; }
diff --git a/src/core/lib/transport/byte_stream.c b/src/core/lib/transport/byte_stream.c
index 2f6c75cb6a..2f1d7b7c60 100644
--- a/src/core/lib/transport/byte_stream.c
+++ b/src/core/lib/transport/byte_stream.c
@@ -38,7 +38,7 @@
#include <grpc/support/log.h>
int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream, gpr_slice *slice,
+ grpc_byte_stream *byte_stream, grpc_slice *slice,
size_t max_size_hint, grpc_closure *on_complete) {
return byte_stream->next(exec_ctx, byte_stream, slice, max_size_hint,
on_complete);
@@ -53,11 +53,11 @@ void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
static int slice_buffer_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream,
- gpr_slice *slice, size_t max_size_hint,
+ grpc_slice *slice, size_t max_size_hint,
grpc_closure *on_complete) {
grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
GPR_ASSERT(stream->cursor < stream->backing_buffer->count);
- *slice = gpr_slice_ref(stream->backing_buffer->slices[stream->cursor]);
+ *slice = grpc_slice_ref(stream->backing_buffer->slices[stream->cursor]);
stream->cursor++;
return 1;
}
@@ -66,7 +66,7 @@ static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream) {}
void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
- gpr_slice_buffer *slice_buffer,
+ grpc_slice_buffer *slice_buffer,
uint32_t flags) {
GPR_ASSERT(slice_buffer->length <= UINT32_MAX);
stream->base.length = (uint32_t)slice_buffer->length;
diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h
index e64dce6283..1fdd5b4d77 100644
--- a/src/core/lib/transport/byte_stream.h
+++ b/src/core/lib/transport/byte_stream.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H
#define GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H
-#include <grpc/support/slice_buffer.h>
+#include <grpc/slice_buffer.h>
#include "src/core/lib/iomgr/exec_ctx.h"
/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
@@ -50,7 +50,7 @@ struct grpc_byte_stream {
uint32_t length;
uint32_t flags;
int (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
- gpr_slice *slice, size_t max_size_hint,
+ grpc_slice *slice, size_t max_size_hint,
grpc_closure *on_complete);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream);
};
@@ -65,7 +65,7 @@ struct grpc_byte_stream {
* once a slice is returned into *slice, it is owned by the caller.
*/
int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream, gpr_slice *slice,
+ grpc_byte_stream *byte_stream, grpc_slice *slice,
size_t max_size_hint, grpc_closure *on_complete);
void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
@@ -74,12 +74,12 @@ void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
/* grpc_byte_stream that wraps a slice buffer */
typedef struct grpc_slice_buffer_stream {
grpc_byte_stream base;
- gpr_slice_buffer *backing_buffer;
+ grpc_slice_buffer *backing_buffer;
size_t cursor;
} grpc_slice_buffer_stream;
void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
- gpr_slice_buffer *slice_buffer,
+ grpc_slice_buffer *slice_buffer,
uint32_t flags);
#endif /* GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H */
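
A minimal usage sketch of the renamed slice types in the byte-stream interface (not part of this change); drain() and its call pattern are illustrative only:

    #include <grpc/slice.h>
    #include <grpc/slice_buffer.h>
    #include "src/core/lib/transport/byte_stream.h"

    /* Wrap a slice buffer and pull slices back out through the stream API. */
    static void drain(grpc_exec_ctx *exec_ctx, grpc_slice_buffer *payload) {
      grpc_slice_buffer_stream stream;
      grpc_slice_buffer_stream_init(&stream, payload, 0 /* flags */);
      grpc_slice slice;
      /* The slice-buffer implementation always yields synchronously, so the
         on_complete closure is never invoked here. */
      while (stream.cursor < payload->count &&
             grpc_byte_stream_next(exec_ctx, &stream.base, &slice,
                                   ~(size_t)0, NULL)) {
        /* ... consume slice ... */
        grpc_slice_unref(slice);
      }
      grpc_byte_stream_destroy(exec_ctx, &stream.base);
    }
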
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index fdb5307814..4f49d7cf7d 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -43,6 +43,8 @@ int grpc_connectivity_state_trace = 0;
const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
switch (state) {
+ case GRPC_CHANNEL_INIT:
+ return "INIT";
case GRPC_CHANNEL_IDLE:
return "IDLE";
case GRPC_CHANNEL_CONNECTING:
@@ -98,7 +100,12 @@ grpc_connectivity_state grpc_connectivity_state_check(
return tracker->current_state;
}
-int grpc_connectivity_state_notify_on_state_change(
+bool grpc_connectivity_state_has_watchers(
+ grpc_connectivity_state_tracker *connectivity_state) {
+ return connectivity_state->watchers != NULL;
+}
+
+bool grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify) {
if (grpc_connectivity_state_trace) {
@@ -117,7 +124,7 @@ int grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
tracker->watchers = w->next;
gpr_free(w);
- return 0;
+ return false;
}
while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next;
@@ -125,11 +132,11 @@ int grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
w->next = w->next->next;
gpr_free(rm_candidate);
- return 0;
+ return false;
}
w = w->next;
}
- return 0;
+ return false;
} else {
if (tracker->current_state != *current) {
*current = tracker->current_state;
@@ -159,6 +166,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_error_free_string(error_string);
}
switch (state) {
+ case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index 7a2fa52c10..769c675b79 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -75,13 +75,16 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_error *associated_error,
const char *reason);
+bool grpc_connectivity_state_has_watchers(
+ grpc_connectivity_state_tracker *tracker);
+
grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state_tracker *tracker, grpc_error **current_error);
/** Return true if the channel should start connecting, false otherwise.
If current==NULL cancel notify if it is already queued (the closure is
scheduled with GRPC_ERROR_CANCELLED in that case) */
-int grpc_connectivity_state_notify_on_state_change(
+bool grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify);
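
For readers tracking the int-to-bool change: a hedged caller sketch (not part of this change) showing how the return value and the new grpc_connectivity_state_has_watchers() helper are intended to be used; start_connecting() is a hypothetical stand-in:

    #include <stdbool.h>
    #include "src/core/lib/transport/connectivity_state.h"

    /* Hypothetical connect trigger standing in for the caller's real work. */
    static void start_connecting(grpc_exec_ctx *exec_ctx) { (void)exec_ctx; }

    static void watch_state(grpc_exec_ctx *exec_ctx,
                            grpc_connectivity_state_tracker *tracker,
                            grpc_connectivity_state *state,
                            grpc_closure *on_change) {
      /* The bool result answers "should the caller start connecting now?". */
      if (grpc_connectivity_state_notify_on_state_change(exec_ctx, tracker,
                                                         state, on_change)) {
        start_connecting(exec_ctx);
      }
    }

    static bool can_skip_notification(grpc_connectivity_state_tracker *t) {
      /* New helper: lets idle/shutdown paths skip work when nobody watches. */
      return !grpc_connectivity_state_has_watchers(t);
    }
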
diff --git a/src/core/lib/transport/mdstr_hash_table.c b/src/core/lib/transport/mdstr_hash_table.c
index 8e914c420b..3d01e56df7 100644
--- a/src/core/lib/transport/mdstr_hash_table.c
+++ b/src/core/lib/transport/mdstr_hash_table.c
@@ -41,7 +41,6 @@
struct grpc_mdstr_hash_table {
gpr_refcount refs;
- size_t num_entries;
size_t size;
grpc_mdstr_hash_table_entry* entries;
};
@@ -77,7 +76,6 @@ grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
grpc_mdstr_hash_table* table = gpr_malloc(sizeof(*table));
memset(table, 0, sizeof(*table));
gpr_ref_init(&table->refs, 1);
- table->num_entries = num_entries;
// Quadratic probing gets best performance when the table is no more
// than half full.
table->size = num_entries * 2;
@@ -96,7 +94,7 @@ grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table) {
return table;
}
-int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
+void grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
if (table != NULL && gpr_unref(&table->refs)) {
for (size_t i = 0; i < table->size; ++i) {
grpc_mdstr_hash_table_entry* entry = &table->entries[i];
@@ -107,13 +105,7 @@ int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
}
gpr_free(table->entries);
gpr_free(table);
- return 1;
}
- return 0;
-}
-
-size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table) {
- return table->num_entries;
}
void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
@@ -123,35 +115,3 @@ void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
if (idx == table->size) return NULL; // Not found.
return table->entries[idx].value;
}
-
-int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
- const grpc_mdstr_hash_table* table2) {
- // Compare by num_entries.
- if (table1->num_entries < table2->num_entries) return -1;
- if (table1->num_entries > table2->num_entries) return 1;
- for (size_t i = 0; i < table1->num_entries; ++i) {
- grpc_mdstr_hash_table_entry* e1 = &table1->entries[i];
- grpc_mdstr_hash_table_entry* e2 = &table2->entries[i];
- // Compare keys by hash value.
- if (e1->key->hash < e2->key->hash) return -1;
- if (e1->key->hash > e2->key->hash) return 1;
- // Compare by vtable (pointer equality).
- if (e1->vtable < e2->vtable) return -1;
- if (e1->vtable > e2->vtable) return 1;
- // Compare values via vtable.
- const int value_result = e1->vtable->compare_value(e1->value, e2->value);
- if (value_result != 0) return value_result;
- }
- return 0;
-}
-
-void grpc_mdstr_hash_table_iterate(
- const grpc_mdstr_hash_table* table,
- void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
- void* user_data) {
- for (size_t i = 0; i < table->size; ++i) {
- if (table->entries[i].key != NULL) {
- func(&table->entries[i], user_data);
- }
- }
-}
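
The probe routine itself is not shown in this hunk, so the following toy (not gRPC code) only illustrates the sizing rationale in the "no more than half full" comment above: with quadratic probing, keeping the table at most half full keeps probe chains short, which is why the table allocates size = num_entries * 2:

    #include <stddef.h>
    #include <stdint.h>

    #define TOY_SLOTS 8 /* would hold at most 4 entries under the 2x rule */

    static size_t toy_find_slot(uint32_t hash, const uint32_t *slot_hashes) {
      for (size_t i = 0; i < TOY_SLOTS; ++i) {
        size_t idx = (hash + i * i) % TOY_SLOTS; /* quadratic probe step */
        if (slot_hashes[idx] == 0 || slot_hashes[idx] == hash) return idx;
      }
      return TOY_SLOTS; /* an over-full table defeats probing */
    }
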
diff --git a/src/core/lib/transport/mdstr_hash_table.h b/src/core/lib/transport/mdstr_hash_table.h
index bceb4df93d..8982ec3a8d 100644
--- a/src/core/lib/transport/mdstr_hash_table.h
+++ b/src/core/lib/transport/mdstr_hash_table.h
@@ -51,7 +51,6 @@ typedef struct grpc_mdstr_hash_table grpc_mdstr_hash_table;
typedef struct grpc_mdstr_hash_table_vtable {
void (*destroy_value)(void* value);
void* (*copy_value)(void* value);
- int (*compare_value)(void* value1, void* value2);
} grpc_mdstr_hash_table_vtable;
typedef struct grpc_mdstr_hash_table_entry {
@@ -67,26 +66,11 @@ grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
size_t num_entries, grpc_mdstr_hash_table_entry* entries);
grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table);
-/** Returns 1 when \a table is destroyed. */
-int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table);
-
-/** Returns the number of entries in \a table. */
-size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table);
+void grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table);
/** Returns the value from \a table associated with \a key.
Returns NULL if \a key is not found. */
void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
const grpc_mdstr* key);
-/** Compares two hash tables.
- The sort order is stable but undefined. */
-int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
- const grpc_mdstr_hash_table* table2);
-
-/** Iterates over the entries in \a table, calling \a func for each entry. */
-void grpc_mdstr_hash_table_iterate(
- const grpc_mdstr_hash_table* table,
- void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
- void* user_data);
-
#endif /* GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H */
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 4b40c275ad..fac19b91d9 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -51,7 +51,7 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
-gpr_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(gpr_slice input);
+grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(grpc_slice input);
/* There are two kinds of mdelem and mdstr instances.
* Static instances are declared in static_metadata.{h,c} and
@@ -85,16 +85,16 @@ typedef void (*destroy_user_data_func)(void *user_data);
/* Shadow structure for grpc_mdstr for non-static values */
typedef struct internal_string {
/* must be byte compatible with grpc_mdstr */
- gpr_slice slice;
+ grpc_slice slice;
uint32_t hash;
/* private only data */
gpr_atm refcnt;
uint8_t has_base64_and_huffman_encoded;
- gpr_slice_refcount refcount;
+ grpc_slice_refcount refcount;
- gpr_slice base64_and_huffman;
+ grpc_slice base64_and_huffman;
gpr_atm size_in_decoder_table;
@@ -174,7 +174,7 @@ void grpc_mdctx_global_init(void) {
grpc_mdstr *elem = &grpc_static_mdstr_table[i];
const char *str = grpc_static_metadata_strings[i];
uint32_t hash = gpr_murmur_hash3(str, strlen(str), g_hash_seed);
- *(gpr_slice *)&elem->slice = gpr_slice_from_static_string(str);
+ *(grpc_slice *)&elem->slice = grpc_slice_from_static_string(str);
*(uint32_t *)&elem->hash = hash;
for (j = 0;; j++) {
size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_strtab);
@@ -321,7 +321,7 @@ static void internal_destroy_string(strtab_shard *shard, internal_string *is) {
internal_string *cur;
GPR_TIMER_BEGIN("internal_destroy_string", 0);
if (is->has_base64_and_huffman_encoded) {
- gpr_slice_unref(is->base64_and_huffman);
+ grpc_slice_unref(is->base64_and_huffman);
}
for (prev_next = &shard->strs[TABLE_IDX(is->hash, LOG2_STRTAB_SHARD_COUNT,
shard->capacity)],
@@ -350,10 +350,10 @@ grpc_mdstr *grpc_mdstr_from_string(const char *str) {
return grpc_mdstr_from_buffer((const uint8_t *)str, strlen(str));
}
-grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice) {
- grpc_mdstr *result = grpc_mdstr_from_buffer(GPR_SLICE_START_PTR(slice),
- GPR_SLICE_LENGTH(slice));
- gpr_slice_unref(slice);
+grpc_mdstr *grpc_mdstr_from_slice(grpc_slice slice) {
+ grpc_mdstr *result = grpc_mdstr_from_buffer(GRPC_SLICE_START_PTR(slice),
+ GRPC_SLICE_LENGTH(slice));
+ grpc_slice_unref(slice);
return result;
}
@@ -373,9 +373,9 @@ grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
idx = (hash + i) % GPR_ARRAY_SIZE(g_static_strtab);
ss = g_static_strtab[idx];
if (ss == NULL) break;
- if (ss->hash == hash && GPR_SLICE_LENGTH(ss->slice) == length &&
+ if (ss->hash == hash && GRPC_SLICE_LENGTH(ss->slice) == length &&
(length == 0 ||
- 0 == memcmp(buf, GPR_SLICE_START_PTR(ss->slice), length))) {
+ 0 == memcmp(buf, GRPC_SLICE_START_PTR(ss->slice), length))) {
GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return ss;
}
@@ -386,8 +386,8 @@ grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
/* search for an existing string */
idx = TABLE_IDX(hash, LOG2_STRTAB_SHARD_COUNT, shard->capacity);
for (s = shard->strs[idx]; s; s = s->bucket_next) {
- if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
- 0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
+ if (s->hash == hash && GRPC_SLICE_LENGTH(s->slice) == length &&
+ 0 == memcmp(buf, GRPC_SLICE_START_PTR(s->slice), length)) {
if (gpr_atm_full_fetch_add(&s->refcnt, 1) == 0) {
/* If we get here, we've added a ref to something that was about to
* die - drop it immediately.
@@ -404,7 +404,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
}
/* not found: create a new string */
- if (length + 1 < GPR_SLICE_INLINED_SIZE) {
+ if (length + 1 < GRPC_SLICE_INLINED_SIZE) {
/* string data goes directly into the slice */
s = gpr_malloc(sizeof(internal_string));
gpr_atm_rel_store(&s->refcnt, 1);
@@ -589,7 +589,7 @@ grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value) {
grpc_mdstr_from_string(value));
}
-grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value) {
+grpc_mdelem *grpc_mdelem_from_slices(grpc_slice key, grpc_slice value) {
return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_slice(key),
grpc_mdstr_from_slice(value));
}
@@ -607,12 +607,12 @@ static size_t get_base64_encoded_size(size_t raw_length) {
}
size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem) {
- size_t overhead_and_key = 32 + GPR_SLICE_LENGTH(elem->key->slice);
- size_t value_len = GPR_SLICE_LENGTH(elem->value->slice);
+ size_t overhead_and_key = 32 + GRPC_SLICE_LENGTH(elem->key->slice);
+ size_t value_len = GRPC_SLICE_LENGTH(elem->value->slice);
if (is_mdstr_static(elem->value)) {
if (grpc_is_binary_header(
- (const char *)GPR_SLICE_START_PTR(elem->key->slice),
- GPR_SLICE_LENGTH(elem->key->slice))) {
+ (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
+ GRPC_SLICE_LENGTH(elem->key->slice))) {
return overhead_and_key + get_base64_encoded_size(value_len);
} else {
return overhead_and_key + value_len;
@@ -622,8 +622,8 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem) {
gpr_atm current_size = gpr_atm_acq_load(&is->size_in_decoder_table);
if (current_size == SIZE_IN_DECODER_TABLE_NOT_SET) {
if (grpc_is_binary_header(
- (const char *)GPR_SLICE_START_PTR(elem->key->slice),
- GPR_SLICE_LENGTH(elem->key->slice))) {
+ (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
+ GRPC_SLICE_LENGTH(elem->key->slice))) {
current_size = (gpr_atm)get_base64_encoded_size(value_len);
} else {
current_size = (gpr_atm)value_len;
@@ -679,7 +679,7 @@ void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
}
const char *grpc_mdstr_as_c_string(const grpc_mdstr *s) {
- return (const char *)GPR_SLICE_START_PTR(s->slice);
+ return (const char *)GRPC_SLICE_START_PTR(s->slice);
}
size_t grpc_mdstr_length(const grpc_mdstr *s) { return GRPC_MDSTR_LENGTH(s); }
@@ -687,6 +687,11 @@ size_t grpc_mdstr_length(const grpc_mdstr *s) { return GRPC_MDSTR_LENGTH(s); }
grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
if (is_mdstr_static(gs)) return gs;
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR REF:%p:%zu->%zu: '%s'",
+ (void *)s, gpr_atm_no_barrier_load(&s->refcnt),
+ gpr_atm_no_barrier_load(&s->refcnt) + 1, grpc_mdstr_as_c_string(gs));
+#endif
GPR_ASSERT(gpr_atm_full_fetch_add(&s->refcnt, 1) > 0);
return gs;
}
@@ -694,6 +699,11 @@ grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
if (is_mdstr_static(gs)) return;
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%zu->%zu: '%s'",
+ (void *)s, gpr_atm_no_barrier_load(&s->refcnt),
+ gpr_atm_no_barrier_load(&s->refcnt) - 1, grpc_mdstr_as_c_string(gs));
+#endif
if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
strtab_shard *shard =
&g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
@@ -718,8 +728,8 @@ void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
return result;
}
-void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
- void *user_data) {
+void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
+ void *user_data) {
internal_metadata *im = (internal_metadata *)md;
GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
@@ -730,16 +740,17 @@ void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
if (destroy_func != NULL) {
destroy_func(user_data);
}
- return;
+ return (void *)gpr_atm_no_barrier_load(&im->user_data);
}
gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&im->mu_user_data);
+ return user_data;
}
-gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
+grpc_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
internal_string *s = (internal_string *)gs;
- gpr_slice slice;
+ grpc_slice slice;
strtab_shard *shard =
&g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
gpr_mu_lock(&shard->mu);
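
A hedged sketch (not part of this change) of the caller pattern the new grpc_mdelem_set_user_data() return value enables: if another thread set the user data first, the freshly built value is destroyed and the winning pointer is returned, so callers should always keep using the returned pointer. my_state and its helpers are hypothetical:

    #include <grpc/support/alloc.h>
    #include "src/core/lib/transport/metadata.h"

    typedef struct { int parsed; } my_state;

    static void destroy_my_state(void *p) { gpr_free(p); }

    static my_state *get_or_create_state(grpc_mdelem *md) {
      /* The destroy function doubles as a type tag during the fetch. */
      my_state *state = grpc_mdelem_get_user_data(md, destroy_my_state);
      if (state == NULL) {
        state = gpr_malloc(sizeof(*state));
        state->parsed = 1;
        /* Losing the race destroys our copy and returns the winner instead. */
        state = grpc_mdelem_set_user_data(md, destroy_my_state, state);
      }
      return state;
    }
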
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 71eff0acf2..a4955a1ea7 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_H
#define GRPC_CORE_LIB_TRANSPORT_METADATA_H
-#include <grpc/support/slice.h>
+#include <grpc/slice.h>
#include <grpc/support/useful.h>
#ifdef __cplusplus
@@ -77,7 +77,7 @@ typedef struct grpc_mdelem grpc_mdelem;
/* if changing this, make identical changes in internal_string in metadata.c */
struct grpc_mdstr {
- const gpr_slice slice;
+ const grpc_slice slice;
const uint32_t hash;
/* there is a private part to this in metadata.c */
};
@@ -96,12 +96,12 @@ void grpc_test_only_set_metadata_hash_seed(uint32_t seed);
clients may have handy */
grpc_mdstr *grpc_mdstr_from_string(const char *str);
/* Unrefs the slice. */
-grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice);
+grpc_mdstr *grpc_mdstr_from_slice(grpc_slice slice);
grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *str, size_t length);
/* Returns a borrowed slice from the mdstr with its contents base64 encoded
and huffman compressed */
-gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *str);
+grpc_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *str);
/* Constructors for grpc_mdelem instances; take a variety of data types that
clients may have handy */
@@ -109,7 +109,7 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *key,
grpc_mdstr *value);
grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value);
/* Unrefs the slices. */
-grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value);
+grpc_mdelem *grpc_mdelem_from_slices(grpc_slice key, grpc_slice value);
grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
const uint8_t *value,
size_t value_length);
@@ -120,8 +120,8 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem);
is used as a type tag and is checked during user_data fetch. */
void *grpc_mdelem_get_user_data(grpc_mdelem *md,
void (*if_destroy_func)(void *));
-void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
- void *user_data);
+void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
+ void *user_data);
/* Reference counting */
//#define GRPC_METADATA_REFCOUNT_DEBUG
@@ -149,7 +149,7 @@ void grpc_mdelem_unref(grpc_mdelem *md);
Does not promise that the returned string has no embedded nulls however. */
const char *grpc_mdstr_as_c_string(const grpc_mdstr *s);
-#define GRPC_MDSTR_LENGTH(s) (GPR_SLICE_LENGTH(s->slice))
+#define GRPC_MDSTR_LENGTH(s) (GRPC_SLICE_LENGTH(s->slice))
/* We add 32 bytes of padding as per RFC-7540 section 6.5.2. */
#define GRPC_MDELEM_LENGTH(e) \
@@ -165,8 +165,8 @@ void grpc_mdctx_global_init(void);
void grpc_mdctx_global_shutdown(void);
/* Implementation provided by chttp2_transport */
-extern gpr_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(
- gpr_slice input);
+extern grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(
+ grpc_slice input);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 0424b4db98..7a9ccb4bc8 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -37,8 +37,8 @@
#include <stdbool.h>
#include <grpc/grpc.h>
+#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/slice.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/metadata.h"
diff --git a/src/core/lib/transport/method_config.c b/src/core/lib/transport/method_config.c
deleted file mode 100644
index 57d97700bf..0000000000
--- a/src/core/lib/transport/method_config.c
+++ /dev/null
@@ -1,340 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/core/lib/transport/method_config.h"
-
-#include <string.h>
-
-#include <grpc/impl/codegen/grpc_types.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/time.h>
-
-#include "src/core/lib/transport/mdstr_hash_table.h"
-#include "src/core/lib/transport/metadata.h"
-
-//
-// grpc_method_config
-//
-
-// bool vtable
-
-static void* bool_copy(void* valuep) {
- bool value = *(bool*)valuep;
- bool* new_value = gpr_malloc(sizeof(bool));
- *new_value = value;
- return new_value;
-}
-
-static int bool_cmp(void* v1, void* v2) {
- bool b1 = *(bool*)v1;
- bool b2 = *(bool*)v2;
- if (!b1 && b2) return -1;
- if (b1 && !b2) return 1;
- return 0;
-}
-
-static grpc_mdstr_hash_table_vtable bool_vtable = {gpr_free, bool_copy,
- bool_cmp};
-
-// timespec vtable
-
-static void* timespec_copy(void* valuep) {
- gpr_timespec value = *(gpr_timespec*)valuep;
- gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
- *new_value = value;
- return new_value;
-}
-
-static int timespec_cmp(void* v1, void* v2) {
- return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
-}
-
-static grpc_mdstr_hash_table_vtable timespec_vtable = {gpr_free, timespec_copy,
- timespec_cmp};
-
-// int32 vtable
-
-static void* int32_copy(void* valuep) {
- int32_t value = *(int32_t*)valuep;
- int32_t* new_value = gpr_malloc(sizeof(int32_t));
- *new_value = value;
- return new_value;
-}
-
-static int int32_cmp(void* v1, void* v2) {
- int32_t i1 = *(int32_t*)v1;
- int32_t i2 = *(int32_t*)v2;
- if (i1 < i2) return -1;
- if (i1 > i2) return 1;
- return 0;
-}
-
-static grpc_mdstr_hash_table_vtable int32_vtable = {gpr_free, int32_copy,
- int32_cmp};
-
-// Hash table keys.
-#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
-#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
-#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
- "grpc.max_request_message_bytes" // int32
-#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
- "grpc.max_response_message_bytes" // int32
-
-struct grpc_method_config {
- grpc_mdstr_hash_table* table;
- grpc_mdstr* wait_for_ready_key;
- grpc_mdstr* timeout_key;
- grpc_mdstr* max_request_message_bytes_key;
- grpc_mdstr* max_response_message_bytes_key;
-};
-
-grpc_method_config* grpc_method_config_create(
- bool* wait_for_ready, gpr_timespec* timeout,
- int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
- grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
- memset(method_config, 0, sizeof(grpc_method_config));
- method_config->wait_for_ready_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
- method_config->timeout_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
- method_config->max_request_message_bytes_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
- method_config->max_response_message_bytes_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
- grpc_mdstr_hash_table_entry entries[4];
- size_t num_entries = 0;
- if (wait_for_ready != NULL) {
- entries[num_entries].key = method_config->wait_for_ready_key;
- entries[num_entries].value = wait_for_ready;
- entries[num_entries].vtable = &bool_vtable;
- ++num_entries;
- }
- if (timeout != NULL) {
- entries[num_entries].key = method_config->timeout_key;
- entries[num_entries].value = timeout;
- entries[num_entries].vtable = &timespec_vtable;
- ++num_entries;
- }
- if (max_request_message_bytes != NULL) {
- entries[num_entries].key = method_config->max_request_message_bytes_key;
- entries[num_entries].value = max_request_message_bytes;
- entries[num_entries].vtable = &int32_vtable;
- ++num_entries;
- }
- if (max_response_message_bytes != NULL) {
- entries[num_entries].key = method_config->max_response_message_bytes_key;
- entries[num_entries].value = max_response_message_bytes;
- entries[num_entries].vtable = &int32_vtable;
- ++num_entries;
- }
- method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
- return method_config;
-}
-
-grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
- grpc_mdstr_hash_table_ref(method_config->table);
- return method_config;
-}
-
-void grpc_method_config_unref(grpc_method_config* method_config) {
- if (grpc_mdstr_hash_table_unref(method_config->table)) {
- GRPC_MDSTR_UNREF(method_config->wait_for_ready_key);
- GRPC_MDSTR_UNREF(method_config->timeout_key);
- GRPC_MDSTR_UNREF(method_config->max_request_message_bytes_key);
- GRPC_MDSTR_UNREF(method_config->max_response_message_bytes_key);
- gpr_free(method_config);
- }
-}
-
-int grpc_method_config_cmp(const grpc_method_config* method_config1,
- const grpc_method_config* method_config2) {
- return grpc_mdstr_hash_table_cmp(method_config1->table,
- method_config2->table);
-}
-
-const bool* grpc_method_config_get_wait_for_ready(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(method_config->table,
- method_config->wait_for_ready_key);
-}
-
-const gpr_timespec* grpc_method_config_get_timeout(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(method_config->table,
- method_config->timeout_key);
-}
-
-const int32_t* grpc_method_config_get_max_request_message_bytes(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(
- method_config->table, method_config->max_request_message_bytes_key);
-}
-
-const int32_t* grpc_method_config_get_max_response_message_bytes(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(
- method_config->table, method_config->max_response_message_bytes_key);
-}
-
-//
-// grpc_method_config_table
-//
-
-static void method_config_unref(void* valuep) {
- grpc_method_config_unref(valuep);
-}
-
-static void* method_config_ref(void* valuep) {
- return grpc_method_config_ref(valuep);
-}
-
-static int method_config_cmp(void* valuep1, void* valuep2) {
- return grpc_method_config_cmp(valuep1, valuep2);
-}
-
-static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
- method_config_unref, method_config_ref, method_config_cmp};
-
-grpc_method_config_table* grpc_method_config_table_create(
- size_t num_entries, grpc_method_config_table_entry* entries) {
- grpc_mdstr_hash_table_entry* hash_table_entries =
- gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
- for (size_t i = 0; i < num_entries; ++i) {
- hash_table_entries[i].key = entries[i].method_name;
- hash_table_entries[i].value = entries[i].method_config;
- hash_table_entries[i].vtable = &method_config_table_vtable;
- }
- grpc_method_config_table* method_config_table =
- grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
- gpr_free(hash_table_entries);
- return method_config_table;
-}
-
-grpc_method_config_table* grpc_method_config_table_ref(
- grpc_method_config_table* table) {
- return grpc_mdstr_hash_table_ref(table);
-}
-
-void grpc_method_config_table_unref(grpc_method_config_table* table) {
- grpc_mdstr_hash_table_unref(table);
-}
-
-int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
- const grpc_method_config_table* table2) {
- return grpc_mdstr_hash_table_cmp(table1, table2);
-}
-
-void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path) {
- void* value = grpc_mdstr_hash_table_get(table, path);
- // If we didn't find a match for the path, try looking for a wildcard
- // entry (i.e., change "/service/method" to "/service/*").
- if (value == NULL) {
- const char* path_str = grpc_mdstr_as_c_string(path);
- const char* sep = strrchr(path_str, '/') + 1;
- const size_t len = (size_t)(sep - path_str);
- char* buf = gpr_malloc(len + 2); // '*' and NUL
- memcpy(buf, path_str, len);
- buf[len] = '*';
- buf[len + 1] = '\0';
- grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
- gpr_free(buf);
- value = grpc_mdstr_hash_table_get(table, wildcard_path);
- GRPC_MDSTR_UNREF(wildcard_path);
- }
- return value;
-}
-
-static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
-
-static void destroy_arg(void* p) { grpc_method_config_table_unref(p); }
-
-static int cmp_arg(void* p1, void* p2) {
- return grpc_method_config_table_cmp(p1, p2);
-}
-
-static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
-
-grpc_arg grpc_method_config_table_create_channel_arg(
- grpc_method_config_table* table) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_SERVICE_CONFIG;
- arg.value.pointer.p = table;
- arg.value.pointer.vtable = &arg_vtable;
- return arg;
-}
-
-// State used by convert_entry() below.
-typedef struct conversion_state {
- void* (*convert_value)(const grpc_method_config* method_config);
- const grpc_mdstr_hash_table_vtable* vtable;
- size_t num_entries;
- grpc_mdstr_hash_table_entry* entries;
-} conversion_state;
-
-// A function to be passed to grpc_mdstr_hash_table_iterate() to create
-// a copy of the entries.
-static void convert_entry(const grpc_mdstr_hash_table_entry* entry,
- void* user_data) {
- conversion_state* state = user_data;
- state->entries[state->num_entries].key = GRPC_MDSTR_REF(entry->key);
- state->entries[state->num_entries].value = state->convert_value(entry->value);
- state->entries[state->num_entries].vtable = state->vtable;
- ++state->num_entries;
-}
-
-grpc_mdstr_hash_table* grpc_method_config_table_convert(
- const grpc_method_config_table* table,
- void* (*convert_value)(const grpc_method_config* method_config),
- const grpc_mdstr_hash_table_vtable* vtable) {
- // Create an array of the entries in the table with converted values.
- conversion_state state;
- state.convert_value = convert_value;
- state.vtable = vtable;
- state.num_entries = 0;
- state.entries = gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) *
- grpc_mdstr_hash_table_num_entries(table));
- grpc_mdstr_hash_table_iterate(table, convert_entry, &state);
- // Create a new table based on the array we just constructed.
- grpc_mdstr_hash_table* new_table =
- grpc_mdstr_hash_table_create(state.num_entries, state.entries);
- // Clean up the array.
- for (size_t i = 0; i < state.num_entries; ++i) {
- GRPC_MDSTR_UNREF(state.entries[i].key);
- vtable->destroy_value(state.entries[i].value);
- }
- gpr_free(state.entries);
- // Return the new table.
- return new_table;
-}
diff --git a/src/core/lib/transport/method_config.h b/src/core/lib/transport/method_config.h
deleted file mode 100644
index 58fedd9436..0000000000
--- a/src/core/lib/transport/method_config.h
+++ /dev/null
@@ -1,136 +0,0 @@
-//
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
-#define GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
-
-#include <stdbool.h>
-
-#include <grpc/impl/codegen/gpr_types.h>
-#include <grpc/impl/codegen/grpc_types.h>
-
-#include "src/core/lib/transport/mdstr_hash_table.h"
-#include "src/core/lib/transport/metadata.h"
-
-/// Per-method configuration.
-typedef struct grpc_method_config grpc_method_config;
-
-/// Creates a grpc_method_config with the specified parameters.
-/// Any parameter may be NULL to indicate that the value is unset.
-///
-/// \a wait_for_ready indicates whether the client should wait until the
-/// request deadline for the channel to become ready, even if there is a
-/// temporary failure before the deadline while attempting to connect.
-///
-/// \a timeout indicates the timeout for calls.
-///
-/// \a max_request_message_bytes and \a max_response_message_bytes
-/// indicate the maximum sizes of the request (checked when sending) and
-/// response (checked when receiving) messages.
-grpc_method_config* grpc_method_config_create(
- bool* wait_for_ready, gpr_timespec* timeout,
- int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
-
-grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
-void grpc_method_config_unref(grpc_method_config* method_config);
-
-/// Compares two grpc_method_configs.
-/// The sort order is stable but undefined.
-int grpc_method_config_cmp(const grpc_method_config* method_config1,
- const grpc_method_config* method_config2);
-
-/// These methods return NULL if the requested field is unset.
-/// The caller does NOT take ownership of the result.
-const bool* grpc_method_config_get_wait_for_ready(
- const grpc_method_config* method_config);
-const gpr_timespec* grpc_method_config_get_timeout(
- const grpc_method_config* method_config);
-const int32_t* grpc_method_config_get_max_request_message_bytes(
- const grpc_method_config* method_config);
-const int32_t* grpc_method_config_get_max_response_message_bytes(
- const grpc_method_config* method_config);
-
-/// A table of method configs.
-typedef grpc_mdstr_hash_table grpc_method_config_table;
-
-typedef struct grpc_method_config_table_entry {
- /// The name is of one of the following forms:
- /// service/method -- specifies exact service and method name
- /// service/* -- matches all methods for the specified service
- grpc_mdstr* method_name;
- grpc_method_config* method_config;
-} grpc_method_config_table_entry;
-
-/// Takes new references to all keys and values in \a entries.
-grpc_method_config_table* grpc_method_config_table_create(
- size_t num_entries, grpc_method_config_table_entry* entries);
-
-grpc_method_config_table* grpc_method_config_table_ref(
- grpc_method_config_table* table);
-void grpc_method_config_table_unref(grpc_method_config_table* table);
-
-/// Compares two grpc_method_config_tables.
-/// The sort order is stable but undefined.
-int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
- const grpc_method_config_table* table2);
-
-/// Gets the method config for the specified \a path, which should be of
-/// the form "/service/method".
-/// Returns NULL if the method has no config.
-/// Caller does NOT own a reference to the result.
-///
-/// Note: This returns a void* instead of a grpc_method_config* so that
-/// it can also be used for tables constructed via
-/// grpc_method_config_table_convert().
-void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path);
-
-/// Returns a channel arg containing \a table.
-grpc_arg grpc_method_config_table_create_channel_arg(
- grpc_method_config_table* table);
-
-/// Generates a new table from \a table whose values are converted to a
-/// new form via the \a convert_value function. The new table will use
-/// \a vtable for its values.
-///
-/// This is generally used to convert the table's value type from
-/// grpc_method_config to a simple struct containing only the parameters
-/// relevant to a particular filter, thus avoiding the need for a hash
-/// table lookup on the fast path. In that scenario, \a convert_value
-/// will return a new instance of the struct containing the values from
-/// the grpc_method_config, and \a vtable provides the methods for
-/// operating on the struct type.
-grpc_mdstr_hash_table* grpc_method_config_table_convert(
- const grpc_method_config_table* table,
- void* (*convert_value)(const grpc_method_config* method_config),
- const grpc_mdstr_hash_table_vtable* vtable);
-
-#endif /* GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H */
diff --git a/src/core/lib/transport/pid_controller.c b/src/core/lib/transport/pid_controller.c
new file mode 100644
index 0000000000..3cef225d4b
--- /dev/null
+++ b/src/core/lib/transport/pid_controller.c
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/transport/pid_controller.h"
+
+void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
+ double gain_p, double gain_i, double gain_d) {
+ pid_controller->gain_p = gain_p;
+ pid_controller->gain_i = gain_i;
+ pid_controller->gain_d = gain_d;
+ grpc_pid_controller_reset(pid_controller);
+}
+
+void grpc_pid_controller_reset(grpc_pid_controller *pid_controller) {
+ pid_controller->last_error = 0.0;
+ pid_controller->error_integral = 0.0;
+}
+
+double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
+ double error, double dt) {
+ pid_controller->error_integral += error * dt;
+ double diff_error = (error - pid_controller->last_error) / dt;
+ pid_controller->last_error = error;
+ return dt * (pid_controller->gain_p * error +
+ pid_controller->gain_i * pid_controller->error_integral +
+ pid_controller->gain_d * diff_error);
+}
diff --git a/src/core/lib/transport/pid_controller.h b/src/core/lib/transport/pid_controller.h
new file mode 100644
index 0000000000..059b5b0834
--- /dev/null
+++ b/src/core/lib/transport/pid_controller.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
+#define GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
+
+/* \file Simple PID controller.
+ Implements a proportional-integral-derivative controller.
+ Used when we want to iteratively control a variable to converge some other
+ observed value to a 'set-point'.
+ Gains can be set to adjust sensitivity to current error (p), the integral
+ of error (i), and the derivative of error (d). */
+
+typedef struct {
+ double gain_p;
+ double gain_i;
+ double gain_d;
+ double last_error;
+ double error_integral;
+} grpc_pid_controller;
+
+/** Initialize the controller */
+void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
+ double gain_p, double gain_i, double gain_d);
+
+/** Reset the controller: useful when things have changed significantly */
+void grpc_pid_controller_reset(grpc_pid_controller *pid_controller);
+
+/** Update the controller: given a current error estimate, and the time since
+ the last update, returns a delta to the control value */
+double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
+ double error, double dt);
+
+#endif
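
A self-contained usage sketch (not part of this change) of the new PID controller: the delta returned by grpc_pid_controller_update() is applied to a made-up control variable to drive it toward a set point; the gains and numbers are illustrative only:

    #include <stdio.h>
    #include "src/core/lib/transport/pid_controller.h"

    int main(void) {
      grpc_pid_controller pid;
      grpc_pid_controller_init(&pid, 0.2, 0.01, 0.05); /* gain_p, _i, _d */
      double value = 100.0;           /* hypothetical controlled variable */
      const double set_point = 150.0; /* where we want value to converge */
      for (int step = 0; step < 10; ++step) {
        double error = set_point - value;
        /* update() returns a delta to apply to the controlled value. */
        value += grpc_pid_controller_update(&pid, error, 0.1 /* dt, secs */);
        printf("step %d: value=%f\n", step, value);
      }
      return 0;
    }
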
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.c
new file mode 100644
index 0000000000..2e2b59e3f7
--- /dev/null
+++ b/src/core/lib/transport/service_config.c
@@ -0,0 +1,249 @@
+//
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "src/core/lib/transport/service_config.h"
+
+#include <string.h>
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/mdstr_hash_table.h"
+
+// The main purpose of the code here is to parse the service config in
+// JSON form, which will look like this:
+//
+// {
+// "loadBalancingPolicy": "string", // optional
+// "methodConfig": [ // array of one or more method_config objects
+// {
+// "name": [ // array of one or more name objects
+// {
+// "service": "string", // required
+// "method": "string", // optional
+// }
+// ],
+// // remaining fields are optional.
+// // see https://developers.google.com/protocol-buffers/docs/proto3#json
+// // for format details.
+// "waitForReady": bool,
+// "timeout": "duration_string",
+// "maxRequestMessageBytes": "int64_string",
+// "maxResponseMessageBytes": "int64_string",
+// }
+// ]
+// }
+
+struct grpc_service_config {
+ char* json_string; // Underlying storage for json_tree.
+ grpc_json* json_tree;
+};
+
+grpc_service_config* grpc_service_config_create(const char* json_string) {
+ grpc_service_config* service_config = gpr_malloc(sizeof(*service_config));
+ service_config->json_string = gpr_strdup(json_string);
+ service_config->json_tree =
+ grpc_json_parse_string(service_config->json_string);
+ if (service_config->json_tree == NULL) {
+ gpr_log(GPR_INFO, "failed to parse JSON for service config");
+ gpr_free(service_config->json_string);
+ gpr_free(service_config);
+ return NULL;
+ }
+ return service_config;
+}
+
+void grpc_service_config_destroy(grpc_service_config* service_config) {
+ grpc_json_destroy(service_config->json_tree);
+ gpr_free(service_config->json_string);
+ gpr_free(service_config);
+}
+
+const char* grpc_service_config_get_lb_policy_name(
+ const grpc_service_config* service_config) {
+ const grpc_json* json = service_config->json_tree;
+ if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL;
+ const char* lb_policy_name = NULL;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) return NULL;
+ if (strcmp(field->key, "loadBalancingPolicy") == 0) {
+ if (lb_policy_name != NULL) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_STRING) return NULL;
+ lb_policy_name = field->value;
+ }
+ }
+ return lb_policy_name;
+}
+
+// Returns the number of names specified in the method config \a json.
+static size_t count_names_in_method_config_json(grpc_json* json) {
+ size_t num_names = 0;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key != NULL && strcmp(field->key, "name") == 0) ++num_names;
+ }
+ return num_names;
+}
+
+// Returns a path string for the JSON name object specified by \a json.
+// Returns NULL on error. Caller takes ownership of result.
+static char* parse_json_method_name(grpc_json* json) {
+ if (json->type != GRPC_JSON_OBJECT) return NULL;
+ const char* service_name = NULL;
+ const char* method_name = NULL;
+ for (grpc_json* child = json->child; child != NULL; child = child->next) {
+ if (child->key == NULL) return NULL;
+ if (child->type != GRPC_JSON_STRING) return NULL;
+ if (strcmp(child->key, "service") == 0) {
+ if (service_name != NULL) return NULL; // Duplicate.
+ if (child->value == NULL) return NULL;
+ service_name = child->value;
+ } else if (strcmp(child->key, "method") == 0) {
+ if (method_name != NULL) return NULL; // Duplicate.
+ if (child->value == NULL) return NULL;
+ method_name = child->value;
+ }
+ }
+ if (service_name == NULL) return NULL; // Required field.
+ char* path;
+ gpr_asprintf(&path, "/%s/%s", service_name,
+ method_name == NULL ? "*" : method_name);
+ return path;
+}
+
+// Parses the method config from \a json. Adds an entry to \a entries for
+// each name found, incrementing \a idx for each entry added.
+// Returns false on error.
+static bool parse_json_method_config(
+ grpc_json* json, void* (*create_value)(const grpc_json* method_config_json),
+ const grpc_mdstr_hash_table_vtable* vtable,
+ grpc_mdstr_hash_table_entry* entries, size_t* idx) {
+ // Construct value.
+ void* method_config = create_value(json);
+ if (method_config == NULL) return false;
+ // Construct list of paths.
+ bool success = false;
+ gpr_strvec paths;
+ gpr_strvec_init(&paths);
+ for (grpc_json* child = json->child; child != NULL; child = child->next) {
+ if (child->key == NULL) continue;
+ if (strcmp(child->key, "name") == 0) {
+ if (child->type != GRPC_JSON_ARRAY) goto done;
+ for (grpc_json* name = child->child; name != NULL; name = name->next) {
+ char* path = parse_json_method_name(name);
+ gpr_strvec_add(&paths, path);
+ }
+ }
+ }
+ if (paths.count == 0) goto done; // No names specified.
+ // Add entry for each path.
+ for (size_t i = 0; i < paths.count; ++i) {
+ entries[*idx].key = grpc_mdstr_from_string(paths.strs[i]);
+ entries[*idx].value = vtable->copy_value(method_config);
+ entries[*idx].vtable = vtable;
+ ++*idx;
+ }
+ success = true;
+done:
+ vtable->destroy_value(method_config);
+ gpr_strvec_destroy(&paths);
+ return success;
+}
+
+grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
+ const grpc_service_config* service_config,
+ void* (*create_value)(const grpc_json* method_config_json),
+ const grpc_mdstr_hash_table_vtable* vtable) {
+ const grpc_json* json = service_config->json_tree;
+ // Traverse parsed JSON tree.
+ if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL;
+ size_t num_entries = 0;
+ grpc_mdstr_hash_table_entry* entries = NULL;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) return NULL;
+ if (strcmp(field->key, "methodConfig") == 0) {
+ if (entries != NULL) return NULL; // Duplicate.
+ if (field->type != GRPC_JSON_ARRAY) return NULL;
+ // Find number of entries.
+ for (grpc_json* method = field->child; method != NULL;
+ method = method->next) {
+ num_entries += count_names_in_method_config_json(method);
+ }
+ // Populate method config table entries.
+ entries = gpr_malloc(num_entries * sizeof(grpc_mdstr_hash_table_entry));
+ size_t idx = 0;
+ for (grpc_json* method = field->child; method != NULL;
+ method = method->next) {
+ if (!parse_json_method_config(method, create_value, vtable, entries,
+ &idx)) {
+ return NULL;
+ }
+ }
+ GPR_ASSERT(idx == num_entries);
+ }
+ }
+ // Instantiate method config table.
+ grpc_mdstr_hash_table* method_config_table = NULL;
+ if (entries != NULL) {
+ method_config_table = grpc_mdstr_hash_table_create(num_entries, entries);
+ // Clean up.
+ for (size_t i = 0; i < num_entries; ++i) {
+ GRPC_MDSTR_UNREF(entries[i].key);
+ vtable->destroy_value(entries[i].value);
+ }
+ gpr_free(entries);
+ }
+ return method_config_table;
+}
+
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path) {
+ void* value = grpc_mdstr_hash_table_get(table, path);
+ // If we didn't find a match for the path, try looking for a wildcard
+ // entry (i.e., change "/service/method" to "/service/*").
+ if (value == NULL) {
+ const char* path_str = grpc_mdstr_as_c_string(path);
+ const char* sep = strrchr(path_str, '/') + 1;
+ const size_t len = (size_t)(sep - path_str);
+ char* buf = gpr_malloc(len + 2); // '*' and NUL
+ memcpy(buf, path_str, len);
+ buf[len] = '*';
+ buf[len + 1] = '\0';
+ grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
+ gpr_free(buf);
+ value = grpc_mdstr_hash_table_get(table, wildcard_path);
+ GRPC_MDSTR_UNREF(wildcard_path);
+ }
+ return value;
+}
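
An end-to-end sketch (not part of this change) of the new service config API: parse the JSON, read the load-balancing policy name, build a method config table keyed by "/service/method", and look a path up with the wildcard fallback. The my_config type and its trivial parsing are placeholders for a real per-filter config:

    #include <grpc/support/alloc.h>
    #include "src/core/lib/transport/metadata.h"
    #include "src/core/lib/transport/service_config.h"

    typedef struct { int wait_for_ready; } my_config;

    static void *my_config_create(const grpc_json *method_config_json) {
      my_config *c = gpr_malloc(sizeof(*c));
      c->wait_for_ready = 0; /* a real filter would walk the JSON here */
      (void)method_config_json;
      return c;
    }
    static void my_config_destroy(void *p) { gpr_free(p); }
    static void *my_config_copy(void *p) {
      my_config *c = gpr_malloc(sizeof(*c));
      *c = *(my_config *)p;
      return c;
    }
    static const grpc_mdstr_hash_table_vtable my_vtable = {my_config_destroy,
                                                           my_config_copy};

    static void demo(const char *json) {
      grpc_service_config *svc = grpc_service_config_create(json);
      if (svc == NULL) return; /* malformed JSON */
      const char *lb = grpc_service_config_get_lb_policy_name(svc);
      grpc_mdstr_hash_table *table =
          grpc_service_config_create_method_config_table(svc, my_config_create,
                                                         &my_vtable);
      if (table != NULL) {
        grpc_mdstr *path = grpc_mdstr_from_string("/Foo/Bar");
        /* Falls back to "/Foo/*" when there is no exact entry. */
        my_config *cfg = grpc_method_config_table_get(table, path);
        GRPC_MDSTR_UNREF(path);
        (void)cfg;
        grpc_mdstr_hash_table_unref(table);
      }
      (void)lb;
      grpc_service_config_destroy(svc);
    }
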
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
new file mode 100644
index 0000000000..2ffe475193
--- /dev/null
+++ b/src/core/lib/transport/service_config.h
@@ -0,0 +1,70 @@
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H
+#define GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H
+
+#include <grpc/impl/codegen/grpc_types.h>
+
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/transport/mdstr_hash_table.h"
+
+typedef struct grpc_service_config grpc_service_config;
+
+grpc_service_config* grpc_service_config_create(const char* json_string);
+void grpc_service_config_destroy(grpc_service_config* service_config);
+
+/// Gets the LB policy name from \a service_config.
+/// Returns NULL if no LB policy name was specified.
+/// Caller does NOT take ownership.
+const char* grpc_service_config_get_lb_policy_name(
+ const grpc_service_config* service_config);
+
+/// Creates a method config table from the JSON data in \a service_config.
+/// The table's keys are request paths; its values are the objects returned
+/// by \a create_value(), built from the corresponding parsed JSON subtree.
+/// \a vtable provides methods used to manage the values.
+/// Returns NULL on error.
+grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
+ const grpc_service_config* service_config,
+ void* (*create_value)(const grpc_json* method_config_json),
+ const grpc_mdstr_hash_table_vtable* vtable);
+
+/// A helper function for looking up values in the table returned by
+/// \a grpc_service_config_create_method_config_table().
+/// Gets the method config for the specified \a path, which should be of
+/// the form "/service/method".
+/// Returns NULL if the method has no config.
+/// Caller does NOT own a reference to the result.
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */
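
To make that contract concrete, here is a minimal caller-side sketch. The JSON literal, my_config_t, my_create_value(), my_copy_value()/my_destroy_value(), and the vtable initializer's field names are illustrative assumptions; only the grpc_service_config_* calls and the "methodConfig"/"name" keys are defined by this patch.

#include <stdbool.h>
#include <string.h>

#include <grpc/support/alloc.h>

#include "src/core/lib/transport/service_config.h"

static const char *kExampleServiceConfig =
    "{ \"methodConfig\": [ {"
    "    \"name\": [ { \"service\": \"Foo\", \"method\": \"Bar\" } ],"
    "    \"waitForReady\": true"
    "} ] }";

typedef struct {
  bool wait_for_ready;
} my_config_t;

/* Invoked once per methodConfig element; parses caller-defined fields. */
static void *my_create_value(const grpc_json *method_config_json) {
  my_config_t *value = gpr_malloc(sizeof(*value));
  value->wait_for_ready = false;
  for (grpc_json *field = method_config_json->child; field != NULL;
       field = field->next) {
    if (field->key != NULL && strcmp(field->key, "waitForReady") == 0) {
      value->wait_for_ready = (field->type == GRPC_JSON_TRUE);
    }
  }
  return value;
}

static void my_destroy_value(void *value) { gpr_free(value); }

static void *my_copy_value(void *value) {
  my_config_t *copy = gpr_malloc(sizeof(my_config_t));
  memcpy(copy, value, sizeof(my_config_t));
  return copy;
}

/* Field names below are an assumption about grpc_mdstr_hash_table_vtable. */
static const grpc_mdstr_hash_table_vtable my_vtable = {
    .destroy_value = my_destroy_value, .copy_value = my_copy_value};

static grpc_mdstr_hash_table *build_method_config_table(void) {
  grpc_service_config *svc = grpc_service_config_create(kExampleServiceConfig);
  if (svc == NULL) return NULL;
  grpc_mdstr_hash_table *table =
      grpc_service_config_create_method_config_table(svc, my_create_value,
                                                     &my_vtable);
  grpc_service_config_destroy(svc);
  return table;
}

The returned table is then consulted per call via grpc_method_config_table_get(), as sketched earlier.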
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 75aec7a5b4..b448126da8 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -40,6 +40,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport_impl.h"
@@ -159,6 +160,11 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
return transport->vtable->get_peer(exec_ctx, transport);
}
+grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport) {
+ return transport->vtable->get_endpoint(exec_ctx, transport);
+}
+
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
grpc_error *error) {
@@ -207,21 +213,21 @@ void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
void grpc_transport_stream_op_add_cancellation_with_message(
grpc_transport_stream_op *op, grpc_status_code status,
- gpr_slice *optional_message) {
+ grpc_slice *optional_message) {
GPR_ASSERT(status != GRPC_STATUS_OK);
if (op->cancel_error != GRPC_ERROR_NONE) {
if (optional_message) {
- gpr_slice_unref(*optional_message);
+ grpc_slice_unref(*optional_message);
}
return;
}
grpc_error *error;
if (optional_message != NULL) {
- char *msg = gpr_dump_slice(*optional_message, GPR_DUMP_ASCII);
+ char *msg = grpc_dump_slice(*optional_message, GPR_DUMP_ASCII);
error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
GRPC_ERROR_STR_GRPC_MESSAGE, msg);
gpr_free(msg);
- gpr_slice_unref(*optional_message);
+ grpc_slice_unref(*optional_message);
} else {
error = GRPC_ERROR_CREATE("Call cancelled");
}
@@ -231,22 +237,22 @@ void grpc_transport_stream_op_add_cancellation_with_message(
void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
grpc_status_code status,
- gpr_slice *optional_message) {
+ grpc_slice *optional_message) {
GPR_ASSERT(status != GRPC_STATUS_OK);
if (op->cancel_error != GRPC_ERROR_NONE ||
op->close_error != GRPC_ERROR_NONE) {
if (optional_message) {
- gpr_slice_unref(*optional_message);
+ grpc_slice_unref(*optional_message);
}
return;
}
grpc_error *error;
if (optional_message != NULL) {
- char *msg = gpr_dump_slice(*optional_message, GPR_DUMP_ASCII);
+ char *msg = grpc_dump_slice(*optional_message, GPR_DUMP_ASCII);
error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
GRPC_ERROR_STR_GRPC_MESSAGE, msg);
gpr_free(msg);
- gpr_slice_unref(*optional_message);
+ grpc_slice_unref(*optional_message);
} else {
error = GRPC_ERROR_CREATE("Call force closed");
}
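
For callers the rename is mechanical, but the ownership rule is easy to miss: the op takes ownership of the (now grpc_slice) message in every branch above. A small sketch, assuming grpc_slice_from_copied_string() from the renamed slice API:

#include <grpc/slice.h>
#include <grpc/status.h>

#include "src/core/lib/transport/transport.h"

static void cancel_with_message(grpc_transport_stream_op *op) {
  grpc_slice msg = grpc_slice_from_copied_string("deadline exceeded");
  /* The op unrefs msg for us, even if a cancellation was already recorded. */
  grpc_transport_stream_op_add_cancellation_with_message(
      op, GRPC_STATUS_DEADLINE_EXCEEDED, &msg);
}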
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 50253ebad1..96c26749c8 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -37,6 +37,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
@@ -181,7 +182,7 @@ typedef struct grpc_transport_op {
bool send_goaway;
/** what should the goaway contain? */
grpc_status_code goaway_status;
- gpr_slice *goaway_message;
+ grpc_slice *goaway_message;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures.
If true, the callback is set to set_accept_stream_fn, with its
@@ -249,11 +250,11 @@ void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
void grpc_transport_stream_op_add_cancellation_with_message(
grpc_transport_stream_op *op, grpc_status_code status,
- gpr_slice *optional_message);
+ grpc_slice *optional_message);
void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
grpc_status_code status,
- gpr_slice *optional_message);
+ grpc_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
char *grpc_transport_op_string(grpc_transport_op *op);
@@ -283,7 +284,7 @@ void grpc_transport_ping(grpc_transport *transport, grpc_closure *cb);
/* Advise peer of pending connection termination. */
void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
- gpr_slice debug_data);
+ grpc_slice debug_data);
/* Close a transport. Aborts all open streams. */
void grpc_transport_close(grpc_transport *transport);
@@ -295,6 +296,10 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
+/* Get the endpoint used by \a transport */
+grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport);
+
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index fc7140671b..8553148c35 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -74,6 +74,9 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_get_peer */
char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
+
+ /* implementation of grpc_transport_get_endpoint */
+ grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
/* an instance of a grpc transport */
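
Each concrete transport now supplies one more hook for the vtable slot added above. A sketch with a hypothetical transport type; the real chttp2 hookup lives elsewhere in this change:

#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/transport/transport_impl.h"

typedef struct {
  grpc_transport base; /* must be the first member */
  grpc_endpoint *ep;
  /* ... transport-specific state ... */
} my_transport;

static grpc_endpoint *my_get_endpoint(grpc_exec_ctx *exec_ctx,
                                      grpc_transport *t) {
  return ((my_transport *)t)->ep;
}

/* my_get_endpoint then fills the last slot of the transport's otherwise
   unchanged grpc_transport_vtable, after get_peer. */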
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 533ec52077..58d6ad508e 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -40,6 +40,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -48,12 +49,12 @@
static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
gpr_strvec_add(b, gpr_strdup("key="));
- gpr_strvec_add(b,
- gpr_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ gpr_strvec_add(
+ b, grpc_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
gpr_strvec_add(b, gpr_strdup(" value="));
gpr_strvec_add(
- b, gpr_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ b, grpc_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
@@ -175,8 +176,8 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
first = false;
char *msg = op->goaway_message == NULL
? "null"
- : gpr_dump_slice(*op->goaway_message,
- GPR_DUMP_ASCII | GPR_DUMP_HEX);
+ : grpc_dump_slice(*op->goaway_message,
+ GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
if (op->goaway_message != NULL) gpr_free(msg);
gpr_strvec_add(&b, tmp);
diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c
index 749b46e19f..366dca9507 100644
--- a/src/core/lib/tsi/ssl_transport_security.c
+++ b/src/core/lib/tsi/ssl_transport_security.c
@@ -31,9 +31,6 @@
*
*/
-#include "src/core/lib/iomgr/sockaddr.h"
-
-#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/tsi/ssl_transport_security.h"
#include <grpc/support/port_platform.h>
@@ -41,6 +38,15 @@
#include <limits.h>
#include <string.h>
+/* TODO(jboeuf): refactor inet_ntop into a portability header. */
+/* Note: for whoever reads this and attempts that refactor: this
+   can't live in grpc; it has to be in gpr. */
+#ifdef GPR_WINDOWS
+#include <ws2tcpip.h>
+#else
+#include <arpa/inet.h>
+#endif
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
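
One possible shape for the portability shim the TODO above asks for, sketched as a hypothetical gpr-level wrapper (not part of this patch; the Windows-side details in particular are assumptions):

#include <stddef.h>

#include <grpc/support/port_platform.h>

#ifdef GPR_WINDOWS
#include <ws2tcpip.h>
#else
#include <arpa/inet.h>
#endif

/* Hypothetical gpr wrapper so call sites avoid the per-file #ifdef above. */
const char *gpr_inet_ntop(int af, const void *src, char *dst, size_t size) {
#ifdef GPR_WINDOWS
  /* Some Windows SDK versions declare the source argument as non-const. */
  return inet_ntop(af, (void *)src, dst, size);
#else
  return inet_ntop(af, src, dst, (socklen_t)size);
#endif
}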
@@ -349,8 +355,8 @@ static tsi_result add_subject_alt_names_properties_to_peer(
result = TSI_INTERNAL_ERROR;
break;
}
- const char *name = grpc_inet_ntop(af, subject_alt_name->d.iPAddress->data,
- ntop_buf, INET6_ADDRSTRLEN);
+ const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data,
+ ntop_buf, INET6_ADDRSTRLEN);
if (name == NULL) {
gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet.");
result = TSI_INTERNAL_ERROR;