Diffstat (limited to 'src/core')
-rw-r--r--  src/core/channel/client_channel.c | 50
-rw-r--r--  src/core/channel/compress_filter.c | 7
-rw-r--r--  src/core/channel/connected_channel.c | 1
-rw-r--r--  src/core/channel/http_client_filter.c | 3
-rw-r--r--  src/core/channel/http_server_filter.c | 3
-rw-r--r--  src/core/client_config/lb_policies/pick_first.c | 48
-rw-r--r--  src/core/client_config/subchannel.c | 17
-rw-r--r--  src/core/client_config/subchannel.h | 22
-rw-r--r--  src/core/client_config/uri_parser.c | 4
-rw-r--r--  src/core/compression/algorithm.c | 2
-rw-r--r--  src/core/httpcli/parser.c | 11
-rw-r--r--  src/core/iomgr/closure.c | 24
-rw-r--r--  src/core/iomgr/closure.h | 3
-rw-r--r--  src/core/iomgr/exec_ctx.c | 8
-rw-r--r--  src/core/iomgr/fd_posix.c | 224
-rw-r--r--  src/core/iomgr/fd_posix.h | 19
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_epoll.c | 9
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_poll_posix.c | 34
-rw-r--r--  src/core/iomgr/pollset_posix.c | 165
-rw-r--r--  src/core/iomgr/pollset_posix.h | 11
-rw-r--r--  src/core/iomgr/tcp_client_posix.c | 2
-rw-r--r--  src/core/iomgr/tcp_posix.c | 22
-rw-r--r--  src/core/iomgr/tcp_server_posix.c | 2
-rw-r--r--  src/core/iomgr/tcp_server_windows.c | 2
-rw-r--r--  src/core/iomgr/udp_server.c | 2
-rw-r--r--  src/core/iomgr/udp_server.h | 3
-rw-r--r--  src/core/iomgr/wakeup_fd_eventfd.c | 6
-rw-r--r--  src/core/profiling/basic_timers.c | 103
-rw-r--r--  src/core/profiling/stap_timers.c | 16
-rw-r--r--  src/core/profiling/timers.h | 115
-rw-r--r--  src/core/security/credentials.c | 42
-rw-r--r--  src/core/security/credentials.h | 8
-rw-r--r--  src/core/security/security_context.c | 26
-rw-r--r--  src/core/security/security_context.h | 11
-rw-r--r--  src/core/security/server_auth_filter.c | 36
-rw-r--r--  src/core/security/server_secure_chttp2.c | 4
-rw-r--r--  src/core/support/alloc.c | 14
-rw-r--r--  src/core/support/sync_posix.c | 13
-rw-r--r--  src/core/support/time_posix.c | 3
-rw-r--r--  src/core/support/time_precise.c | 89
-rw-r--r--  src/core/support/time_precise.h | 55
-rw-r--r--  src/core/surface/byte_buffer.c | 7
-rw-r--r--  src/core/surface/call.c | 91
-rw-r--r--  src/core/surface/call.h | 11
-rw-r--r--  src/core/surface/call_test_only.h | 65
-rw-r--r--  src/core/surface/channel_connectivity.c | 8
-rw-r--r--  src/core/surface/completion_queue.c | 18
-rw-r--r--  src/core/surface/init.c | 4
-rw-r--r--  src/core/transport/chttp2/bin_encoder.c | 16
-rw-r--r--  src/core/transport/chttp2/frame_data.c | 4
-rw-r--r--  src/core/transport/chttp2/frame_goaway.c | 4
-rw-r--r--  src/core/transport/chttp2/hpack_parser.c | 8
-rw-r--r--  src/core/transport/chttp2/parsing.c | 21
-rw-r--r--  src/core/transport/chttp2/stream_encoder.c | 6
-rw-r--r--  src/core/transport/chttp2/writing.c | 5
-rw-r--r--  src/core/transport/chttp2_transport.c | 16
-rw-r--r--  src/core/transport/stream_op.c | 6
-rw-r--r--  src/core/tsi/fake_transport_security.c | 8
-rw-r--r--  src/core/tsi/ssl_transport_security.c | 13
59 files changed, 993 insertions, 557 deletions
diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c
index b59b62a6aa..9f85557ea1 100644
--- a/src/core/channel/client_channel.c
+++ b/src/core/channel/client_channel.c
@@ -36,22 +36,24 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
-#include "src/core/surface/channel.h"
#include "src/core/iomgr/iomgr.h"
+#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
+#include "src/core/surface/channel.h"
#include "src/core/transport/connectivity_state.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
/* Client channel implementation */
typedef struct call_data call_data;
-typedef struct {
+typedef struct client_channel_channel_data {
/** metadata context for this channel */
grpc_mdctx *mdctx;
/** resolver for this channel */
@@ -196,13 +198,12 @@ static int is_empty(void *p, int len) {
return 1;
}
-static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
+static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
- gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
@@ -230,10 +231,20 @@ static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
}
}
+static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
+ call_data *calld = arg;
+ gpr_mu_lock(&calld->mu_state);
+ started_call_locked(exec_ctx, arg, iomgr_success);
+}
+
static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_pollset *pollset;
+ grpc_subchannel_call_create_status call_creation_status;
+
+ GPR_TIMER_BEGIN("picked_target", 0);
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
@@ -248,13 +259,19 @@ static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
calld->state = CALL_WAITING_FOR_CALL;
pollset = calld->waiting_op.bind_pollset;
- gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->async_setup_task, started_call, calld);
- grpc_subchannel_create_call(exec_ctx, calld->picked_channel, pollset,
- &calld->subchannel_call,
- &calld->async_setup_task);
+ call_creation_status = grpc_subchannel_create_call(
+ exec_ctx, calld->picked_channel, pollset, &calld->subchannel_call,
+ &calld->async_setup_task);
+ if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
+ started_call_locked(exec_ctx, calld, iomgr_success);
+ } else {
+ gpr_mu_unlock(&calld->mu_state);
+ }
}
}
+
+ GPR_TIMER_END("picked_target", 0);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
@@ -315,6 +332,7 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
+ GPR_TIMER_BEGIN("perform_transport_stream_op", 0);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
@@ -426,6 +444,8 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
}
break;
}
+
+ GPR_TIMER_END("perform_transport_stream_op", 0);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@@ -645,9 +665,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- break;
+ GPR_UNREACHABLE_CODE(return );
}
}
diff --git a/src/core/channel/compress_filter.c b/src/core/channel/compress_filter.c
index 182fbf18bf..20b5084044 100644
--- a/src/core/channel/compress_filter.c
+++ b/src/core/channel/compress_filter.c
@@ -41,6 +41,7 @@
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
+#include "src/core/profiling/timers.h"
#include "src/core/compression/message_compress.h"
#include "src/core/support/string.h"
@@ -242,7 +243,7 @@ static void process_send_ops(grpc_call_element *elem,
GPR_ASSERT(calld->remaining_slice_bytes > 0);
/* Increase input ref count, gpr_slice_buffer_add takes ownership. */
gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
- GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) >=
+ GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) <=
calld->remaining_slice_bytes);
calld->remaining_slice_bytes -=
(gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
@@ -271,10 +272,14 @@ static void process_send_ops(grpc_call_element *elem,
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
+
if (op->send_ops && op->send_ops->nops > 0) {
process_send_ops(elem, op->send_ops);
}
+ GPR_TIMER_END("compress_start_transport_stream_op", 0);
+
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
diff --git a/src/core/channel/connected_channel.c b/src/core/channel/connected_channel.c
index f9fc280259..6d4d7be632 100644
--- a/src/core/channel/connected_channel.c
+++ b/src/core/channel/connected_channel.c
@@ -39,6 +39,7 @@
#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
+#include "src/core/profiling/timers.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
diff --git a/src/core/channel/http_client_filter.c b/src/core/channel/http_client_filter.c
index d67dc37ad2..f78a5cc315 100644
--- a/src/core/channel/http_client_filter.c
+++ b/src/core/channel/http_client_filter.c
@@ -36,6 +36,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
+#include "src/core/profiling/timers.h"
typedef struct call_data {
grpc_linked_mdelem method;
@@ -162,8 +163,10 @@ static void hc_mutate_op(grpc_call_element *elem,
static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
+ GPR_TIMER_END("hc_start_transport_op", 0);
grpc_call_next_op(exec_ctx, elem, op);
}
diff --git a/src/core/channel/http_server_filter.c b/src/core/channel/http_server_filter.c
index 5e6d684a52..99e5066a4e 100644
--- a/src/core/channel/http_server_filter.c
+++ b/src/core/channel/http_server_filter.c
@@ -36,6 +36,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/profiling/timers.h"
typedef struct call_data {
gpr_uint8 got_initial_metadata;
@@ -230,8 +231,10 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ GPR_TIMER_BEGIN("hs_start_transport_op", 0);
hs_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
+ GPR_TIMER_END("hs_start_transport_op", 0);
}
/* Constructor for call_data */
diff --git a/src/core/client_config/lb_policies/pick_first.c b/src/core/client_config/lb_policies/pick_first.c
index 28155d0fbc..e5bf0680ff 100644
--- a/src/core/client_config/lb_policies/pick_first.c
+++ b/src/core/client_config/lb_policies/pick_first.c
@@ -101,6 +101,9 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
+ if (p->selected) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
+ }
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
@@ -172,6 +175,35 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
+static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
+ pick_first_lb_policy *p = arg;
+ size_t i;
+ grpc_transport_op op;
+ size_t num_subchannels = p->num_subchannels;
+ grpc_subchannel **subchannels;
+ grpc_subchannel *exclude_subchannel;
+
+ gpr_mu_lock(&p->mu);
+ subchannels = p->subchannels;
+ p->num_subchannels = 0;
+ p->subchannels = NULL;
+ exclude_subchannel = p->selected;
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "destroy_subchannels");
+
+ for (i = 0; i < num_subchannels; i++) {
+ if (subchannels[i] != exclude_subchannel) {
+ memset(&op, 0, sizeof(op));
+ op.disconnect = 1;
+ grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], &op);
+ }
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
+ }
+
+ gpr_free(subchannels);
+}
+
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
@@ -200,6 +232,12 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
p->selected = p->subchannels[p->checking_subchannel];
+ GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
+ /* drop the pick list: we are connected now */
+ GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
+ grpc_exec_ctx_enqueue(exec_ctx,
+ grpc_closure_create(destroy_subchannels, p), 1);
+ /* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
@@ -279,10 +317,15 @@ static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
size_t i;
size_t n;
grpc_subchannel **subchannels;
+ grpc_subchannel *selected;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
+ selected = p->selected;
+ if (selected) {
+ GRPC_SUBCHANNEL_REF(selected, "pf_broadcast_to_selected");
+ }
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "pf_broadcast");
@@ -290,9 +333,14 @@ static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
+ if (selected == subchannels[i]) continue;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
}
+ if (p->selected) {
+ grpc_subchannel_process_transport_op(exec_ctx, selected, op);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, selected, "pf_broadcast_to_selected");
+ }
gpr_free(subchannels);
}
diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c
index a378f06543..8494ebdc1d 100644
--- a/src/core/client_config/subchannel.c
+++ b/src/core/client_config/subchannel.c
@@ -358,18 +358,20 @@ static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
static void continue_creating_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
+ grpc_subchannel_call_create_status call_creation_status;
waiting_for_connect *w4c = arg;
grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel, w4c->pollset);
- grpc_subchannel_create_call(exec_ctx, w4c->subchannel, w4c->pollset,
- w4c->target, w4c->notify);
+ call_creation_status = grpc_subchannel_create_call(
+ exec_ctx, w4c->subchannel, w4c->pollset, w4c->target, w4c->notify);
+ GPR_ASSERT(call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY);
+ w4c->notify->cb(exec_ctx, w4c->notify->cb_arg, iomgr_success);
GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
gpr_free(w4c);
}
-void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
- grpc_pollset *pollset,
- grpc_subchannel_call **target,
- grpc_closure *notify) {
+grpc_subchannel_call_create_status grpc_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *c, grpc_pollset *pollset,
+ grpc_subchannel_call **target, grpc_closure *notify) {
connection *con;
gpr_mu_lock(&c->mu);
if (c->active != NULL) {
@@ -378,7 +380,7 @@ void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
gpr_mu_unlock(&c->mu);
*target = create_call(exec_ctx, con);
- notify->cb(exec_ctx, notify->cb_arg, 1);
+ return GRPC_SUBCHANNEL_CALL_CREATE_READY;
} else {
waiting_for_connect *w4c = gpr_malloc(sizeof(*w4c));
w4c->next = c->waiting;
@@ -403,6 +405,7 @@ void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
} else {
gpr_mu_unlock(&c->mu);
}
+ return GRPC_SUBCHANNEL_CALL_CREATE_PENDING;
}
}
diff --git a/src/core/client_config/subchannel.h b/src/core/client_config/subchannel.h
index f9bc0c2d2f..ec1cc7cc69 100644
--- a/src/core/client_config/subchannel.h
+++ b/src/core/client_config/subchannel.h
@@ -75,12 +75,22 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-/** construct a call (possibly asynchronously) */
-void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *subchannel,
- grpc_pollset *pollset,
- grpc_subchannel_call **target,
- grpc_closure *notify);
+typedef enum {
+ GRPC_SUBCHANNEL_CALL_CREATE_READY,
+ GRPC_SUBCHANNEL_CALL_CREATE_PENDING
+} grpc_subchannel_call_create_status;
+
+/** construct a subchannel call (possibly asynchronously).
+ *
+ * If the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_READY, the call will
+ * return immediately and \a target will point to a connected \a subchannel_call
+ * instance. Note that \a notify will \em not be invoked in this case.
+ * Otherwise, if the returned status is GRPC_SUBCHANNEL_CALL_CREATE_PENDING, the
+ * subchannel call will be created asynchronously, invoking the \a notify
+ * callback upon completion. */
+grpc_subchannel_call_create_status grpc_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *subchannel, grpc_pollset *pollset,
+ grpc_subchannel_call **target, grpc_closure *notify);
/** cancel \a call in the waiting state. */
void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
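[Editor's note: the contract documented above is why client_channel.c, earlier in this diff, splits started_call from started_call_locked. A minimal caller sketch — hypothetical locals, not part of the commit — of how the two-state return value is consumed:

  grpc_subchannel_call_create_status status;
  grpc_closure_init(&calld->async_setup_task, started_call, calld);
  status = grpc_subchannel_create_call(exec_ctx, subchannel, pollset,
                                       &calld->subchannel_call,
                                       &calld->async_setup_task);
  if (status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
    /* completed synchronously: notify will not run, so continue inline
       while still holding the call lock */
    started_call_locked(exec_ctx, calld, 1 /* success */);
  } else {
    /* GRPC_SUBCHANNEL_CALL_CREATE_PENDING: notify fires later */
    gpr_mu_unlock(&calld->mu_state);
  }
]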
diff --git a/src/core/client_config/uri_parser.c b/src/core/client_config/uri_parser.c
index df9f32d403..cbdfffcf8e 100644
--- a/src/core/client_config/uri_parser.c
+++ b/src/core/client_config/uri_parser.c
@@ -37,6 +37,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
/** a size_t default value... maps to all 1's */
@@ -120,8 +121,7 @@ static int parse_fragment_or_query(const char *uri_text, size_t *i) {
} else {
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
default:
(*i) += advance;
break;
diff --git a/src/core/compression/algorithm.c b/src/core/compression/algorithm.c
index d55e499f5e..fd95a3c891 100644
--- a/src/core/compression/algorithm.c
+++ b/src/core/compression/algorithm.c
@@ -101,6 +101,7 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
default:
/* we shouldn't be making it here */
abort();
+ return GRPC_COMPRESS_NONE;
}
}
@@ -116,6 +117,7 @@ grpc_compression_level grpc_compression_level_for_algorithm(
}
}
abort();
+ return GRPC_COMPRESS_LEVEL_NONE;
}
void grpc_compression_options_init(grpc_compression_options *opts) {
diff --git a/src/core/httpcli/parser.c b/src/core/httpcli/parser.c
index 404906d5ae..046770c094 100644
--- a/src/core/httpcli/parser.c
+++ b/src/core/httpcli/parser.c
@@ -139,8 +139,7 @@ static int finish_line(grpc_httpcli_parser *parser) {
}
break;
case GRPC_HTTPCLI_BODY:
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
parser->cur_line_length = 0;
@@ -165,8 +164,7 @@ static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
} else {
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
case GRPC_HTTPCLI_BODY:
if (parser->r.body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
@@ -177,10 +175,7 @@ static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
parser->r.body_length++;
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
-
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
void grpc_httpcli_parser_init(grpc_httpcli_parser *parser) {
diff --git a/src/core/iomgr/closure.c b/src/core/iomgr/closure.c
index 3265425789..d91681990f 100644
--- a/src/core/iomgr/closure.c
+++ b/src/core/iomgr/closure.c
@@ -33,6 +33,8 @@
#include "src/core/iomgr/closure.h"
+#include <grpc/support/alloc.h>
+
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@@ -69,3 +71,25 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
}
src->head = src->tail = NULL;
}
+
+typedef struct {
+ grpc_iomgr_cb_func cb;
+ void *cb_arg;
+ grpc_closure wrapper;
+} wrapped_closure;
+
+static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+ wrapped_closure *wc = arg;
+ grpc_iomgr_cb_func cb = wc->cb;
+ void *cb_arg = wc->cb_arg;
+ gpr_free(wc);
+ cb(exec_ctx, cb_arg, success);
+}
+
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
+ wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wc->cb = cb;
+ wc->cb_arg = cb_arg;
+ grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
+ return &wc->wrapper;
+}
diff --git a/src/core/iomgr/closure.h b/src/core/iomgr/closure.h
index 982ffa4e1b..d812659af0 100644
--- a/src/core/iomgr/closure.h
+++ b/src/core/iomgr/closure.h
@@ -77,6 +77,9 @@ struct grpc_closure {
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg);
+/* Create a heap allocated closure: try to avoid except for very rare events */
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
+
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
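[Editor's note: grpc_closure_create pairs with closure_wrapper in closure.c above — the wrapper frees its heap allocation before invoking the wrapped callback, so the returned closure is strictly one-shot. A usage sketch with hypothetical names, not part of the commit, mirroring how pick_first.c schedules destroy_subchannels:

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  /* runs once; the wrapping allocation has already been freed */
}

static void schedule_once(grpc_exec_ctx *exec_ctx, void *arg) {
  /* do not retain the returned pointer: it frees itself when run */
  grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(on_done, arg), 1);
}
]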
diff --git a/src/core/iomgr/exec_ctx.c b/src/core/iomgr/exec_ctx.c
index f2914d376e..410b34c521 100644
--- a/src/core/iomgr/exec_ctx.c
+++ b/src/core/iomgr/exec_ctx.c
@@ -35,18 +35,24 @@
#include <grpc/support/log.h>
+#include "src/core/profiling/timers.h"
+
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int did_something = 0;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
- did_something = 1;
+ did_something++;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, c->success);
+ GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}
}
+ GPR_TIMER_END("grpc_exec_ctx_flush", 0);
return did_something;
}
diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c
index b48b7f050a..7ff80e6cf8 100644
--- a/src/core/iomgr/fd_posix.c
+++ b/src/core/iomgr/fd_posix.c
@@ -45,10 +45,8 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
-enum descriptor_state {
- NOT_READY = 0,
- READY = 1
-}; /* or a pointer to a closure to call */
+#define CLOSURE_NOT_READY ((grpc_closure *)0)
+#define CLOSURE_READY ((grpc_closure *)1)
/* We need to keep a freelist not because of any concerns of malloc performance
* but instead so that implementations with multiple threads in (for example)
@@ -88,14 +86,13 @@ static grpc_fd *alloc_fd(int fd) {
gpr_mu_unlock(&fd_freelist_mu);
if (r == NULL) {
r = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&r->set_state_mu);
- gpr_mu_init(&r->watcher_mu);
+ gpr_mu_init(&r->mu);
}
gpr_atm_rel_store(&r->refst, 1);
- gpr_atm_rel_store(&r->readst, NOT_READY);
- gpr_atm_rel_store(&r->writest, NOT_READY);
- gpr_atm_rel_store(&r->shutdown, 0);
+ r->shutdown = 0;
+ r->read_closure = CLOSURE_NOT_READY;
+ r->write_closure = CLOSURE_NOT_READY;
r->fd = fd;
r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
&r->inactive_watcher_root;
@@ -107,8 +104,7 @@ static grpc_fd *alloc_fd(int fd) {
}
static void destroy(grpc_fd *fd) {
- gpr_mu_destroy(&fd->set_state_mu);
- gpr_mu_destroy(&fd->watcher_mu);
+ gpr_mu_destroy(&fd->mu);
gpr_free(fd);
}
@@ -173,39 +169,35 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
-static void pollset_kick_locked(grpc_pollset *pollset) {
- gpr_mu_lock(GRPC_POLLSET_MU(pollset));
- grpc_pollset_kick(pollset, NULL);
- gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
+static void pollset_kick_locked(grpc_fd_watcher *watcher) {
+ gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
+ GPR_ASSERT(watcher->worker);
+ grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next->pollset);
+ pollset_kick_locked(fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher->pollset);
+ pollset_kick_locked(fd->read_watcher);
} else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher->pollset);
+ pollset_kick_locked(fd->write_watcher);
}
}
-static void maybe_wake_one_watcher(grpc_fd *fd) {
- gpr_mu_lock(&fd->watcher_mu);
- maybe_wake_one_watcher_locked(fd);
- gpr_mu_unlock(&fd->watcher_mu);
-}
-
static void wake_all_watchers_locked(grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher->pollset);
+ pollset_kick_locked(watcher);
}
if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher->pollset);
+ pollset_kick_locked(fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher->pollset);
+ pollset_kick_locked(fd->write_watcher);
}
}
@@ -218,7 +210,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason) {
fd->on_done_closure = on_done;
shutdown(fd->fd, SHUT_RDWR);
- gpr_mu_lock(&fd->watcher_mu);
+ gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
fd->closed = 1;
@@ -227,7 +219,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
} else {
wake_all_watchers_locked(fd);
}
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
}
@@ -247,136 +239,121 @@ void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
-static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st,
- grpc_closure *closure) {
- switch (gpr_atm_acq_load(st)) {
- case NOT_READY:
- /* There is no race if the descriptor is already ready, so we skip
- the interlocked op in that case. As long as the app doesn't
- try to set the same upcall twice (which it shouldn't) then
- oldval should never be anything other than READY or NOT_READY. We
- don't
- check for user error on the fast path. */
- if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
- /* swap was successful -- the closure will run after the next
- set_ready call. NOTE: we don't have an ABA problem here,
- since we should never have concurrent calls to the same
- notify_on function. */
- maybe_wake_one_watcher(fd);
- return;
- }
- /* swap was unsuccessful due to an intervening set_ready call.
- Fall through to the READY code below */
- case READY:
- GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
- gpr_atm_rel_store(st, NOT_READY);
- grpc_exec_ctx_enqueue(exec_ctx, closure,
- !gpr_atm_acq_load(&fd->shutdown));
- return;
- default: /* WAITING */
- /* upcallptr was set to a different closure. This is an error! */
- gpr_log(GPR_ERROR,
- "User called a notify_on function with a previous callback still "
- "pending");
- abort();
+static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st, grpc_closure *closure) {
+ if (*st == CLOSURE_NOT_READY) {
+ /* not ready ==> switch to a waiting state by setting the closure */
+ *st = closure;
+ } else if (*st == CLOSURE_READY) {
+ /* already ready ==> queue the closure to run immediately */
+ *st = CLOSURE_NOT_READY;
+ grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown);
+ maybe_wake_one_watcher_locked(fd);
+ } else {
+ /* upcallptr was set to a different closure. This is an error! */
+ gpr_log(GPR_ERROR,
+ "User called a notify_on function with a previous callback still "
+ "pending");
+ abort();
}
- gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
- abort();
}
-static void set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- gpr_atm *st) {
- gpr_intptr state = gpr_atm_acq_load(st);
-
- switch (state) {
- case READY:
- /* duplicate ready, ignore */
- return;
- case NOT_READY:
- if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
- /* swap was successful -- the closure will run after the next
- notify_on call. */
- return;
- }
- /* swap was unsuccessful due to an intervening set_ready call.
- Fall through to the WAITING code below */
- state = gpr_atm_acq_load(st);
- default: /* waiting */
- GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
- gpr_atm_no_barrier_load(st) != NOT_READY);
- grpc_exec_ctx_enqueue(exec_ctx, (grpc_closure *)state,
- !gpr_atm_acq_load(&fd->shutdown));
- gpr_atm_rel_store(st, NOT_READY);
- return;
+/* returns 1 if state becomes not ready */
+static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st) {
+ if (*st == CLOSURE_READY) {
+ /* duplicate ready ==> ignore */
+ return 0;
+ } else if (*st == CLOSURE_NOT_READY) {
+ /* not ready, and not waiting ==> flag ready */
+ *st = CLOSURE_READY;
+ return 0;
+ } else {
+ /* waiting ==> queue closure */
+ grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown);
+ *st = CLOSURE_NOT_READY;
+ return 1;
}
}
-static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st) {
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
- gpr_mu_lock(&fd->set_state_mu);
+ gpr_mu_lock(&fd->mu);
set_ready_locked(exec_ctx, fd, st);
- gpr_mu_unlock(&fd->set_state_mu);
+ gpr_mu_unlock(&fd->mu);
}
void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- gpr_mu_lock(&fd->set_state_mu);
- GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
- gpr_atm_rel_store(&fd->shutdown, 1);
- set_ready_locked(exec_ctx, fd, &fd->readst);
- set_ready_locked(exec_ctx, fd, &fd->writest);
- gpr_mu_unlock(&fd->set_state_mu);
+ gpr_mu_lock(&fd->mu);
+ GPR_ASSERT(!fd->shutdown);
+ fd->shutdown = 1;
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ gpr_mu_unlock(&fd->mu);
}
void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- notify_on(exec_ctx, fd, &fd->readst, closure);
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
+ gpr_mu_unlock(&fd->mu);
}
void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- notify_on(exec_ctx, fd, &fd->writest, closure);
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
+ gpr_mu_unlock(&fd->mu);
}
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- gpr_uint32 read_mask, gpr_uint32 write_mask,
- grpc_fd_watcher *watcher) {
+ grpc_pollset_worker *worker, gpr_uint32 read_mask,
+ gpr_uint32 write_mask, grpc_fd_watcher *watcher) {
gpr_uint32 mask = 0;
+ grpc_closure *cur;
+ int requested;
/* keep track of pollers that have requested our events, in case they change
*/
GRPC_FD_REF(fd, "poll");
- gpr_mu_lock(&fd->watcher_mu);
+ gpr_mu_lock(&fd->mu);
+
/* if we are shutdown, then don't add to the watcher set */
- if (gpr_atm_no_barrier_load(&fd->shutdown)) {
+ if (fd->shutdown) {
watcher->fd = NULL;
watcher->pollset = NULL;
- gpr_mu_unlock(&fd->watcher_mu);
+ watcher->worker = NULL;
+ gpr_mu_unlock(&fd->mu);
GRPC_FD_UNREF(fd, "poll");
return 0;
}
+
/* if there is nobody polling for read, but we need to, then start doing so */
- if (read_mask && !fd->read_watcher &&
- (gpr_uintptr)gpr_atm_acq_load(&fd->readst) > READY) {
+ cur = fd->read_closure;
+ requested = cur != CLOSURE_READY;
+ if (read_mask && fd->read_watcher == NULL && requested) {
fd->read_watcher = watcher;
mask |= read_mask;
}
/* if there is nobody polling for write, but we need to, then start doing so
*/
- if (write_mask && !fd->write_watcher &&
- (gpr_uintptr)gpr_atm_acq_load(&fd->writest) > READY) {
+ cur = fd->write_closure;
+ requested = cur != CLOSURE_READY;
+ if (write_mask && fd->write_watcher == NULL && requested) {
fd->write_watcher = watcher;
mask |= write_mask;
}
/* if not polling, remember this watcher in case we need someone to later */
- if (mask == 0) {
+ if (mask == 0 && worker != NULL) {
watcher->next = &fd->inactive_watcher_root;
watcher->prev = watcher->next->prev;
watcher->next->prev = watcher->prev->next = watcher;
}
watcher->pollset = pollset;
+ watcher->worker = worker;
watcher->fd = fd;
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
return mask;
}
@@ -391,24 +368,39 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
return;
}
- gpr_mu_lock(&fd->watcher_mu);
+ gpr_mu_lock(&fd->mu);
+
if (watcher == fd->read_watcher) {
/* remove read watcher, kick if we still need a read */
was_polling = 1;
- kick = kick || !got_read;
+ if (!got_read) {
+ kick = 1;
+ }
fd->read_watcher = NULL;
}
if (watcher == fd->write_watcher) {
/* remove write watcher, kick if we still need a write */
was_polling = 1;
- kick = kick || !got_write;
+ if (!got_write) {
+ kick = 1;
+ }
fd->write_watcher = NULL;
}
- if (!was_polling) {
+ if (!was_polling && watcher->worker != NULL) {
/* remove from inactive list */
watcher->next->prev = watcher->prev;
watcher->prev->next = watcher->next;
}
+ if (got_read) {
+ if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
+ kick = 1;
+ }
+ }
+ if (got_write) {
+ if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
+ kick = 1;
+ }
+ }
if (kick) {
maybe_wake_one_watcher_locked(fd);
}
@@ -417,17 +409,17 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
close(fd->fd);
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
- gpr_mu_unlock(&fd->watcher_mu);
+ gpr_mu_unlock(&fd->mu);
GRPC_FD_UNREF(fd, "poll");
}
void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->readst);
+ set_ready(exec_ctx, fd, &fd->read_closure);
}
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->writest);
+ set_ready(exec_ctx, fd, &fd->write_closure);
}
#endif
diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h
index 089aa4d717..dc917ebbc0 100644
--- a/src/core/iomgr/fd_posix.h
+++ b/src/core/iomgr/fd_posix.h
@@ -46,6 +46,7 @@ typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
grpc_pollset *pollset;
+ grpc_pollset_worker *worker;
grpc_fd *fd;
} grpc_fd_watcher;
@@ -58,8 +59,8 @@ struct grpc_fd {
and just unref by 1 when we're ready to flag the object as orphaned */
gpr_atm refst;
- gpr_mu set_state_mu;
- gpr_atm shutdown;
+ gpr_mu mu;
+ int shutdown;
int closed;
/* The watcher list.
@@ -84,18 +85,16 @@ struct grpc_fd {
If at a later time there becomes need of a poller to poll, one of
the inactive pollers may be kicked out of their poll loops to take
that responsibility. */
- gpr_mu watcher_mu;
grpc_fd_watcher inactive_watcher_root;
grpc_fd_watcher *read_watcher;
grpc_fd_watcher *write_watcher;
- gpr_atm readst;
- gpr_atm writest;
+ grpc_closure *read_closure;
+ grpc_closure *write_closure;
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
- grpc_closure *shutdown_closures[2];
grpc_iomgr_object iomgr_object;
};
@@ -126,10 +125,12 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
fd's current interest (such as epoll) do not need to call this function.
MUST NOT be called with a pollset lock taken */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- gpr_uint32 read_mask, gpr_uint32 write_mask,
- grpc_fd_watcher *rec);
+ grpc_pollset_worker *worker, gpr_uint32 read_mask,
+ gpr_uint32 write_mask, grpc_fd_watcher *rec);
/* Complete polling previously started with grpc_fd_begin_poll
- MUST NOT be called with a pollset lock taken */
+ MUST NOT be called with a pollset lock taken
+ if got_read or got_write are 1, also does the become_{readable,writable} as
+ appropriate. */
void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
int got_read, int got_write);
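[Editor's note: with got_read/got_write now flowing into grpc_fd_end_poll, the poll()-based pollers changed below all follow one begin/poll/end pattern. A condensed sketch of that pattern — assumed locals, not part of the commit:

  grpc_fd_watcher watcher;
  struct pollfd pfd;
  pfd.fd = fd->fd;
  pfd.revents = 0;
  pfd.events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT,
                                         &watcher);
  /* ... r = grpc_poll_function(&pfd, 1, timeout); ... */
  grpc_fd_end_poll(exec_ctx, &watcher,
                   pfd.revents & (POLLIN | POLLHUP | POLLERR),
                   pfd.revents & (POLLOUT | POLLHUP | POLLERR));
]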
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index faf0a6362b..2aafd21dfb 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -41,10 +41,11 @@
#include <sys/epoll.h>
#include <unistd.h>
-#include "src/core/iomgr/fd_posix.h"
-#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/iomgr/fd_posix.h"
+#include "src/core/support/block_annotate.h"
+#include "src/core/profiling/timers.h"
typedef struct wakeup_fd_hdl {
grpc_wakeup_fd wakeup_fd;
@@ -72,7 +73,7 @@ static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* We pretend to be polling whilst adding an fd to keep the fd from being
closed during the add. This may result in a spurious wakeup being assigned
to this pollset whilst adding, but that should be benign. */
- GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, 0, 0, &watcher) == 0);
+ GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0);
if (watcher.fd != NULL) {
ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = fd;
@@ -182,9 +183,11 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
+ GPR_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GPR_TIMER_END("poll", 0);
if (poll_rv < 0) {
if (errno != EINTR) {
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index 1356ebe7a0..faa6c14491 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -102,6 +102,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
+
int timeout;
int r;
size_t i, j, fd_count;
@@ -147,8 +150,8 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
gpr_mu_unlock(&pollset->mu);
for (i = 2; i < pfd_count; i++) {
- pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
- POLLOUT, &watchers[i]);
+ pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
+ POLLIN, POLLOUT, &watchers[i]);
}
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
@@ -157,34 +160,29 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
- for (i = 2; i < pfd_count; i++) {
- grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN,
- pfds[i].revents & POLLOUT);
- }
-
if (r < 0) {
- if (errno != EINTR) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
}
} else if (r == 0) {
- /* do nothing */
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ }
} else {
- if (pfds[0].revents & POLLIN) {
+ if (pfds[0].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
- if (pfds[1].revents & POLLIN) {
+ if (pfds[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
+ grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
continue;
}
- if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(exec_ctx, watchers[i].fd);
- }
- if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(exec_ctx, watchers[i].fd);
- }
+ grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
+ pfds[i].revents & POLLOUT_CHECK);
}
}
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index b663780a02..4d8bc5374f 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -98,29 +98,70 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next->prev = worker;
}
-void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+void grpc_pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ gpr_uint32 flags) {
+ GPR_TIMER_BEGIN("grpc_pollset_kick_ext", 0);
+
/* pollset->mu already held */
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+ GPR_TIMER_BEGIN("grpc_pollset_kick_ext.broadcast", 0);
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
}
p->kicked_without_pollers = 1;
+ GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
} else if (gpr_tls_get(&g_current_thread_worker) !=
(gpr_intptr)specific_worker) {
+ GPR_TIMER_MARK("different_thread_worker", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = 1;
+ }
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
+ GPR_TIMER_MARK("kick_yoself", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = 1;
+ }
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
+ GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != NULL) {
- push_back_worker(p, specific_worker);
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ if (gpr_tls_get(&g_current_thread_worker) ==
+ (gpr_intptr)specific_worker) {
+ GPR_TIMER_MARK("kick_anonymous_not_self", 0);
+ push_back_worker(p, specific_worker);
+ specific_worker = pop_front_worker(p);
+ if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
+ gpr_tls_get(&g_current_thread_worker) ==
+ (gpr_intptr)specific_worker) {
+ push_back_worker(p, specific_worker);
+ specific_worker = NULL;
+ }
+ }
+ if (specific_worker != NULL) {
+ GPR_TIMER_MARK("finally_kick", 0);
+ push_back_worker(p, specific_worker);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ }
} else {
+ GPR_TIMER_MARK("kicked_no_pollers", 0);
p->kicked_without_pollers = 1;
}
}
+
+ GPR_TIMER_END("grpc_pollset_kick_ext", 0);
+}
+
+void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+ grpc_pollset_kick_ext(p, specific_worker, 0);
}
/* global state management */
@@ -195,52 +236,91 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* pollset->mu already held */
int added_worker = 0;
int locked = 1;
+ int queued_work = 0;
+ int keep_polling = 0;
+ GPR_TIMER_BEGIN("grpc_pollset_work", 0);
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
+ worker->reevaluate_polling_on_wakeup = 0;
/* TODO(ctiller): pool these */
grpc_wakeup_fd_init(&worker->wakeup_fd);
+ /* If there's work waiting for the pollset to be idle, and the
+ pollset is idle, then do that work */
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
+ /* Check alarms - these are a global resource so we just ping
+ each time through on every pollset.
+ May update deadline to ensure timely wakeups.
+ TODO(ctiller): can this work be localized? */
if (grpc_alarm_check(exec_ctx, now, &deadline)) {
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
+ /* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
goto done;
}
+ /* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
- /* Give do_promote priority so we don't starve it out */
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
- if (!pollset->kicked_without_pollers) {
- push_front_worker(pollset, worker);
- added_worker = 1;
- gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
- gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
- pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker, deadline,
- now);
- locked = 0;
- gpr_tls_set(&g_current_thread_poller, 0);
- gpr_tls_set(&g_current_thread_worker, 0);
- } else {
- pollset->kicked_without_pollers = 0;
- }
-done:
- if (!locked) {
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- locked = 1;
+ /* Start polling, and keep doing so while we're being asked to
+ re-evaluate our pollers (this allows poll() based pollers to
+ ensure they don't miss wakeups) */
+ keep_polling = 1;
+ while (keep_polling) {
+ keep_polling = 0;
+ if (!pollset->kicked_without_pollers) {
+ if (!added_worker) {
+ push_front_worker(pollset, worker);
+ added_worker = 1;
+ }
+ gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
+ gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
+ GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
+ pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
+ deadline, now);
+ GPR_TIMER_END("maybe_work_and_unlock", 0);
+ locked = 0;
+ gpr_tls_set(&g_current_thread_poller, 0);
+ gpr_tls_set(&g_current_thread_worker, 0);
+ } else {
+ pollset->kicked_without_pollers = 0;
+ }
+ /* Finished execution - start cleaning up.
+ Note that we may arrive here from outside the enclosing while() loop.
+ In that case we won't loop though as we haven't added worker to the
+ worker list, which means nobody could ask us to re-evaluate polling). */
+ done:
+ if (!locked) {
+ queued_work |= grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ locked = 1;
+ }
+ /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
+ a loop */
+ if (worker->reevaluate_polling_on_wakeup) {
+ worker->reevaluate_polling_on_wakeup = 0;
+ pollset->kicked_without_pollers = 0;
+ if (queued_work) {
+ /* If there's queued work on the list, then set the deadline to be
+ immediate so we get back out of the polling loop quickly */
+ deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ }
+ keep_polling = 1;
+ }
}
- grpc_wakeup_fd_destroy(&worker->wakeup_fd);
if (added_worker) {
remove_worker(pollset, worker);
}
+ grpc_wakeup_fd_destroy(&worker->wakeup_fd);
if (pollset->shutting_down) {
if (grpc_pollset_has_workers(pollset)) {
grpc_pollset_kick(pollset, NULL);
@@ -261,6 +341,7 @@ done:
gpr_mu_lock(&pollset->mu);
}
}
+ GPR_TIMER_END("grpc_pollset_work", 0);
}
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -454,6 +535,9 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset_worker *worker,
gpr_timespec deadline,
gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
+
struct pollfd pfd[3];
grpc_fd *fd;
grpc_fd_watcher fd_watcher;
@@ -479,8 +563,8 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[2].revents = 0;
GRPC_FD_REF(fd, "basicpoll_begin");
gpr_mu_unlock(&pollset->mu);
- pfd[2].events =
- (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
+ pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
+ POLLOUT, &fd_watcher);
if (pfd[2].events != 0) {
nfds++;
}
@@ -492,36 +576,33 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
even going into the blocking annotation if possible */
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
+ GPR_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
r = grpc_poll_function(pfd, nfds, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
- GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
-
- if (fd) {
- grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN,
- pfd[2].revents & POLLOUT);
- }
+ GPR_TIMER_END("poll", 0);
if (r < 0) {
- if (errno != EINTR) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
} else if (r == 0) {
- /* do nothing */
+ if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ }
} else {
- if (pfd[0].revents & POLLIN) {
+ if (pfd[0].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
- if (pfd[1].revents & POLLIN) {
+ if (pfd[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
}
if (nfds > 2) {
- if (pfd[2].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(exec_ctx, fd);
- }
- if (pfd[2].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(exec_ctx, fd);
- }
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
+ pfd[2].revents & POLLOUT_CHECK);
+ } else if (fd) {
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
}
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index 83c5258539..34f76db2af 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -50,6 +50,7 @@ struct grpc_fd;
typedef struct grpc_pollset_worker {
grpc_wakeup_fd wakeup_fd;
+ int reevaluate_polling_on_wakeup;
struct grpc_pollset_worker *next;
struct grpc_pollset_worker *prev;
} grpc_pollset_worker;
@@ -111,6 +112,16 @@ void grpc_kick_drain(grpc_pollset *p);
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now);
+/* Allow kick to wakeup the currently polling worker */
+#define GRPC_POLLSET_CAN_KICK_SELF 1
+/* Force the wakee to repoll when awoken */
+#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
+/* As per grpc_pollset_kick, with an extended set of flags (defined above)
+ -- mostly for fd_posix's use. */
+void grpc_pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ gpr_uint32 flags);
+
/* turn a pollset into a multipoller: platform specific */
typedef void (*grpc_platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
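[Editor's note: GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP is the flag fd_posix.c's pollset_kick_locked (earlier in this diff) passes when an fd gains a pending closure while a worker is already blocked in poll(). A reduced sketch of that call site, for reference:

  gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
  GPR_ASSERT(watcher->worker);
  grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
                        GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
  gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
]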
diff --git a/src/core/iomgr/tcp_client_posix.c b/src/core/iomgr/tcp_client_posix.c
index aca2691c41..fe20039264 100644
--- a/src/core/iomgr/tcp_client_posix.c
+++ b/src/core/iomgr/tcp_client_posix.c
@@ -191,7 +191,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
goto finish;
}
- abort();
+ GPR_UNREACHABLE_CODE(return );
finish:
if (fd != NULL) {
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 4a57037a72..915553d509 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -180,7 +180,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(!tcp->finished_edge);
GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
- GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
+ GPR_TIMER_BEGIN("tcp_continue_read", 0);
while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
@@ -199,11 +199,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
+ GPR_TIMER_BEGIN("recvmsg", 1);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
- GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
+ GPR_TIMER_END("recvmsg", 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@@ -240,7 +240,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
TCP_UNREF(exec_ctx, tcp, "read");
}
- GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
+ GPR_TIMER_END("tcp_continue_read", 0);
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
@@ -316,12 +316,12 @@ static flush_result tcp_flush(grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 0);
+ GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
- GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);
+ GPR_TIMER_END("sendmsg", 0);
if (sent_length < 0) {
if (errno == EAGAIN) {
@@ -370,17 +370,17 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
return;
}
- GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
status = tcp_flush(tcp);
if (status == FLUSH_PENDING) {
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
+ GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
+ GPR_TIMER_END("tcp_handle_write.cb", 0);
TCP_UNREF(exec_ctx, tcp, "write");
}
- GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -399,11 +399,11 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
}
- GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
+ GPR_TIMER_BEGIN("tcp_write", 0);
GPR_ASSERT(tcp->write_cb == NULL);
if (buf->length == 0) {
- GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
+ GPR_TIMER_END("tcp_write", 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, 1);
return;
}
@@ -420,7 +420,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE);
}
- GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
+ GPR_TIMER_END("tcp_write", 0);
}
static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index 13bd67576f..99c76dcbe9 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -352,7 +352,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
gpr_free(addr_str);
}
- abort();
+ GPR_UNREACHABLE_CODE(return );
error:
gpr_mu_lock(&sp->server->mu);
diff --git a/src/core/iomgr/tcp_server_windows.c b/src/core/iomgr/tcp_server_windows.c
index db3319b3c6..3fea8b5b35 100644
--- a/src/core/iomgr/tcp_server_windows.c
+++ b/src/core/iomgr/tcp_server_windows.c
@@ -336,6 +336,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
+ } else {
+ closesocket(sock);
}
}
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
index 1304f2067e..9903e970e6 100644
--- a/src/core/iomgr/udp_server.c
+++ b/src/core/iomgr/udp_server.c
@@ -278,7 +278,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
- sp->read_cb(sp->emfd, sp->server->grpc_server);
+ sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
diff --git a/src/core/iomgr/udp_server.h b/src/core/iomgr/udp_server.h
index dbbe097109..de5736c426 100644
--- a/src/core/iomgr/udp_server.h
+++ b/src/core/iomgr/udp_server.h
@@ -43,7 +43,8 @@ typedef struct grpc_server grpc_server;
typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
-typedef void (*grpc_udp_server_read_cb)(grpc_fd *emfd, grpc_server *server);
+typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
+ grpc_server *server);
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);
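[Editor's note: the widened callback type above means UDP read callbacks now receive the caller's exec_ctx. A sketch of a conforming callback — hypothetical name, not part of the commit:

static void on_udp_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                        grpc_server *server) {
  /* drain emfd here, queueing any follow-up work on exec_ctx rather than
     creating a fresh execution context per datagram */
}
]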
diff --git a/src/core/iomgr/wakeup_fd_eventfd.c b/src/core/iomgr/wakeup_fd_eventfd.c
index 48eb1afb3d..f67379e4fc 100644
--- a/src/core/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/iomgr/wakeup_fd_eventfd.c
@@ -39,9 +39,11 @@
#include <sys/eventfd.h>
#include <unistd.h>
-#include "src/core/iomgr/wakeup_fd_posix.h"
#include <grpc/support/log.h>
+#include "src/core/iomgr/wakeup_fd_posix.h"
+#include "src/core/profiling/timers.h"
+
static void eventfd_create(grpc_wakeup_fd* fd_info) {
int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
/* TODO(klempner): Handle failure more gracefully */
@@ -60,9 +62,11 @@ static void eventfd_consume(grpc_wakeup_fd* fd_info) {
static void eventfd_wakeup(grpc_wakeup_fd* fd_info) {
int err;
+ GPR_TIMER_BEGIN("eventfd_wakeup", 0);
do {
err = eventfd_write(fd_info->read_fd, 1);
} while (err < 0 && errno == EINTR);
+ GPR_TIMER_END("eventfd_wakeup", 0);
}
static void eventfd_destroy(grpc_wakeup_fd* fd_info) {
diff --git a/src/core/profiling/basic_timers.c b/src/core/profiling/basic_timers.c
index 2f6c88daac..b49cdd07b3 100644
--- a/src/core/profiling/basic_timers.c
+++ b/src/core/profiling/basic_timers.c
@@ -44,98 +44,91 @@
#include <grpc/support/thd.h>
#include <stdio.h>
-typedef enum {
- BEGIN = '{',
- END = '}',
- MARK = '.',
- IMPORTANT = '!'
-} marker_type;
-
-typedef struct grpc_timer_entry {
+typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
+
+typedef struct gpr_timer_entry {
gpr_timespec tm;
- int tag;
const char *tagstr;
- marker_type type;
- void *id;
const char *file;
int line;
-} grpc_timer_entry;
+ char type;
+ gpr_uint8 important;
+} gpr_timer_entry;
+
+#define MAX_COUNT (1024 * 1024 / sizeof(gpr_timer_entry))
-#define MAX_COUNT (1024 * 1024 / sizeof(grpc_timer_entry))
+static __thread gpr_timer_entry g_log[MAX_COUNT];
+static __thread int g_count;
+static gpr_once g_once_init = GPR_ONCE_INIT;
+static FILE *output_file;
-static __thread grpc_timer_entry log[MAX_COUNT];
-static __thread int count;
+static void close_output() { fclose(output_file); }
+
+static void init_output() {
+ output_file = fopen("latency_trace.txt", "w");
+ GPR_ASSERT(output_file);
+ atexit(close_output);
+}
static void log_report() {
int i;
- for (i = 0; i < count; i++) {
- grpc_timer_entry *entry = &(log[i]);
- printf("GRPC_LAT_PROF %ld.%09d %p %c %d(%s) %p %s %d\n", entry->tm.tv_sec,
- entry->tm.tv_nsec, (void *)(gpr_intptr)gpr_thd_currentid(),
- entry->type, entry->tag, entry->tagstr, entry->id, entry->file,
- entry->line);
+ gpr_once_init(&g_once_init, init_output);
+ for (i = 0; i < g_count; i++) {
+ gpr_timer_entry *entry = &(g_log[i]);
+ fprintf(output_file,
+ "{\"t\": %ld.%09d, \"thd\": \"%p\", \"type\": \"%c\", \"tag\": "
+ "\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
+ entry->tm.tv_sec, entry->tm.tv_nsec,
+ (void *)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tagstr,
+ entry->file, entry->line, entry->important);
}
/* Now clear out the log */
- count = 0;
+ g_count = 0;
}
-static void grpc_timers_log_add(int tag, const char *tagstr, marker_type type,
- void *id, const char *file, int line) {
- grpc_timer_entry *entry;
+static void gpr_timers_log_add(const char *tagstr, marker_type type,
+ int important, const char *file, int line) {
+ gpr_timer_entry *entry;
/* TODO (vpai) : Improve concurrency */
- if (count == MAX_COUNT) {
+ if (g_count == MAX_COUNT) {
log_report();
}
- entry = &log[count++];
+ entry = &g_log[g_count++];
entry->tm = gpr_now(GPR_CLOCK_PRECISE);
- entry->tag = tag;
entry->tagstr = tagstr;
entry->type = type;
- entry->id = id;
entry->file = file;
entry->line = line;
+ entry->important = important != 0;
}
/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
- const char *file, int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, MARK, id, file, line);
- }
+void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, MARK, important, file, line);
}
-void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
- const char *file, int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, IMPORTANT, id, file, line);
- }
+void gpr_timer_begin(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, BEGIN, important, file, line);
}
-void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
- int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, BEGIN, id, file, line);
- }
-}
-
-void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
- int line) {
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
- grpc_timers_log_add(tag, tagstr, END, id, file, line);
- }
+void gpr_timer_end(const char *tagstr, int important, const char *file,
+ int line) {
+ gpr_timers_log_add(tagstr, END, important, file, line);
}
/* Basic profiler specific API functions. */
-void grpc_timers_global_init(void) {}
+void gpr_timers_global_init(void) {}
-void grpc_timers_global_destroy(void) {}
+void gpr_timers_global_destroy(void) {}
#else /* !GRPC_BASIC_PROFILER */
-void grpc_timers_global_init(void) {}
+void gpr_timers_global_init(void) {}
-void grpc_timers_global_destroy(void) {}
+void gpr_timers_global_destroy(void) {}
#endif /* GRPC_BASIC_PROFILER */
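
With the rewrite above, the basic profiler buffers entries in a thread-local array and flushes them to latency_trace.txt as one JSON object per line, following the fprintf format in log_report. A purely illustrative BEGIN/END pair (all values invented) would read:

    {"t": 1436308156.000123456, "thd": "0x7f0d4d3ff700", "type": "{", "tag": "grpc_call_start_batch", "file": "src/core/surface/call.c", "line": 1645, "imp": 0}
    {"t": 1436308156.000191022, "thd": "0x7f0d4d3ff700", "type": "}", "tag": "grpc_call_start_batch", "file": "src/core/surface/call.c", "line": 1884, "imp": 0}
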
diff --git a/src/core/profiling/stap_timers.c b/src/core/profiling/stap_timers.c
index 6868a674a9..efcd1af4a1 100644
--- a/src/core/profiling/stap_timers.c
+++ b/src/core/profiling/stap_timers.c
@@ -42,23 +42,23 @@
#include "src/core/profiling/stap_probes.h"
/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
- const char *file, int line) {
+void gpr_timer_add_mark(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_ADD_MARK(tag);
}
-void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
- const char *file, int line) {
+void gpr_timer_add_important_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
-void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
- int line) {
+void gpr_timer_begin(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
-void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
- int line) {
+void gpr_timer_end(int tag, const char *tagstr, void *id, const char *file,
+ int line) {
_STAP_TIMING_NS_END(tag);
}
diff --git a/src/core/profiling/timers.h b/src/core/profiling/timers.h
index a70520408c..0d112e7248 100644
--- a/src/core/profiling/timers.h
+++ b/src/core/profiling/timers.h
@@ -38,65 +38,28 @@
extern "C" {
#endif
-void grpc_timers_global_init(void);
-void grpc_timers_global_destroy(void);
-
-void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
- const char *file, int line);
-void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
- const char *file, int line);
-void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
- int line);
-void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
- int line);
-
-enum grpc_profiling_tags {
- /* Any GRPC_PTAG_* >= than the threshold won't generate any profiling mark. */
- GRPC_PTAG_IGNORE_THRESHOLD = 1000000,
-
- /* Re. Protos. */
- GRPC_PTAG_PROTO_SERIALIZE = 100 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_PROTO_DESERIALIZE = 101 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* Re. sockets. */
- GRPC_PTAG_HANDLE_READ = 200 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_SENDMSG = 201 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_RECVMSG = 202 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_POLL_FINISHED = 203 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_TCP_CB_WRITE = 204 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_TCP_WRITE = 205 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_CALL_ON_DONE_RECV = 206 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* C++ */
- GRPC_PTAG_CPP_CALL_CREATED = 300 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_CPP_PERFORM_OPS = 301 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* Transports */
- GRPC_PTAG_HTTP2_UNLOCK = 401 + GRPC_PTAG_IGNORE_THRESHOLD,
- GRPC_PTAG_HTTP2_UNLOCK_CLEANUP = 402 + GRPC_PTAG_IGNORE_THRESHOLD,
-
- /* > 1024 Unassigned reserved. For any miscellaneous use.
- * Use addition to generate tags from this base or take advantage of the 10
- * zero'd bits for OR-ing. */
- GRPC_PTAG_OTHER_BASE = 1024
-};
+void gpr_timers_global_init(void);
+void gpr_timers_global_destroy(void);
+
+void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+ int line);
+void gpr_timer_begin(const char *tagstr, int important, const char *file,
+ int line);
+void gpr_timer_end(const char *tagstr, int important, const char *file,
+ int line);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
-#define GRPC_TIMER_MARK(tag, id) \
- do { \
- } while (0)
-
-#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
- do { \
+#define GPR_TIMER_MARK(tag, important) \
+ do { \
} while (0)
-#define GRPC_TIMER_BEGIN(tag, id) \
- do { \
+#define GPR_TIMER_BEGIN(tag, important) \
+ do { \
} while (0)
-#define GRPC_TIMER_END(tag, id) \
- do { \
+#define GPR_TIMER_END(tag, important) \
+ do { \
} while (0)
#else /* at least one profiler requested... */
@@ -106,28 +69,14 @@ enum grpc_profiling_tags {
#endif
/* Generic profiling interface. */
-#define GRPC_TIMER_MARK(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_add_mark(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
- __LINE__); \
- }
-
-#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_add_important_mark(tag, #tag, ((void *)(gpr_intptr)(id)), \
- __FILE__, __LINE__); \
- }
+#define GPR_TIMER_MARK(tag, important) \
+ gpr_timer_add_mark(tag, important, __FILE__, __LINE__);
-#define GRPC_TIMER_BEGIN(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_begin(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
- __LINE__); \
- }
+#define GPR_TIMER_BEGIN(tag, important) \
+ gpr_timer_begin(tag, important, __FILE__, __LINE__);
-#define GRPC_TIMER_END(tag, id) \
- if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
- grpc_timer_end(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, __LINE__); \
- }
+#define GPR_TIMER_END(tag, important) \
+ gpr_timer_end(tag, important, __FILE__, __LINE__);
#ifdef GRPC_STAP_PROFILER
/* Empty placeholder for now. */
@@ -141,6 +90,28 @@ enum grpc_profiling_tags {
#ifdef __cplusplus
}
+
+#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+namespace grpc {
+class ProfileScope {
+ public:
+ ProfileScope(const char *desc, bool important) : desc_(desc) {
+ GPR_TIMER_BEGIN(desc_, important ? 1 : 0);
+ }
+ ~ProfileScope() { GPR_TIMER_END(desc_, 0); }
+
+ private:
+ const char *const desc_;
+};
+}
+
+#define GPR_TIMER_SCOPE(tag, important) \
+ ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important))
+#else
+#define GPR_TIMER_SCOPE(tag, important) \
+ do { \
+ } while (false)
+#endif
#endif
#endif /* GRPC_CORE_PROFILING_TIMERS_H */
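
Since the tag enum is gone, call sites now pass a string label plus an "important" flag straight to the macros. A minimal usage sketch against the declarations above (the surrounding function is hypothetical; when neither profiler is compiled in, every macro is a no-op):

    #include "src/core/profiling/timers.h"

    static void do_work(void) {
      GPR_TIMER_BEGIN("do_work", 0);            /* gpr_timer_begin("do_work", 0, __FILE__, __LINE__) */
      /* ... the interesting part ... */
      GPR_TIMER_MARK("do_work.checkpoint", 1);  /* one-shot marker, flagged important */
      GPR_TIMER_END("do_work", 0);              /* gpr_timer_end("do_work", 0, __FILE__, __LINE__) */
    }

In C++ translation units built with a profiler enabled, GPR_TIMER_SCOPE("do_work", 0); brackets the enclosing scope through ProfileScope's constructor and destructor instead of explicit BEGIN/END calls.
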
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 398db20e8c..5e155d83b9 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -181,6 +181,48 @@ void grpc_server_credentials_set_auth_metadata_processor(
creds->processor = processor;
}
+static void server_credentials_pointer_arg_destroy(void *p) {
+ grpc_server_credentials_unref(p);
+}
+
+static void *server_credentials_pointer_arg_copy(void *p) {
+ return grpc_server_credentials_ref(p);
+}
+
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
+ grpc_arg arg;
+ memset(&arg, 0, sizeof(grpc_arg));
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_SERVER_CREDENTIALS_ARG;
+ arg.value.pointer.p = p;
+ arg.value.pointer.copy = server_credentials_pointer_arg_copy;
+ arg.value.pointer.destroy = server_credentials_pointer_arg_destroy;
+ return arg;
+}
+
+grpc_server_credentials *grpc_server_credentials_from_arg(
+ const grpc_arg *arg) {
+ if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL;
+ if (arg->type != GRPC_ARG_POINTER) {
+ gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
+ GRPC_SERVER_CREDENTIALS_ARG);
+ return NULL;
+ }
+ return arg->value.pointer.p;
+}
+
+grpc_server_credentials *grpc_find_server_credentials_in_args(
+ const grpc_channel_args *args) {
+ size_t i;
+ if (args == NULL) return NULL;
+ for (i = 0; i < args->num_args; i++) {
+ grpc_server_credentials *p =
+ grpc_server_credentials_from_arg(&args->args[i]);
+ if (p != NULL) return p;
+ }
+ return NULL;
+}
+
/* -- Ssl credentials. -- */
static void ssl_destruct(grpc_credentials *creds) {
diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h
index b213e052d3..01203b08f1 100644
--- a/src/core/security/credentials.h
+++ b/src/core/security/credentials.h
@@ -215,7 +215,6 @@ typedef struct {
grpc_server_credentials *c, grpc_security_connector **sc);
} grpc_server_credentials_vtable;
-/* TODO(jboeuf): Add a refcount. */
struct grpc_server_credentials {
const grpc_server_credentials_vtable *vtable;
const char *type;
@@ -231,6 +230,13 @@ grpc_server_credentials *grpc_server_credentials_ref(
void grpc_server_credentials_unref(grpc_server_credentials *creds);
+#define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials"
+
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *c);
+grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg);
+grpc_server_credentials *grpc_find_server_credentials_in_args(
+ const grpc_channel_args *args);
+
/* -- Ssl credentials. -- */
typedef struct {
diff --git a/src/core/security/security_context.c b/src/core/security/security_context.c
index fb905e0b22..f544c1d943 100644
--- a/src/core/security/security_context.c
+++ b/src/core/security/security_context.c
@@ -305,33 +305,43 @@ void grpc_auth_property_reset(grpc_auth_property *property) {
memset(property, 0, sizeof(grpc_auth_property));
}
-grpc_arg grpc_auth_metadata_processor_to_arg(grpc_auth_metadata_processor *p) {
+static void auth_context_pointer_arg_destroy(void *p) {
+ GRPC_AUTH_CONTEXT_UNREF(p, "auth_context_pointer_arg");
+}
+
+static void *auth_context_pointer_arg_copy(void *p) {
+ return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg");
+}
+
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
grpc_arg arg;
memset(&arg, 0, sizeof(grpc_arg));
arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_AUTH_METADATA_PROCESSOR_ARG;
+ arg.key = GRPC_AUTH_CONTEXT_ARG;
arg.value.pointer.p = p;
+ arg.value.pointer.copy = auth_context_pointer_arg_copy;
+ arg.value.pointer.destroy = auth_context_pointer_arg_destroy;
return arg;
}
-grpc_auth_metadata_processor *grpc_auth_metadata_processor_from_arg(
+grpc_auth_context *grpc_auth_context_from_arg(
const grpc_arg *arg) {
- if (strcmp(arg->key, GRPC_AUTH_METADATA_PROCESSOR_ARG) != 0) return NULL;
+ if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
- GRPC_AUTH_METADATA_PROCESSOR_ARG);
+ GRPC_AUTH_CONTEXT_ARG);
return NULL;
}
return arg->value.pointer.p;
}
-grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
+grpc_auth_context *grpc_find_auth_context_in_args(
const grpc_channel_args *args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
- grpc_auth_metadata_processor *p =
- grpc_auth_metadata_processor_from_arg(&args->args[i]);
+ grpc_auth_context *p =
+ grpc_auth_context_from_arg(&args->args[i]);
if (p != NULL) return p;
}
return NULL;
diff --git a/src/core/security/security_context.h b/src/core/security/security_context.h
index a9a0306410..2bbdc4be97 100644
--- a/src/core/security/security_context.h
+++ b/src/core/security/security_context.h
@@ -103,13 +103,12 @@ typedef struct {
grpc_server_security_context *grpc_server_security_context_create(void);
void grpc_server_security_context_destroy(void *ctx);
-/* --- Auth metadata processing. --- */
-#define GRPC_AUTH_METADATA_PROCESSOR_ARG "grpc.auth_metadata_processor"
+/* --- Channel args for auth context --- */
+#define GRPC_AUTH_CONTEXT_ARG "grpc.auth_context"
-grpc_arg grpc_auth_metadata_processor_to_arg(grpc_auth_metadata_processor *p);
-grpc_auth_metadata_processor *grpc_auth_metadata_processor_from_arg(
- const grpc_arg *arg);
-grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context *c);
+grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg);
+grpc_auth_context *grpc_find_auth_context_in_args(
const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */
diff --git a/src/core/security/server_auth_filter.c b/src/core/security/server_auth_filter.c
index 30ca9f57a2..2e18369fe8 100644
--- a/src/core/security/server_auth_filter.c
+++ b/src/core/security/server_auth_filter.c
@@ -34,7 +34,7 @@
#include <string.h>
#include "src/core/security/auth_filters.h"
-#include "src/core/security/security_connector.h"
+#include "src/core/security/credentials.h"
#include "src/core/security/security_context.h"
#include <grpc/support/alloc.h>
@@ -58,8 +58,8 @@ typedef struct call_data {
} call_data;
typedef struct channel_data {
- grpc_security_connector *security_connector;
- grpc_auth_metadata_processor processor;
+ grpc_auth_context *auth_context;
+ grpc_server_credentials *creds;
grpc_mdctx *mdctx;
} channel_data;
@@ -160,12 +160,12 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA || calld->got_client_metadata) continue;
calld->got_client_metadata = 1;
- if (chand->processor.process == NULL) continue;
+ if (chand->creds->processor.process == NULL) continue;
calld->md_op = op;
calld->md = metadata_batch_to_md_array(&op->data.metadata);
- chand->processor.process(chand->processor.state, calld->auth_context,
- calld->md.metadata, calld->md.count,
- on_md_processing_done, elem);
+ chand->creds->processor.process(
+ chand->creds->processor.state, calld->auth_context,
+ calld->md.metadata, calld->md.count, on_md_processing_done, elem);
return;
}
}
@@ -221,7 +221,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context =
- grpc_auth_context_create(chand->security_connector->auth_context);
+ grpc_auth_context_create(chand->auth_context);
server_ctx->auth_context->pollset = initial_op->bind_pollset;
initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
@@ -241,9 +241,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
- grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
- grpc_auth_metadata_processor *processor =
- grpc_find_auth_metadata_processor_in_args(args);
+ grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args);
+ grpc_server_credentials *creds = grpc_find_server_credentials_in_args(args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
@@ -252,15 +251,14 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
- GPR_ASSERT(sc != NULL);
- GPR_ASSERT(processor != NULL);
+ GPR_ASSERT(auth_context != NULL);
+ GPR_ASSERT(creds != NULL);
/* initialize members */
- GPR_ASSERT(!sc->is_client_side);
- chand->security_connector =
- GRPC_SECURITY_CONNECTOR_REF(sc, "server_auth_filter");
+ chand->auth_context =
+ GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
+ chand->creds = grpc_server_credentials_ref(creds);
chand->mdctx = mdctx;
- chand->processor = *processor;
}
/* Destructor for channel data */
@@ -268,8 +266,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
- GRPC_SECURITY_CONNECTOR_UNREF(chand->security_connector,
- "server_auth_filter");
+ GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
+ grpc_server_credentials_unref(chand->creds);
}
const grpc_channel_filter grpc_server_auth_filter = {
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 881e44a3fe..82c639e830 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -93,9 +93,9 @@ static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
grpc_server_secure_state *state = statep;
grpc_channel_args *args_copy;
grpc_arg args_to_add[2];
- args_to_add[0] = grpc_security_connector_to_arg(state->sc);
+ args_to_add[0] = grpc_server_credentials_to_arg(state->creds);
args_to_add[1] =
- grpc_auth_metadata_processor_to_arg(&state->creds->processor);
+ grpc_auth_context_to_arg(state->sc->auth_context);
args_copy = grpc_channel_args_copy_and_add(
grpc_server_get_channel_args(state->server), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
diff --git a/src/core/support/alloc.c b/src/core/support/alloc.c
index d2ed82e771..bfcb77956b 100644
--- a/src/core/support/alloc.c
+++ b/src/core/support/alloc.c
@@ -35,22 +35,32 @@
#include <stdlib.h>
#include <grpc/support/port_platform.h>
+#include "src/core/profiling/timers.h"
void *gpr_malloc(size_t size) {
- void *p = malloc(size);
+ void *p;
+ GPR_TIMER_BEGIN("gpr_malloc", 0);
+ p = malloc(size);
if (!p) {
abort();
}
+ GPR_TIMER_END("gpr_malloc", 0);
return p;
}
-void gpr_free(void *p) { free(p); }
+void gpr_free(void *p) {
+ GPR_TIMER_BEGIN("gpr_free", 0);
+ free(p);
+ GPR_TIMER_END("gpr_free", 0);
+}
void *gpr_realloc(void *p, size_t size) {
+ GPR_TIMER_BEGIN("gpr_realloc", 0);
p = realloc(p, size);
if (!p) {
abort();
}
+ GPR_TIMER_END("gpr_realloc", 0);
return p;
}
diff --git a/src/core/support/sync_posix.c b/src/core/support/sync_posix.c
index 91c30989ce..39c96feb13 100644
--- a/src/core/support/sync_posix.c
+++ b/src/core/support/sync_posix.c
@@ -40,14 +40,23 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/profiling/timers.h"
void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
-void gpr_mu_lock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_lock(mu) == 0); }
+void gpr_mu_lock(gpr_mu* mu) {
+ GPR_TIMER_BEGIN("gpr_mu_lock", 0);
+ GPR_ASSERT(pthread_mutex_lock(mu) == 0);
+ GPR_TIMER_END("gpr_mu_lock", 0);
+}
-void gpr_mu_unlock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_unlock(mu) == 0); }
+void gpr_mu_unlock(gpr_mu* mu) {
+ GPR_TIMER_BEGIN("gpr_mu_unlock", 0);
+ GPR_ASSERT(pthread_mutex_unlock(mu) == 0);
+ GPR_TIMER_END("gpr_mu_unlock", 0);
+}
int gpr_mu_trylock(gpr_mu* mu) {
int err = pthread_mutex_trylock(mu);
diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c
index 78f2c2bb77..02cfca8555 100644
--- a/src/core/support/time_posix.c
+++ b/src/core/support/time_posix.c
@@ -63,7 +63,7 @@ static gpr_timespec gpr_from_timespec(struct timespec ts,
/** maps gpr_clock_type --> clockid_t for clock_gettime */
static clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC, CLOCK_REALTIME};
-void gpr_time_init(void) {}
+void gpr_time_init(void) { gpr_precise_clock_init(); }
gpr_timespec gpr_now(gpr_clock_type clock_type) {
struct timespec now;
@@ -89,6 +89,7 @@ static uint64_t g_time_start;
void gpr_time_init(void) {
mach_timebase_info_data_t tb = {0, 1};
+ gpr_precise_clock_init();
mach_timebase_info(&tb);
g_time_scale = tb.numer;
g_time_scale /= tb.denom;
diff --git a/src/core/support/time_precise.c b/src/core/support/time_precise.c
new file mode 100644
index 0000000000..b37517e639
--- /dev/null
+++ b/src/core/support/time_precise.c
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <stdio.h>
+
+#ifdef GRPC_TIMERS_RDTSC
+#if defined(__i386__)
+static void gpr_get_cycle_counter(long long int *clk) {
+ long long int ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ *clk = ret;
+}
+
+// ----------------------------------------------------------------
+#elif defined(__x86_64__) || defined(__amd64__)
+static void gpr_get_cycle_counter(long long int *clk) {
+ unsigned long long low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ *clk = (long long)(high << 32) | (long long)low;
+}
+#endif
+
+static double cycles_per_second = 0;
+static long long int start_cycle;
+void gpr_precise_clock_init(void) {
+ time_t start;
+ long long end_cycle;
+ gpr_log(GPR_DEBUG, "Calibrating timers");
+ start = time(NULL);
+ while (time(NULL) == start)
+ ;
+ gpr_get_cycle_counter(&start_cycle);
+ while (time(NULL) <= start + 10)
+ ;
+ gpr_get_cycle_counter(&end_cycle);
+ cycles_per_second = (double)(end_cycle - start_cycle) / 10.0;
+ gpr_log(GPR_DEBUG, "... cycles_per_second = %f\n", cycles_per_second);
+}
+
+void gpr_precise_clock_now(gpr_timespec *clk) {
+ long long int counter;
+ double secs;
+ gpr_get_cycle_counter(&counter);
+ secs = (double)(counter - start_cycle) / cycles_per_second;
+ clk->clock_type = GPR_CLOCK_PRECISE;
+ clk->tv_sec = (time_t)secs;
+ clk->tv_nsec = (int)(1e9 * (secs - (double)clk->tv_sec));
+}
+
+#else /* GRPC_TIMERS_RDTSC */
+void gpr_precise_clock_init(void) {}
+
+void gpr_precise_clock_now(gpr_timespec *clk) {
+ *clk = gpr_now(GPR_CLOCK_REALTIME);
+ clk->clock_type = GPR_CLOCK_PRECISE;
+}
+#endif /* GRPC_TIMERS_RDTSC */
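
The intended call pattern for the new file is: calibrate once at startup (gpr_time_init now does this, per the time_posix.c hunk above), then read the clock as often as needed. A small sketch, assuming an x86 build with GRPC_TIMERS_RDTSC defined:

    gpr_timespec ts;
    gpr_precise_clock_init();   /* busy-waits roughly ten seconds to measure cycles_per_second */
    gpr_precise_clock_now(&ts); /* (counter - start_cycle) / cycles_per_second, split into sec/nsec */
    /* ts.clock_type == GPR_CLOCK_PRECISE; without RDTSC this falls back to gpr_now(GPR_CLOCK_REALTIME) */
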
diff --git a/src/core/support/time_precise.h b/src/core/support/time_precise.h
index cd201faab9..80c5000123 100644
--- a/src/core/support/time_precise.h
+++ b/src/core/support/time_precise.h
@@ -34,60 +34,9 @@
#ifndef GRPC_CORE_SUPPORT_TIME_PRECISE_H_
#define GRPC_CORE_SUPPORT_TIME_PRECISE_H_
-#include <grpc/support/sync.h>
#include <grpc/support/time.h>
-#include <stdio.h>
-#ifdef GRPC_TIMERS_RDTSC
-#if defined(__i386__)
-static void gpr_get_cycle_counter(long long int *clk) {
- long long int ret;
- __asm__ volatile("rdtsc" : "=A"(ret));
- *clk = ret;
-}
-
-// ----------------------------------------------------------------
-#elif defined(__x86_64__) || defined(__amd64__)
-static void gpr_get_cycle_counter(long long int *clk) {
- unsigned long long low, high;
- __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- *clk = (high << 32) | low;
-}
-#endif
-
-static gpr_once precise_clock_init = GPR_ONCE_INIT;
-static long long cycles_per_second = 0;
-static void gpr_precise_clock_init() {
- time_t start = time(NULL);
- gpr_precise_clock start_cycle;
- gpr_precise_clock end_cycle;
- while (time(NULL) == start)
- ;
- gpr_get_cycle_counter(&start_cycle);
- while (time(NULL) == start + 1)
- ;
- gpr_get_cycle_counter(&end_cycle);
- cycles_per_second = end_cycle - start_cycle;
-}
-
-static double grpc_precise_clock_scaling_factor() {
- gpr_once_init(&precise_clock_init, grpc_precise_clock_init);
- return 1e6 / cycles_per_second;
-}
-
-static void gpr_precise_clock_now(gpr_timespec *clk) {
- long long int counter;
- gpr_get_cycle_counter(&counter);
- clk->clock = GPR_CLOCK_REALTIME;
- clk->tv_sec = counter / cycles_per_second;
- clk->tv_nsec = counter % cycles_per_second;
-}
-
-#else /* GRPC_TIMERS_RDTSC */
-static void gpr_precise_clock_now(gpr_timespec *clk) {
- *clk = gpr_now(GPR_CLOCK_REALTIME);
- clk->clock_type = GPR_CLOCK_PRECISE;
-}
-#endif /* GRPC_TIMERS_RDTSC */
+void gpr_precise_clock_init(void);
+void gpr_precise_clock_now(gpr_timespec *clk);
#endif /* GRPC_CORE_SUPPORT_TIME_PRECISE_ */
diff --git a/src/core/surface/byte_buffer.c b/src/core/surface/byte_buffer.c
index a930949f2d..fb39c4531d 100644
--- a/src/core/surface/byte_buffer.c
+++ b/src/core/surface/byte_buffer.c
@@ -75,9 +75,7 @@ grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
return grpc_raw_byte_buffer_create(bb->data.raw.slice_buffer.slices,
bb->data.raw.slice_buffer.count);
}
- gpr_log(GPR_INFO, "should never get here");
- abort();
- return NULL;
+ GPR_UNREACHABLE_CODE(return NULL);
}
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
@@ -95,6 +93,5 @@ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
case GRPC_BB_RAW:
return bb->data.raw.slice_buffer.length;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index d15a3bcbad..b40e74d61b 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -306,8 +306,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_transport_stream_op *initial_op_ptr = NULL;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_call *call =
- gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ grpc_call *call;
+ GPR_TIMER_BEGIN("grpc_call_create", 0);
+ call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
gpr_mu_init(&call->completion_mu);
@@ -401,6 +402,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
set_deadline_alarm(&exec_ctx, call, send_deadline);
}
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_create", 0);
return call;
}
@@ -425,12 +427,17 @@ static grpc_cq_completion *allocate_completion(grpc_call *call) {
if (call->allocated_completions & (1u << i)) {
continue;
}
- call->allocated_completions |= (gpr_uint8)(1u << i);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ call->allocated_completions =
+ (gpr_uint8)(call->allocated_completions | (1u << i));
gpr_mu_unlock(&call->completion_mu);
return &call->completions[i];
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return NULL);
+ return NULL;
}
static void done_completion(grpc_exec_ctx *exec_ctx, void *call,
@@ -456,6 +463,7 @@ void grpc_call_internal_ref(grpc_call *c) {
static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
size_t i;
grpc_call *c = call;
+ GPR_TIMER_BEGIN("destroy_call", 0);
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
@@ -488,6 +496,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
gpr_free(c);
+ GPR_TIMER_END("destroy_call", 0);
}
#ifdef GRPC_CALL_REF_COUNT_DEBUG
@@ -521,9 +530,13 @@ static void set_compression_algorithm(grpc_call *call,
call->compression_algorithm = algo;
}
-grpc_compression_algorithm grpc_call_get_compression_algorithm(
- const grpc_call *call) {
- return call->compression_algorithm;
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call) {
+ grpc_compression_algorithm algorithm;
+ gpr_mu_lock(&call->mu);
+ algorithm = call->compression_algorithm;
+ gpr_mu_unlock(&call->mu);
+ return algorithm;
}
static void set_encodings_accepted_by_peer(
@@ -557,12 +570,20 @@ static void set_encodings_accepted_by_peer(
}
}
-gpr_uint32 grpc_call_get_encodings_accepted_by_peer(grpc_call *call) {
- return call->encodings_accepted_by_peer;
+gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
+ gpr_uint32 encodings_accepted_by_peer;
+ gpr_mu_lock(&call->mu);
+ encodings_accepted_by_peer = call->encodings_accepted_by_peer;
+ gpr_mu_unlock(&call->mu);
+ return encodings_accepted_by_peer;
}
-gpr_uint32 grpc_call_get_message_flags(const grpc_call *call) {
- return call->incoming_message_flags;
+gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call) {
+ gpr_uint32 flags;
+ gpr_mu_lock(&call->mu);
+ flags = call->incoming_message_flags;
+ gpr_mu_unlock(&call->mu);
+ return flags;
}
static void set_status_details(grpc_call *call, status_source source,
@@ -607,6 +628,8 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
const size_t MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
+ GPR_TIMER_BEGIN("unlock", 0);
+
memset(&op, 0, sizeof(op));
op.cancel_with_status = call->cancel_with_status;
@@ -677,6 +700,8 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completing");
}
+
+ GPR_TIMER_END("unlock", 0);
}
static void get_final_status(grpc_call *call, grpc_ioreq_data out) {
@@ -736,7 +761,11 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
size_t i;
/* ioreq is live: we need to do something */
master = &call->masters[master_set];
- master->complete_mask |= (gpr_uint16)(1u << op);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ master->complete_mask = (gpr_uint16)(master->complete_mask | (1u << op));
if (!success) {
master->success = 0;
}
@@ -822,6 +851,7 @@ static void early_out_write_ops(grpc_call *call) {
static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *call = pc;
+ GPR_TIMER_BEGIN("call_on_done_send", 0);
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
@@ -845,9 +875,11 @@ static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
call->sending = 0;
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "sending");
+ GPR_TIMER_END("call_on_done_send", 0);
}
static void finish_message(grpc_call *call) {
+ GPR_TIMER_BEGIN("finish_message", 0);
if (call->error_status_set == 0) {
/* TODO(ctiller): this could be a lot faster if coded directly */
grpc_byte_buffer *byte_buffer;
@@ -867,6 +899,7 @@ static void finish_message(grpc_call *call) {
gpr_slice_buffer_reset_and_unref(&call->incoming_message);
GPR_ASSERT(call->incoming_message.count == 0);
call->reading_message = 0;
+ GPR_TIMER_END("finish_message", 0);
}
static int begin_message(grpc_call *call, grpc_begin_message msg) {
@@ -927,6 +960,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
}
/* we have to be reading a message to know what to do here */
if (!call->reading_message) {
+ gpr_slice_unref(slice);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT,
"Received payload data while not reading a message");
return 0;
@@ -955,7 +989,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *child_call;
grpc_call *next_child_call;
size_t i;
- GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
+ GPR_TIMER_BEGIN("call_on_done_recv", 0);
lock(call);
call->receiving = 0;
if (success) {
@@ -965,13 +999,19 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
+ GPR_TIMER_BEGIN("recv_metadata", 0);
recv_metadata(exec_ctx, call, &op->data.metadata);
+ GPR_TIMER_END("recv_metadata", 0);
break;
case GRPC_OP_BEGIN_MESSAGE:
+ GPR_TIMER_BEGIN("begin_message", 0);
success = begin_message(call, op->data.begin_message);
+ GPR_TIMER_END("begin_message", 0);
break;
case GRPC_OP_SLICE:
+ GPR_TIMER_BEGIN("add_slice_to_message", 0);
success = add_slice_to_message(call, op->data.slice);
+ GPR_TIMER_END("add_slice_to_message", 0);
break;
}
}
@@ -1017,7 +1057,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "receiving");
- GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
+ GPR_TIMER_END("call_on_done_recv", 0);
}
static int prepare_application_metadata(grpc_call *call, size_t count,
@@ -1246,7 +1286,11 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
GRPC_MDSTR_REF(reqs[i].data.send_status.details));
}
}
- have_ops |= (gpr_uint16)(1u << op);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ have_ops = (gpr_uint16)(have_ops | (1u << op));
call->request_data[op] = data;
call->request_flags[op] = reqs[i].flags;
@@ -1491,16 +1535,25 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_mdelem *mdel = l->md;
grpc_mdstr *key = mdel->key;
if (key == grpc_channel_get_status_string(call->channel)) {
+ GPR_TIMER_BEGIN("status", 0);
set_status_code(call, STATUS_FROM_WIRE, decode_status(mdel));
+ GPR_TIMER_END("status", 0);
} else if (key == grpc_channel_get_message_string(call->channel)) {
+ GPR_TIMER_BEGIN("status-details", 0);
set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(mdel->value));
+ GPR_TIMER_END("status-details", 0);
} else if (key ==
grpc_channel_get_compression_algorithm_string(call->channel)) {
+ GPR_TIMER_BEGIN("compression_algorithm", 0);
set_compression_algorithm(call, decode_compression(mdel));
+ GPR_TIMER_END("compression_algorithm", 0);
} else if (key == grpc_channel_get_encodings_accepted_by_peer_string(
call->channel)) {
+ GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
set_encodings_accepted_by_peer(call, mdel->value->slice);
+ GPR_TIMER_END("encodings_accepted_by_peer", 0);
} else {
+ GPR_TIMER_BEGIN("report_up", 0);
dest = &call->buffered_metadata[is_trailing];
if (dest->count == dest->capacity) {
dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
@@ -1521,12 +1574,15 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
}
call->owned_metadata[call->owned_metadata_count++] = mdel;
l->md = NULL;
+ GPR_TIMER_END("report_up", 0);
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
0 &&
!call->is_client) {
+ GPR_TIMER_BEGIN("set_deadline_alarm", 0);
set_deadline_alarm(exec_ctx, call, md->deadline);
+ GPR_TIMER_END("set_deadline_alarm", 0);
}
if (!is_trailing) {
call->read_state = READ_STATE_GOT_INITIAL_METADATA;
@@ -1589,6 +1645,8 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
+
GRPC_API_TRACE(
"grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
5, (call, ops, (unsigned long)nops, tag, reserved));
@@ -1826,6 +1884,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
finish_func, tag);
done:
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_start_batch", 0);
return error;
}
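
The "integral promotion" comments added in this file (and again in bin_encoder.c below) all refer to the same C rule: the operands of | are promoted to int, so compound-assigning the result into a gpr_uint8/gpr_uint16 field draws a narrowing warning. A standalone illustration, not part of the diff:

    gpr_uint8 flags = 0;
    /* flags |= (gpr_uint8)(1u << 3);  the | is still evaluated at int width, and the
       implicit store back into the gpr_uint8 can trigger a conversion warning */
    flags = (gpr_uint8)(flags | (1u << 3)); /* promoted arithmetic, then an explicit narrowing cast */
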
diff --git a/src/core/surface/call.h b/src/core/surface/call.h
index f421a81619..9b7c6f9bfb 100644
--- a/src/core/surface/call.h
+++ b/src/core/surface/call.h
@@ -169,17 +169,6 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
gpr_uint8 grpc_call_is_client(grpc_call *call);
-grpc_compression_algorithm grpc_call_get_compression_algorithm(
- const grpc_call *call);
-
-gpr_uint32 grpc_call_get_message_flags(const grpc_call *call);
-
-/** Returns a bitset for the encodings (compression algorithms) supported by \a
- * call's peer.
- *
- * To be indexed by grpc_compression_algorithm enum values. */
-gpr_uint32 grpc_call_get_encodings_accepted_by_peer(grpc_call *call);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/core/surface/call_test_only.h b/src/core/surface/call_test_only.h
new file mode 100644
index 0000000000..df4be3248b
--- /dev/null
+++ b/src/core/surface/call_test_only.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H
+#define GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H
+
+#include <grpc/grpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Return the compression algorithm from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call);
+
+/** Return the message flags from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call);
+
+/** Returns a bitset for the encodings (compression algorithms) supported by \a
+ * call's peer.
+ *
+ * To be indexed by grpc_compression_algorithm enum values. */
+gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H */
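
These declarations replace the unlocked getters removed from call.h; the new implementations in call.c take the call mutex before reading. A hypothetical test snippet (call setup omitted) might read:

    grpc_compression_algorithm algo =
        grpc_call_test_only_get_compression_algorithm(call);
    gpr_uint32 accepted =
        grpc_call_test_only_get_encodings_accepted_by_peer(call);
    GPR_ASSERT(accepted & (1u << algo)); /* e.g. check the negotiated algorithm is one the peer accepts */
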
diff --git a/src/core/surface/channel_connectivity.c b/src/core/surface/channel_connectivity.c
index 01430785dc..52a64fb2cd 100644
--- a/src/core/surface/channel_connectivity.c
+++ b/src/core/surface/channel_connectivity.c
@@ -117,9 +117,7 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
switch (w->phase) {
case WAITING:
case CALLED_BACK:
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- break;
+ GPR_UNREACHABLE_CODE(return );
case CALLING_BACK:
w->phase = CALLED_BACK;
break;
@@ -171,9 +169,7 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
w->phase = CALLING_BACK_AND_FINISHED;
break;
case CALLING_BACK_AND_FINISHED:
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- break;
+ GPR_UNREACHABLE_CODE(return );
case CALLED_BACK:
delete = 1;
break;
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index e818ccba48..bcdb363873 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -42,6 +42,7 @@
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
+#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
@@ -143,6 +144,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
int i;
grpc_pollset_worker *pluck_worker;
+ GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
+
storage->tag = tag;
storage->done = done;
storage->done_arg = done_arg;
@@ -174,6 +177,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
+
+ GPR_TIMER_END("grpc_cq_end_op", 0);
}
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
@@ -184,6 +189,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec now;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
+
GRPC_API_TRACE(
"grpc_completion_queue_next("
"cc=%p, "
@@ -230,6 +237,9 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_next", 0);
+
return ret;
}
@@ -254,8 +264,7 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
return;
}
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return );
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
@@ -268,6 +277,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
int first_loop = 1;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
+
GRPC_API_TRACE(
"grpc_completion_queue_pluck("
"cc=%p, tag=%p, "
@@ -333,6 +344,9 @@ done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_pluck", 0);
+
return ret;
}
diff --git a/src/core/surface/init.c b/src/core/surface/init.c
index 95011cab17..715c90a5e1 100644
--- a/src/core/surface/init.c
+++ b/src/core/surface/init.c
@@ -115,7 +115,7 @@ void grpc_init(void) {
gpr_log(GPR_ERROR, "Could not initialize census.");
}
}
- grpc_timers_global_init();
+ gpr_timers_global_init();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != NULL) {
g_all_of_the_plugins[i].init();
@@ -133,7 +133,7 @@ void grpc_shutdown(void) {
if (--g_initializations == 0) {
grpc_iomgr_shutdown();
census_shutdown();
- grpc_timers_global_destroy();
+ gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_resolver_registry_shutdown();
for (i = 0; i < g_number_of_plugins; i++) {
diff --git a/src/core/transport/chttp2/bin_encoder.c b/src/core/transport/chttp2/bin_encoder.c
index f1bbf9aa91..9c9070ede4 100644
--- a/src/core/transport/chttp2/bin_encoder.c
+++ b/src/core/transport/chttp2/bin_encoder.c
@@ -185,8 +185,12 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
}
if (temp_length) {
- *out++ = (gpr_uint8)(temp << (8u - temp_length)) |
- (gpr_uint8)(0xffu >> temp_length);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ *out++ = (gpr_uint8)((gpr_uint8)(temp << (8u - temp_length)) |
+ (gpr_uint8)(0xffu >> temp_length));
}
GPR_ASSERT(out == GPR_SLICE_END_PTR(output));
@@ -265,8 +269,12 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
}
if (out.temp_length) {
- *out.out++ = (gpr_uint8)(out.temp << (8u - out.temp_length)) |
- (gpr_uint8)(0xffu >> out.temp_length);
+ /* NB: the following integer arithmetic operation needs to be in its
+ * expanded form due to the "integral promotion" performed (see section
+ * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
+ * is then required to avoid the compiler warning */
+ *out.out++ = (gpr_uint8)((gpr_uint8)(out.temp << (8u - out.temp_length)) |
+ (gpr_uint8)(0xffu >> out.temp_length));
}
GPR_ASSERT(out.out <= GPR_SLICE_END_PTR(output));
diff --git a/src/core/transport/chttp2/frame_data.c b/src/core/transport/chttp2/frame_data.c
index acfa7c002e..07179a4571 100644
--- a/src/core/transport/chttp2/frame_data.c
+++ b/src/core/transport/chttp2/frame_data.c
@@ -168,7 +168,5 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
}
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return GRPC_CHTTP2_CONNECTION_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}
diff --git a/src/core/transport/chttp2/frame_goaway.c b/src/core/transport/chttp2/frame_goaway.c
index 2ff1eda89b..c5758bcb71 100644
--- a/src/core/transport/chttp2/frame_goaway.c
+++ b/src/core/transport/chttp2/frame_goaway.c
@@ -152,9 +152,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
}
return GRPC_CHTTP2_PARSE_OK;
}
- gpr_log(GPR_ERROR, "Should never end up here");
- abort();
- return GRPC_CHTTP2_CONNECTION_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}
void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,
diff --git a/src/core/transport/chttp2/hpack_parser.c b/src/core/transport/chttp2/hpack_parser.c
index 20ea513375..20d8312d54 100644
--- a/src/core/transport/chttp2/hpack_parser.c
+++ b/src/core/transport/chttp2/hpack_parser.c
@@ -1166,9 +1166,7 @@ static int append_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
append_bytes(str, decoded, 3);
goto b64_byte0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 1;
+ GPR_UNREACHABLE_CODE(return 1);
}
/* append a null terminator to a string */
@@ -1313,9 +1311,7 @@ static int parse_value_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return 0;
}
/* Add code to prevent return without value error */
- gpr_log(GPR_ERROR, "Should never reach beyond switch in parse_value_string");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
static int parse_value_string_with_indexed_key(grpc_chttp2_hpack_parser *p,
diff --git a/src/core/transport/chttp2/parsing.c b/src/core/transport/chttp2/parsing.c
index f7a0a10581..5d4d8e70c4 100644
--- a/src/core/transport/chttp2/parsing.c
+++ b/src/core/transport/chttp2/parsing.c
@@ -35,6 +35,7 @@
#include <string.h>
+#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
@@ -68,6 +69,8 @@ void grpc_chttp2_prepare_to_read(
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
+ GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
+
transport_parsing->next_stream_id = transport_global->next_stream_id;
/* update the parsing view of incoming window */
@@ -89,6 +92,8 @@ void grpc_chttp2_prepare_to_read(
stream_parsing->incoming_window = stream_global->incoming_window;
}
}
+
+ GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
}
void grpc_chttp2_publish_reads(
@@ -417,14 +422,10 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
transport_parsing->incoming_frame_size -= (gpr_uint32)(end - cur);
return 1;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
+ GPR_UNREACHABLE_CODE(return 0);
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
-
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
@@ -580,9 +581,7 @@ static int init_data_frame_parser(
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
static void free_timeout(void *p) { gpr_free(p); }
@@ -820,7 +819,5 @@ static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
- return 0;
+ GPR_UNREACHABLE_CODE(return 0);
}
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c
index 83227e677d..6c7f7a9ea7 100644
--- a/src/core/transport/chttp2/stream_encoder.c
+++ b/src/core/transport/chttp2/stream_encoder.c
@@ -428,7 +428,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
}
- abort();
+ GPR_UNREACHABLE_CODE(return NULL);
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
@@ -442,7 +442,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
}
- abort();
+ GPR_UNREACHABLE_CODE(return NULL);
}
/* no elem, key in the table... fall back to literal emission */
@@ -454,7 +454,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx_v(c, elem, st);
return elem;
}
- abort();
+ GPR_UNREACHABLE_CODE(return NULL);
}
#define STRLEN_LIT(x) (sizeof(x) - 1)
diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c
index d1c9da6df0..69ad8854ba 100644
--- a/src/core/transport/chttp2/writing.c
+++ b/src/core/transport/chttp2/writing.c
@@ -37,6 +37,7 @@
#include <grpc/support/log.h>
+#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
@@ -180,6 +181,8 @@ void grpc_chttp2_perform_writes(
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
+ GPR_TIMER_BEGIN("finalize_outbuf", 0);
+
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
if (stream_writing->sopb.nops > 0 ||
@@ -208,6 +211,8 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
}
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
+
+ GPR_TIMER_END("finalize_outbuf", 0);
}
void grpc_chttp2_cleanup_writing(
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index de74379546..effc3c4b3b 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -510,6 +510,7 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ GPR_TIMER_BEGIN("unlock", 0);
unlock_check_read_write_state(exec_ctx, t);
if (!t->writing_active && !t->closed &&
grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
@@ -520,6 +521,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
}
gpr_mu_unlock(&t->mu);
+ GPR_TIMER_END("unlock", 0);
}
/*
@@ -546,6 +548,8 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
+ GPR_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
+
lock(t);
allow_endpoint_shutdown_locked(exec_ctx, t);
@@ -567,12 +571,16 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
unlock(exec_ctx, t);
UNREF_TRANSPORT(exec_ctx, t, "writing");
+
+ GPR_TIMER_END("grpc_chttp2_terminate_writing", 0);
}
static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
int iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
+ GPR_TIMER_BEGIN("writing_action", 0);
grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
+ GPR_TIMER_END("writing_action", 0);
}
void grpc_chttp2_add_incoming_goaway(
@@ -642,6 +650,7 @@ static void maybe_start_some_streams(
static void perform_stream_op_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
if (op->cancel_with_status != GRPC_STATUS_OK) {
cancel_from_api(transport_global, stream_global, op->cancel_with_status);
}
@@ -713,6 +722,7 @@ static void perform_stream_op_locked(
}
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
+ GPR_TIMER_END("perform_stream_op_locked", 0);
}
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1103,6 +1113,8 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
int keep_reading = 0;
grpc_chttp2_transport *t = tp;
+ GPR_TIMER_BEGIN("recv_data", 0);
+
lock(t);
i = 0;
GPR_ASSERT(!t->parsing_active);
@@ -1113,11 +1125,13 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
&t->parsing_stream_map);
grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
gpr_mu_unlock(&t->mu);
+ GPR_TIMER_BEGIN("recv_data.parse", 0);
for (; i < t->read_buffer.count &&
grpc_chttp2_perform_read(exec_ctx, &t->parsing,
t->read_buffer.slices[i]);
i++)
;
+ GPR_TIMER_END("recv_data.parse", 0);
gpr_mu_lock(&t->mu);
if (i != t->read_buffer.count) {
drop_connection(exec_ctx, t);
@@ -1154,6 +1168,8 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
} else {
UNREF_TRANSPORT(exec_ctx, t, "recv_data");
}
+
+ GPR_TIMER_END("recv_data", 0);
}
/*
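[Note on the recv_data hunks above: the timer scopes nest — "recv_data" wraps the whole callback, while "recv_data.parse" is begun only after the transport mutex is released and ended before it is reacquired, so parse time can be read separately from time spent holding the lock. A sketch of that nesting, assuming (as the patch's usage suggests) that BEGIN/END pairs may nest as long as they close in LIFO order; fake_transport and handle_incoming are hypothetical stand-ins, not gRPC types:

    #include <grpc/support/sync.h>
    #include "src/core/profiling/timers.h"

    typedef struct {
      gpr_mu mu;
      /* ... */
    } fake_transport; /* hypothetical stand-in for grpc_chttp2_transport */

    static void handle_incoming(fake_transport *t) {
      GPR_TIMER_BEGIN("handle_incoming", 0);

      gpr_mu_lock(&t->mu);
      /* ... inspect state under the lock ... */
      gpr_mu_unlock(&t->mu);

      GPR_TIMER_BEGIN("handle_incoming.parse", 0); /* inner scope: lock not held */
      /* ... parse buffered data ... */
      GPR_TIMER_END("handle_incoming.parse", 0);

      gpr_mu_lock(&t->mu);
      /* ... publish parse results ... */
      gpr_mu_unlock(&t->mu);

      GPR_TIMER_END("handle_incoming", 0);
    }
]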
diff --git a/src/core/transport/stream_op.c b/src/core/transport/stream_op.c
index 1cb2bd7c59..6493e77bc5 100644
--- a/src/core/transport/stream_op.c
+++ b/src/core/transport/stream_op.c
@@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/profiling/timers.h"
+
/* Exponential growth function: Given x, return a larger x.
Currently we grow by 1.5 times upon reallocation. */
#define GROW(x) (3 * (x) / 2)
@@ -300,6 +302,8 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
+ GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
+
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
@@ -328,4 +332,6 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
}
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
+
+ GPR_TIMER_END("grpc_metadata_batch_filter", 0);
}
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c
index cbb6f17ae1..99e28ab63b 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.c
@@ -118,10 +118,10 @@ static gpr_uint32 load32_little_endian(const unsigned char *buf) {
}
static void store32_little_endian(gpr_uint32 value, unsigned char *buf) {
- buf[3] = (unsigned char)(value >> 24) & 0xFF;
- buf[2] = (unsigned char)(value >> 16) & 0xFF;
- buf[1] = (unsigned char)(value >> 8) & 0xFF;
- buf[0] = (unsigned char)(value)&0xFF;
+ buf[3] = (unsigned char)((value >> 24) & 0xFF);
+ buf[2] = (unsigned char)((value >> 16) & 0xFF);
+ buf[1] = (unsigned char)((value >> 8) & 0xFF);
+ buf[0] = (unsigned char)((value)&0xFF);
}
static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
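[Note on the change above: in the old code the (unsigned char) cast binds more tightly than &, so the shifted value was truncated first and the trailing "& 0xFF" was applied to the already-narrowed, int-promoted result. Both forms store the same bytes; the rewrite masks first and then narrows, making the truncation explicit, presumably to satisfy stricter implicit-conversion warnings. A standalone round-trip sketch of the patched ordering (using uint32_t in place of gpr_uint32; store32_le/load32_le are illustrative names, not the patch's):

    #include <assert.h>
    #include <stdint.h>

    /* Mask to 8 bits first, then narrow -- the order used by the patch. */
    static void store32_le(uint32_t value, unsigned char *buf) {
      buf[0] = (unsigned char)(value & 0xFF);
      buf[1] = (unsigned char)((value >> 8) & 0xFF);
      buf[2] = (unsigned char)((value >> 16) & 0xFF);
      buf[3] = (unsigned char)((value >> 24) & 0xFF);
    }

    static uint32_t load32_le(const unsigned char *buf) {
      return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
             ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
    }

    int main(void) {
      unsigned char buf[4];
      store32_le(0xDEADBEEFu, buf);
      assert(load32_le(buf) == 0xDEADBEEFu); /* round-trips unchanged */
      return 0;
    }
]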
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 05789f07d4..22b57964cc 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -319,8 +319,9 @@ static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
/* TODO(jboeuf): Maybe add more properties. */
GENERAL_NAMES *subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
- int subject_alt_name_count =
- (subject_alt_names != NULL) ? sk_GENERAL_NAME_num(subject_alt_names) : 0;
+ int subject_alt_name_count = (subject_alt_names != NULL)
+ ? (int)sk_GENERAL_NAME_num(subject_alt_names)
+ : 0;
size_t property_count;
tsi_result result;
GPR_ASSERT(subject_alt_name_count >= 0);
@@ -358,7 +359,7 @@ static void log_ssl_error_stack(void) {
unsigned long err;
while ((err = ERR_get_error()) != 0) {
char details[256];
- ERR_error_string_n(err, details, sizeof(details));
+ ERR_error_string_n((uint32_t)err, details, sizeof(details));
gpr_log(GPR_ERROR, "%s", details);
}
}
@@ -668,7 +669,7 @@ static tsi_result ssl_protector_protect(tsi_frame_protector *self,
tsi_result result = TSI_OK;
/* First see if we have some pending data in the SSL BIO. */
- int pending_in_ssl = BIO_pending(impl->from_ssl);
+ int pending_in_ssl = (int)BIO_pending(impl->from_ssl);
if (pending_in_ssl > 0) {
*unprotected_bytes_size = 0;
GPR_ASSERT(*protected_output_frames_size <= INT_MAX);
@@ -726,7 +727,7 @@ static tsi_result ssl_protector_protect_flush(
impl->buffer_offset = 0;
}
- pending = BIO_pending(impl->from_ssl);
+ pending = (int)BIO_pending(impl->from_ssl);
GPR_ASSERT(pending >= 0);
*still_pending_size = (size_t)pending;
if (*still_pending_size == 0) return TSI_OK;
@@ -739,7 +740,7 @@ static tsi_result ssl_protector_protect_flush(
return TSI_INTERNAL_ERROR;
}
*protected_output_frames_size = (size_t)read_from_ssl;
- pending = BIO_pending(impl->from_ssl);
+ pending = (int)BIO_pending(impl->from_ssl);
GPR_ASSERT(pending >= 0);
*still_pending_size = (size_t)pending;
return TSI_OK;
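[Note on the ssl_transport_security.c changes above: sk_GENERAL_NAME_num and BIO_pending can return types wider than int depending on the SSL library and version (for example a long via BIO_ctrl in classic OpenSSL, or size_t in BoringSSL's stack API), so the patch adds explicit (int) casts to make the narrowing visible and keeps the existing GPR_ASSERT(pending >= 0) guard before widening to size_t. A standalone sketch of that narrow-then-validate pattern; pending_bytes_to_size is a hypothetical helper, and it returns an error instead of asserting:

    #include <assert.h>
    #include <stddef.h>

    /* Narrow a possibly-wider "pending bytes" result explicitly, reject
       negative values, and only then widen to size_t for buffer math. */
    static int pending_bytes_to_size(long raw_pending, size_t *out) {
      int pending = (int)raw_pending; /* explicit narrowing, as in the patch */
      if (pending < 0) return 0;      /* error path instead of GPR_ASSERT */
      *out = (size_t)pending;
      return 1;
    }

    int main(void) {
      size_t n = 0;
      assert(pending_bytes_to_size(128L, &n) && n == 128);
      assert(!pending_bytes_to_size(-1L, &n));
      return 0;
    }
]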