Diffstat (limited to 'src/core/lib')
-rw-r--r--  src/core/lib/debug/stats.c         | 107
-rw-r--r--  src/core/lib/debug/stats.h         |  17
-rw-r--r--  src/core/lib/debug/stats_data.c    | 229
-rw-r--r--  src/core/lib/debug/stats_data.h    | 150
-rw-r--r--  src/core/lib/debug/stats_data.yaml |  70
-rw-r--r--  src/core/lib/iomgr/combiner.c      |   5
-rw-r--r--  src/core/lib/iomgr/executor.c      |  13
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c     |   9
8 files changed, 588 insertions, 12 deletions
diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c
index 4dbd94c724..91ca0aa76e 100644
--- a/src/core/lib/debug/stats.c
+++ b/src/core/lib/debug/stats.c
@@ -45,7 +45,95 @@ void grpc_stats_collect(grpc_stats_data *output) {
       output->counters[i] += gpr_atm_no_barrier_load(
           &grpc_stats_per_cpu_storage[core].counters[i]);
     }
+    for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+      output->histograms[i] += gpr_atm_no_barrier_load(
+          &grpc_stats_per_cpu_storage[core].histograms[i]);
+    }
+  }
+}
+
+void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
+                     grpc_stats_data *c) {
+  for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+    c->counters[i] = b->counters[i] - a->counters[i];
+  }
+  for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+    c->histograms[i] = b->histograms[i] - a->histograms[i];
+  }
+}
+
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
+                                      const int *table, int table_size) {
+  GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
+  const int *const start = table;
+  while (table_size > 0) {
+    int step = table_size / 2;
+    const int *it = table + step;
+    if (value >= *it) {
+      table = it + 1;
+      table_size -= step + 1;
+    } else {
+      table_size = step;
+    }
+  }
+  return (int)(table - start) - 1;
+}
+
+size_t grpc_stats_histo_count(const grpc_stats_data *stats,
+                              grpc_stats_histograms histogram) {
+  size_t sum = 0;
+  for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) {
+    sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i];
+  }
+  return sum;
+}
+
+static double threshold_for_count_below(const gpr_atm *bucket_counts,
+                                        const int *bucket_boundaries,
+                                        int num_buckets, double count_below) {
+  double count_so_far;
+  double lower_bound;
+  double upper_bound;
+  int lower_idx;
+  int upper_idx;
+
+  /* find the lowest bucket that gets us above count_below */
+  count_so_far = 0.0;
+  for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) {
+    count_so_far += (double)bucket_counts[lower_idx];
+    if (count_so_far >= count_below) {
+      break;
+    }
   }
+  if (count_so_far == count_below) {
+    /* this bucket hits the threshold exactly... we should be midway through
+       any run of zero values following the bucket */
+    for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
+      if (bucket_counts[upper_idx]) {
+        break;
+      }
+    }
+    return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
+  } else {
+    /* treat values as uniform throughout the bucket, and find where this value
+       should lie */
+    lower_bound = bucket_boundaries[lower_idx];
+    upper_bound = bucket_boundaries[lower_idx + 1];
+    return upper_bound -
+           (upper_bound - lower_bound) * (count_so_far - count_below) /
+               (double)bucket_counts[lower_idx];
+  }
+}
+
+double grpc_stats_histo_percentile(const grpc_stats_data *stats,
+                                   grpc_stats_histograms histogram,
+                                   double percentile) {
+  size_t count = grpc_stats_histo_count(stats, histogram);
+  if (count == 0) return 0.0;
+  return threshold_for_count_below(
+      stats->histograms + grpc_stats_histo_start[histogram],
+      grpc_stats_histo_bucket_boundaries[histogram],
+      grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0);
 }
 
 char *grpc_stats_data_as_json(const grpc_stats_data *data) {
@@ -60,6 +148,25 @@ char *grpc_stats_data_as_json(const grpc_stats_data *data) {
     gpr_strvec_add(&v, tmp);
     is_first = false;
   }
+  for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
+    gpr_asprintf(&tmp, "%s\"%s\": [", is_first ? "" : ", ",
+                 grpc_stats_histogram_name[i]);
+    gpr_strvec_add(&v, tmp);
+    for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+      gpr_asprintf(&tmp, "%s%" PRIdPTR, j == 0 ? "" : ",",
+                   data->histograms[grpc_stats_histo_start[i] + j]);
+      gpr_strvec_add(&v, tmp);
+    }
+    gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]);
+    gpr_strvec_add(&v, tmp);
+    for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+      gpr_asprintf(&tmp, "%s%d", j == 0 ? "" : ",",
+                   grpc_stats_histo_bucket_boundaries[i][j]);
+      gpr_strvec_add(&v, tmp);
+    }
+    gpr_strvec_add(&v, gpr_strdup("]"));
+    is_first = false;
+  }
   gpr_strvec_add(&v, gpr_strdup("}"));
   tmp = gpr_strvec_flatten(&v, NULL);
   gpr_strvec_destroy(&v);
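The new helpers above only need two snapshots to produce tail statistics. A minimal sketch of how a caller might drive them (hypothetical caller, not part of this change; assumes gRPC core is already initialized so the per-CPU storage exists):

#include <stdio.h>

#include "src/core/lib/debug/stats.h"

/* Hypothetical helper: report the size distribution of TCP writes performed
   while run_workload executes. */
static void report_tcp_write_size(void (*run_workload)(void)) {
  grpc_stats_data before, after, delta;
  grpc_stats_collect(&before);
  run_workload();
  grpc_stats_collect(&after);
  grpc_stats_diff(&after, &before, &delta); /* delta = after - before */
  if (grpc_stats_histo_count(&delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE) > 0) {
    printf("tcp_write_size: p50=%.0f p99=%.0f\n",
           grpc_stats_histo_percentile(
               &delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 50.0),
           grpc_stats_histo_percentile(
               &delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 99.0));
  }
}

Note that grpc_stats_histo_find_bucket_slow is a plain lower-bound binary search over a generated boundary table; the generated fast paths in stats_data.c below fall back to it only when the table lookup cannot be used, and each fallback is itself counted via the new histogram_slow_lookups counter.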
"" : ", ", + grpc_stats_histogram_name[i]); + gpr_strvec_add(&v, tmp); + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + gpr_asprintf(&tmp, "%s%" PRIdPTR, j == 0 ? "" : ",", + data->histograms[grpc_stats_histo_start[i] + j]); + gpr_strvec_add(&v, tmp); + } + gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]); + gpr_strvec_add(&v, tmp); + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + gpr_asprintf(&tmp, "%s%d", j == 0 ? "" : ",", + grpc_stats_histo_bucket_boundaries[i][j]); + gpr_strvec_add(&v, tmp); + } + gpr_strvec_add(&v, gpr_strdup("]")); + is_first = false; + } gpr_strvec_add(&v, gpr_strdup("}")); tmp = gpr_strvec_flatten(&v, NULL); gpr_strvec_destroy(&v); diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index 563b108dff..09d190d488 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -25,6 +25,7 @@ typedef struct grpc_stats_data { gpr_atm counters[GRPC_STATS_COUNTER_COUNT]; + gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS]; } grpc_stats_data; extern grpc_stats_data *grpc_stats_per_cpu_storage; @@ -36,9 +37,25 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage; (gpr_atm_no_barrier_fetch_add( \ &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1)) +#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \ + (gpr_atm_no_barrier_fetch_add( \ + &GRPC_THREAD_STATS_DATA((exec_ctx)) \ + ->histograms[histogram##_FIRST_SLOT + (index)], \ + 1)) + void grpc_stats_init(void); void grpc_stats_shutdown(void); void grpc_stats_collect(grpc_stats_data *output); +// c = b-a +void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, + grpc_stats_data *c); char *grpc_stats_data_as_json(const grpc_stats_data *data); +int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, + const int *table, int table_size); +double grpc_stats_histo_percentile(const grpc_stats_data *data, + grpc_stats_histograms histogram, + double percentile); +size_t grpc_stats_histo_count(const grpc_stats_data *data, + grpc_stats_histograms histogram); #endif diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 2203358a7e..490d257d2f 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -19,7 +19,232 @@ */ #include "src/core/lib/debug/stats_data.h" +#include <grpc/support/useful.h> +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/iomgr/exec_ctx.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { - "client_calls_created", "server_calls_created", "syscall_write", - "syscall_read", "syscall_poll", "syscall_wait", + "client_calls_created", + "server_calls_created", + "syscall_poll", + "syscall_wait", + "histogram_slow_lookups", + "syscall_write", + "syscall_read", + "tcp_backup_pollers_created", + "tcp_backup_poller_polls", + "http2_op_batches", + "http2_op_cancel", + "http2_op_send_initial_metadata", + "http2_op_send_message", + "http2_op_send_trailing_metadata", + "http2_op_recv_initial_metadata", + "http2_op_recv_message", + "http2_op_recv_trailing_metadata", + "http2_pings_sent", + "http2_writes_begun", + "http2_writes_offloaded", + "combiner_locks_initiated", + "combiner_locks_scheduled_items", + "combiner_locks_scheduled_final_items", + "combiner_locks_offloaded", + "executor_scheduled_short_items", + "executor_scheduled_long_items", + "executor_scheduled_to_self", + "executor_wakeup_initiated", + "executor_queue_drained", + "executor_push_retries", }; +const char 
+const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
+    "tcp_write_size", "tcp_write_iov_size", "tcp_read_size",
+    "tcp_read_offer", "tcp_read_iov_size", "http2_send_message_size",
+};
+const int grpc_stats_table_0[65] = {
+    0, 1, 2, 3, 4, 6, 8, 11,
+    15, 20, 26, 34, 44, 57, 73, 94,
+    121, 155, 199, 255, 327, 419, 537, 688,
+    881, 1128, 1444, 1848, 2365, 3026, 3872, 4954,
+    6338, 8108, 10373, 13270, 16976, 21717, 27782, 35541,
+    45467, 58165, 74409, 95189, 121772, 155778, 199281, 254933,
+    326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
+    2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
+    16777216};
+const uint8_t grpc_stats_table_1[87] = {
+    0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
+    11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
+    24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
+    36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
+    49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
+const int grpc_stats_table_2[65] = {
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+    14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
+    51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
+    151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
+    418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
+const uint8_t grpc_stats_table_3[102] = {
+    0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+    6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
+    14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
+    23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
+    32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+    42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_0[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_2[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_2, 64));
+}
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_0[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_0[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_2[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_2, 64));
+}
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
+                                            int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM(
+        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_0[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(
+        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                           GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_0, 64));
+}
+const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64};
+const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320};
+const int *const grpc_stats_histo_bucket_boundaries[6] = {
+    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
+    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0};
+void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x) = {
+    grpc_stats_inc_tcp_write_size, grpc_stats_inc_tcp_write_iov_size,
+    grpc_stats_inc_tcp_read_size, grpc_stats_inc_tcp_read_offer,
+    grpc_stats_inc_tcp_read_iov_size, grpc_stats_inc_http2_send_message_size};
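The generated increment functions above avoid a binary search on the hot path by exploiting the fact that, for positive values, IEEE-754 double bit patterns are ordered the same way as the values themselves: the value is converted to a double, its bits are shifted so that only the exponent and the top few mantissa bits remain, and that small integer indexes a precomputed byte table (grpc_stats_table_1 / grpc_stats_table_3); the `bucket -= (_val.uint < _bkt.uint)` step corrects the off-by-one the coarse table can introduce. The large unsigned constants are the bit patterns of double-precision threshold values. A standalone illustration of the underlying bit trick (illustrative only, not gRPC code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy is the strictly portable way to reinterpret the bits; the generated
   code uses a union, which also works in practice on the supported
   compilers. */
static uint64_t double_bits(double d) {
  uint64_t u;
  memcpy(&u, &d, sizeof(u));
  return u;
}

int main(void) {
  /* For positive doubles, comparing bit patterns as unsigned integers gives
     the same ordering as comparing the values. */
  printf("%d\n", double_bits(5.0) < double_bits(6.0));     /* prints 1 */
  printf("%d\n", double_bits(940.0) < double_bits(941.5)); /* prints 1 */
  /* Shifting right by 50 keeps the sign bit, the 11 exponent bits and the top
     two mantissa bits, i.e. at most four sub-buckets per power of two --
     small enough to drive a byte-sized lookup table. */
  printf("%llu\n", (unsigned long long)(double_bits(5.0) >> 50)); /* 4101 */
  return 0;
}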
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index c9c2f65c30..b0f5946eb0 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -21,27 +21,163 @@
 #ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H
 #define GRPC_CORE_LIB_DEBUG_STATS_DATA_H
 
+#include <inttypes.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
 typedef enum {
   GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
   GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
-  GRPC_STATS_COUNTER_SYSCALL_WRITE,
-  GRPC_STATS_COUNTER_SYSCALL_READ,
   GRPC_STATS_COUNTER_SYSCALL_POLL,
   GRPC_STATS_COUNTER_SYSCALL_WAIT,
+  GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
+  GRPC_STATS_COUNTER_SYSCALL_WRITE,
+  GRPC_STATS_COUNTER_SYSCALL_READ,
+  GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
+  GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
+  GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
+  GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
+  GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
+  GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE,
+  GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA,
+  GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
+  GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
+  GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
+  GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
+  GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
+  GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
+  GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
+  GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
+  GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
+  GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
+  GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
+  GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
+  GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
+  GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
+  GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
+  GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
   GRPC_STATS_COUNTER_COUNT
 } grpc_stats_counters;
+extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+typedef enum {
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+  GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
+  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+  GRPC_STATS_HISTOGRAM_COUNT
+} grpc_stats_histograms;
+extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
+typedef enum {
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 0,
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 64,
+  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128,
+  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 192,
+  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_FIRST_SLOT = 256,
+  GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320,
+  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
+  GRPC_STATS_HISTOGRAM_BUCKETS = 384
+} grpc_stats_histogram_constants;
 #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
 #define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
-#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
-#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
 #define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
 #define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
-extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
+#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
+#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
+                         GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
+#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
+#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
+                         GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
+                         GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
+                         GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
+                         GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
+#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
+                         GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
+                         GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER(                                             \
+      (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
+                         GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
+                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                           \
+                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
+                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
+#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                       \
+                         GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
+#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
+  grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
+  grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
+  grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \
+  grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, value) \
+  grpc_stats_inc_tcp_read_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
+  grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
+extern const int grpc_stats_histo_buckets[6];
+extern const int grpc_stats_histo_start[6];
+extern const int *const grpc_stats_histo_bucket_boundaries[6];
+extern void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx,
+                                                 int x);
 
 #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
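The *_FIRST_SLOT / *_BUCKETS constants and the grpc_stats_histo_* tables are what tie a logical histogram to its slice of the flat histograms[] array. A sketch of reading one histogram back out of a collected snapshot (illustrative caller, not part of this change):

#include <inttypes.h>
#include <stdio.h>

#include "src/core/lib/debug/stats.h"

/* Print "name[>=boundary] = count" for every bucket of one histogram.
   Bucket i of histogram h lives at histograms[grpc_stats_histo_start[h] + i]
   and starts at value grpc_stats_histo_bucket_boundaries[h][i]. */
static void dump_histogram(const grpc_stats_data *snapshot,
                           grpc_stats_histograms h) {
  for (int i = 0; i < grpc_stats_histo_buckets[h]; i++) {
    printf("%s[>=%d] = %" PRIdPTR "\n", grpc_stats_histogram_name[h],
           grpc_stats_histo_bucket_boundaries[h][i],
           (intptr_t)snapshot->histograms[grpc_stats_histo_start[h] + i]);
  }
}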
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index 8afe48f5cd..64eb4df6d1 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -1,9 +1,73 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Stats data declaration
-# use tools/codegen/core/gen_stats_data.py to turn this into stats_data.h
+# use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h
 
+# overall
 - counter: client_calls_created
 - counter: server_calls_created
-- counter: syscall_write
-- counter: syscall_read
+# polling
 - counter: syscall_poll
 - counter: syscall_wait
+# stats system
+- counter: histogram_slow_lookups
+# tcp
+- counter: syscall_write
+- counter: syscall_read
+- histogram: tcp_write_size
+  max: 16777216 # 16 meg max write tracked
+  buckets: 64
+- histogram: tcp_write_iov_size
+  max: 1024
+  buckets: 64
+- histogram: tcp_read_size
+  max: 16777216
+  buckets: 64
+- histogram: tcp_read_offer
+  max: 16777216
+  buckets: 64
+- histogram: tcp_read_iov_size
+  max: 1024
+  buckets: 64
+- counter: tcp_backup_pollers_created
+- counter: tcp_backup_poller_polls
+# chttp2
+- counter: http2_op_batches
+- counter: http2_op_cancel
+- counter: http2_op_send_initial_metadata
+- counter: http2_op_send_message
+- counter: http2_op_send_trailing_metadata
+- counter: http2_op_recv_initial_metadata
+- counter: http2_op_recv_message
+- counter: http2_op_recv_trailing_metadata
+- histogram: http2_send_message_size
+  max: 16777216
+  buckets: 64
+- counter: http2_pings_sent
+- counter: http2_writes_begun
+- counter: http2_writes_offloaded
+# combiner locks
+- counter: combiner_locks_initiated
+- counter: combiner_locks_scheduled_items
+- counter: combiner_locks_scheduled_final_items
+- counter: combiner_locks_offloaded
+# executor
+- counter: executor_scheduled_short_items
+- counter: executor_scheduled_long_items
+- counter: executor_scheduled_to_self
+- counter: executor_wakeup_initiated
+- counter: executor_queue_drained
+- counter: executor_push_retries
+
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 0e496829f6..c2c2f01f7e 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -24,6 +24,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
+#include "src/core/lib/debug/stats.h"
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/profiling/timers.h"
 
@@ -154,6 +155,7 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
 
 static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                           grpc_error *error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
   GPR_TIMER_BEGIN("combiner.execute", 0);
   grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -161,6 +163,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                               "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                               lock, cl, last));
   if (last == 1) {
+    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
     gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
                              (gpr_atm)exec_ctx);
     // first element on this list: add it to the list of combiner locks
@@ -196,6 +199,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 }
 
 static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
   move_next(exec_ctx);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
   GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
@@ -326,6 +330,7 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
 
 static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *closure, grpc_error *error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
   grpc_combiner *lock =
       COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 8496ce23c0..a41c43edfd 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -28,6 +28,7 @@
 #include <grpc/support/tls.h>
 #include <grpc/support/useful.h>
 
+#include "src/core/lib/debug/stats.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/support/spinlock.h"
 
@@ -168,6 +169,7 @@ static void executor_thread(void *arg) {
       gpr_mu_unlock(&ts->mu);
       break;
     }
+    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
     grpc_closure_list exec = ts->elems;
     ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
     gpr_mu_unlock(&ts->mu);
@@ -183,6 +185,11 @@ static void executor_thread(void *arg) {
 static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                           grpc_error *error, bool is_short) {
   bool retry_push;
+  if (is_short) {
+    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
+  } else {
+    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
+  }
   do {
     retry_push = false;
     size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
@@ -201,6 +208,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
     thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
     if (ts == NULL) {
       ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+    } else {
+      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
     }
     thread_state *orig_ts = ts;
 
@@ -232,6 +241,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
         continue;
       }
      if (grpc_closure_list_empty(ts->elems)) {
+        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
        gpr_cv_signal(&ts->cv);
      }
      grpc_closure_list_append(&ts->elems, closure, error);
@@ -254,6 +264,9 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
       }
       gpr_spinlock_unlock(&g_adding_thread_lock);
     }
+    if (retry_push) {
+      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+    }
   } while (retry_push);
 }
 
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index b15b6d8746..fba2bc017b 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -138,6 +138,7 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
   gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   gpr_timespec deadline =
       gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
+  GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
   GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
                     grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
                                       now, deadline));
@@ -186,6 +187,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
                                 2 + (int)old_count);
   }
   if (old_count == 0) {
+    GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
     p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
     if (GRPC_TRACER_ON(grpc_tcp_trace)) {
       gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
@@ -396,6 +398,9 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   msg.msg_controllen = 0;
   msg.msg_flags = 0;
 
+  GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length);
+  GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count);
+
   GPR_TIMER_BEGIN("recvmsg", 0);
   do {
     GRPC_STATS_INC_SYSCALL_READ(exec_ctx);
@@ -426,6 +431,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
       TCP_UNREF(exec_ctx, tcp, "read");
     } else {
+      GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes);
      add_to_estimate(tcp, (size_t)read_bytes);
      GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
      if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -555,6 +561,9 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
     msg.msg_controllen = 0;
     msg.msg_flags = 0;
 
+    GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length);
+    GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size);
+
     GPR_TIMER_BEGIN("sendmsg", 1);
     do {
       /* TODO(klempner): Cork if this is a partial write */
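For completeness, everything the new counters and histograms collect is also visible through the pre-existing JSON dump, which this change extends with a per-histogram bucket array plus a parallel "<name>_bkt" array of bucket boundaries. A minimal sketch (hypothetical test code, not part of this change):

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/debug/stats.h"

static void log_stats_snapshot(void) {
  grpc_stats_data snapshot;
  grpc_stats_collect(&snapshot);
  char *json = grpc_stats_data_as_json(&snapshot);
  gpr_log(GPR_INFO, "%s", json); /* one JSON object: counters, then buckets */
  gpr_free(json);
}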