From 262da45be70ff67dddb3886e77f9761119d58b3b Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Mon, 22 Feb 2016 09:26:18 -0800 Subject: Put in some performance tests to be run on PRs and master --- tools/jenkins/run_performance.sh | 80 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/jenkins/run_performance.sh b/tools/jenkins/run_performance.sh index d7e73d4272..a5f5181f1e 100755 --- a/tools/jenkins/run_performance.sh +++ b/tools/jenkins/run_performance.sh @@ -49,7 +49,83 @@ PID2=$! export QPS_WORKERS="localhost:10000,localhost:10010" -bins/$config/qps_driver +# big is the size in bytes of large messages (0 is the size otherwise) +big=65536 + +# wide is the number of client channels in multi-channel tests (1 otherwise) +wide=64 + +# deep is the number of RPCs outstanding on a channel in non-ping-pong tests +# (the value used is 1 otherwise) +deep=100 + +# +# Get total core count +cores=`grep -c ^processor /proc/cpuinfo` +halfcores=`expr $cores / 2` + +for secure in true false; do + # Scenario 1: generic async streaming ping-pong (contentionless latency) + bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=1 \ + --client_channels=1 --bbuf_req_size=0 --bbuf_resp_size=0 \ + --async_client_threads=1 --async_server_threads=1 --secure_test=$secure \ + --num_servers=1 --num_clients=1 \ + --server_core_limit=$halfcores --client_core_limit=0 + + # Scenario 2: generic async streaming "unconstrained" (QPS) + bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \ + --client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \ + --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \ + --num_servers=1 --num_clients=0 \ + --server_core_limit=$halfcores --client_core_limit=0 |& tee /tmp/qps-test.$$ + + # Scenario 2b: QPS with a single server core + bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \ + --client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \ + --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \ + --num_servers=1 --num_clients=0 --server_core_limit=1 --client_core_limit=0 + + # Scenario 2c: protobuf-based QPS + bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_SERVER --outstanding_rpcs_per_channel=$deep \ + --client_channels=$wide --simple_req_size=0 --simple_resp_size=0 \ + --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \ + --num_servers=1 --num_clients=0 \ + --server_core_limit=$halfcores --client_core_limit=0 + + # Scenario 3: Latency at sub-peak load (all clients equally loaded) + for loadfactor in 0.7; do + bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \ + --client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \ + --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \ + --num_servers=1 --num_clients=0 --poisson_load=`awk -v lf=$loadfactor \ + '$5 == "QPS:" {print int(lf * $6); exit}' /tmp/qps-test.$$` \ + --server_core_limit=$halfcores --client_core_limit=0 + done + + rm /tmp/qps-test.$$ + + # Scenario 4: Single-channel bidirectional throughput test (like TCP_STREAM). 
+ bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \ + --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \ + --client_channels=1 --bbuf_req_size=$big --bbuf_resp_size=$big \ + --async_client_threads=1 --async_server_threads=1 --secure_test=$secure \ + --num_servers=1 --num_clients=1 \ + --server_core_limit=$halfcores --client_core_limit=0 + + # Scenario 5: Sync unary ping-pong with protobufs + bins/$config/qps_driver --rpc_type=UNARY --client_type=SYNC_CLIENT \ + --server_type=SYNC_SERVER --outstanding_rpcs_per_channel=1 \ + --client_channels=1 --simple_req_size=0 --simple_resp_size=0 \ + --secure_test=$secure --num_servers=1 --num_clients=1 \ + --server_core_limit=$halfcores --client_core_limit=0 + +done + +bins/$config/qps_driver --quit=true -kill -2 $PID1 $PID2 wait -- cgit v1.2.3 From 4699415750c31cf8a7ff803e57b3499735796e27 Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Mon, 22 Feb 2016 13:26:48 -0800 Subject: Fix sanity --- tools/jenkins/run_performance.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/jenkins/run_performance.sh b/tools/jenkins/run_performance.sh index a5f5181f1e..c80685b23a 100755 --- a/tools/jenkins/run_performance.sh +++ b/tools/jenkins/run_performance.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2015, Google Inc. +# Copyright 2015-2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without -- cgit v1.2.3 From 777154411167400e19201bc32dce4eccbae1a55b Mon Sep 17 00:00:00 2001 From: Alistair Veitch Date: Mon, 22 Feb 2016 14:20:25 -0800 Subject: rename census log files to avoid build breakage on MacOS --- BUILD | 12 +- Makefile | 8 +- binding.gyp | 2 +- build.yaml | 6 +- gRPC.podspec | 6 +- grpc.gemspec | 4 +- package.json | 4 +- src/core/census/log.c | 600 --------------------- src/core/census/log.h | 93 ---- src/core/census/mlog.c | 600 +++++++++++++++++++++ src/core/census/mlog.h | 95 ++++ src/python/grpcio/grpc_core_dependencies.py | 2 +- test/core/census/log_test.c | 589 -------------------- test/core/census/mlog_test.c | 589 ++++++++++++++++++++ tools/doxygen/Doxyfile.core.internal | 4 +- tools/run_tests/sources_and_headers.json | 14 +- vsprojects/vcxproj/grpc/grpc.vcxproj | 4 +- vsprojects/vcxproj/grpc/grpc.vcxproj.filters | 4 +- .../vcxproj/grpc_unsecure/grpc_unsecure.vcxproj | 4 +- .../grpc_unsecure/grpc_unsecure.vcxproj.filters | 4 +- .../test/census_log_test/census_log_test.vcxproj | 2 +- .../census_log_test.vcxproj.filters | 2 +- 22 files changed, 1325 insertions(+), 1323 deletions(-) delete mode 100644 src/core/census/log.c delete mode 100644 src/core/census/log.h create mode 100644 src/core/census/mlog.c create mode 100644 src/core/census/mlog.h delete mode 100644 test/core/census/log_test.c create mode 100644 test/core/census/mlog_test.c (limited to 'tools') diff --git a/BUILD b/BUILD index 0a5139d447..f6c4338493 100644 --- a/BUILD +++ b/BUILD @@ -283,7 +283,7 @@ cc_library( "src/core/tsi/transport_security.h", "src/core/tsi/transport_security_interface.h", "src/core/census/aggregation.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", "src/core/census/grpc_context.c", "src/core/census/grpc_filter.c", @@ -436,7 +436,7 @@ cc_library( "src/core/tsi/transport_security.c", "src/core/census/context.c", "src/core/census/initialize.c", - "src/core/census/log.c", + "src/core/census/mlog.c", "src/core/census/operation.c", "src/core/census/placeholders.c", 
"src/core/census/tracing.c", @@ -589,7 +589,7 @@ cc_library( "src/core/transport/transport.h", "src/core/transport/transport_impl.h", "src/core/census/aggregation.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", "src/core/surface/init_unsecure.c", "src/core/census/grpc_context.c", @@ -722,7 +722,7 @@ cc_library( "src/core/transport/transport_op_string.c", "src/core/census/context.c", "src/core/census/initialize.c", - "src/core/census/log.c", + "src/core/census/mlog.c", "src/core/census/operation.c", "src/core/census/placeholders.c", "src/core/census/tracing.c", @@ -1400,7 +1400,7 @@ objc_library( "src/core/tsi/transport_security.c", "src/core/census/context.c", "src/core/census/initialize.c", - "src/core/census/log.c", + "src/core/census/mlog.c", "src/core/census/operation.c", "src/core/census/placeholders.c", "src/core/census/tracing.c", @@ -1548,7 +1548,7 @@ objc_library( "src/core/tsi/transport_security.h", "src/core/tsi/transport_security_interface.h", "src/core/census/aggregation.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", ], includes = [ diff --git a/Makefile b/Makefile index 51b892187b..f285d5a01c 100644 --- a/Makefile +++ b/Makefile @@ -2469,7 +2469,7 @@ LIBGRPC_SRC = \ src/core/tsi/transport_security.c \ src/core/census/context.c \ src/core/census/initialize.c \ - src/core/census/log.c \ + src/core/census/mlog.c \ src/core/census/operation.c \ src/core/census/placeholders.c \ src/core/census/tracing.c \ @@ -2792,7 +2792,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/transport/transport_op_string.c \ src/core/census/context.c \ src/core/census/initialize.c \ - src/core/census/log.c \ + src/core/census/mlog.c \ src/core/census/operation.c \ src/core/census/placeholders.c \ src/core/census/tracing.c \ @@ -5877,7 +5877,7 @@ endif CENSUS_LOG_TEST_SRC = \ - test/core/census/log_test.c \ + test/core/census/mlog_test.c \ CENSUS_LOG_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CENSUS_LOG_TEST_SRC)))) ifeq ($(NO_SECURE),true) @@ -5897,7 +5897,7 @@ $(BINDIR)/$(CONFIG)/census_log_test: $(CENSUS_LOG_TEST_OBJS) $(LIBDIR)/$(CONFIG) endif -$(OBJDIR)/$(CONFIG)/test/core/census/log_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/core/census/mlog_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_census_log_test: $(CENSUS_LOG_TEST_OBJS:.o=.dep) diff --git a/binding.gyp b/binding.gyp index a2a64fff13..0f5817e8dc 100644 --- a/binding.gyp +++ b/binding.gyp @@ -708,7 +708,7 @@ 'src/core/tsi/transport_security.c', 'src/core/census/context.c', 'src/core/census/initialize.c', - 'src/core/census/log.c', + 'src/core/census/mlog.c', 'src/core/census/operation.c', 'src/core/census/placeholders.c', 'src/core/census/tracing.c', diff --git a/build.yaml b/build.yaml index cb6cb56844..793cb8292d 100644 --- a/build.yaml +++ b/build.yaml @@ -14,12 +14,12 @@ filegroups: - include/grpc/census.h headers: - src/core/census/aggregation.h - - src/core/census/log.h + - src/core/census/mlog.h - src/core/census/rpc_metric_id.h src: - src/core/census/context.c - src/core/census/initialize.c - - src/core/census/log.c + - src/core/census/mlog.c - src/core/census/operation.c - src/core/census/placeholders.c - src/core/census/tracing.c @@ -982,7 +982,7 @@ targets: build: test language: c src: - - 
test/core/census/log_test.c + - test/core/census/mlog_test.c deps: - grpc_test_util - grpc diff --git a/gRPC.podspec b/gRPC.podspec index d2d151b96e..b40c80a081 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -287,7 +287,7 @@ Pod::Spec.new do |s| 'src/core/tsi/transport_security.h', 'src/core/tsi/transport_security_interface.h', 'src/core/census/aggregation.h', - 'src/core/census/log.h', + 'src/core/census/mlog.h', 'src/core/census/rpc_metric_id.h', 'include/grpc/grpc_security.h', 'include/grpc/impl/codegen/byte_buffer.h', @@ -453,7 +453,7 @@ Pod::Spec.new do |s| 'src/core/tsi/transport_security.c', 'src/core/census/context.c', 'src/core/census/initialize.c', - 'src/core/census/log.c', + 'src/core/census/mlog.c', 'src/core/census/operation.c', 'src/core/census/placeholders.c', 'src/core/census/tracing.c' @@ -597,7 +597,7 @@ Pod::Spec.new do |s| 'src/core/tsi/transport_security.h', 'src/core/tsi/transport_security_interface.h', 'src/core/census/aggregation.h', - 'src/core/census/log.h', + 'src/core/census/mlog.h', 'src/core/census/rpc_metric_id.h' ss.header_mappings_dir = '.' diff --git a/grpc.gemspec b/grpc.gemspec index 061b0f2e94..ae8cfaf7c9 100755 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -283,7 +283,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/tsi/transport_security.h ) s.files += %w( src/core/tsi/transport_security_interface.h ) s.files += %w( src/core/census/aggregation.h ) - s.files += %w( src/core/census/log.h ) + s.files += %w( src/core/census/mlog.h ) s.files += %w( src/core/census/rpc_metric_id.h ) s.files += %w( src/core/census/grpc_context.c ) s.files += %w( src/core/census/grpc_filter.c ) @@ -436,7 +436,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/tsi/transport_security.c ) s.files += %w( src/core/census/context.c ) s.files += %w( src/core/census/initialize.c ) - s.files += %w( src/core/census/log.c ) + s.files += %w( src/core/census/mlog.c ) s.files += %w( src/core/census/operation.c ) s.files += %w( src/core/census/placeholders.c ) s.files += %w( src/core/census/tracing.c ) diff --git a/package.json b/package.json index 0f7f4545ce..d843020eaf 100644 --- a/package.json +++ b/package.json @@ -228,7 +228,7 @@ "src/core/tsi/transport_security.h", "src/core/tsi/transport_security_interface.h", "src/core/census/aggregation.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", "src/core/census/grpc_context.c", "src/core/census/grpc_filter.c", @@ -381,7 +381,7 @@ "src/core/tsi/transport_security.c", "src/core/census/context.c", "src/core/census/initialize.c", - "src/core/census/log.c", + "src/core/census/mlog.c", "src/core/census/operation.c", "src/core/census/placeholders.c", "src/core/census/tracing.c", diff --git a/src/core/census/log.c b/src/core/census/log.c deleted file mode 100644 index 91b26941b8..0000000000 --- a/src/core/census/log.c +++ /dev/null @@ -1,600 +0,0 @@ -/* - * - * Copyright 2015-2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Implements an efficient in-memory log, optimized for multiple writers and -// a single reader. Available log space is divided up in blocks of -// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following -// three data structures: -// - Free blocks (free_block_list) -// - Blocks with unread data (dirty_block_list) -// - Blocks currently attached to cores (core_local_blocks[]) -// -// census_log_start_write() moves a block from core_local_blocks[] to the end of -// dirty_block_list when block: -// - is out-of-space OR -// - has an incomplete record (an incomplete record occurs when a thread calls -// census_log_start_write() and is context-switched before calling -// census_log_end_write() -// So, blocks in dirty_block_list are ordered, from oldest to newest, by the -// time when block is detached from the core. -// -// census_log_read_next() first iterates over dirty_block_list and then -// core_local_blocks[]. It moves completely read blocks from dirty_block_list -// to free_block_list. Blocks in core_local_blocks[] are not freed, even when -// completely read. -// -// If the log is configured to discard old records and free_block_list is empty, -// census_log_start_write() iterates over dirty_block_list to allocate a -// new block. It moves the oldest available block (no pending read/write) to -// core_local_blocks[]. -// -// core_local_block_struct is used to implement a map from core id to the block -// associated with that core. This mapping is advisory. It is possible that the -// block returned by this mapping is no longer associated with that core. This -// mapping is updated, lazily, by census_log_start_write(). -// -// Locking in block struct: -// -// Exclusive g_log.lock must be held before calling any functions operating on -// block structs except census_log_start_write() and census_log_end_write(). -// -// Writes to a block are serialized via writer_lock. census_log_start_write() -// acquires this lock and census_log_end_write() releases it. On failure to -// acquire the lock, writer allocates a new block for the current core and -// updates core_local_block accordingly. -// -// Simultaneous read and write access is allowed. Readers can safely read up to -// committed bytes (bytes_committed). -// -// reader_lock protects the block, currently being read, from getting recycled. -// start_read() acquires reader_lock and end_read() releases the lock. -// -// Read/write access to a block is disabled via try_disable_access(). It returns -// with both writer_lock and reader_lock held. 
These locks are subsequently -// released by enable_access() to enable access to the block. -// -// A note on naming: Most function/struct names are prepended by cl_ -// (shorthand for census_log). Further, functions that manipulate structures -// include the name of the structure, which will be passed as the first -// argument. E.g. cl_block_initialize() will initialize a cl_block. - -#include "src/core/census/log.h" -#include -#include -#include -#include -#include -#include -#include -#include - -// End of platform specific code - -typedef struct census_log_block_list_struct { - struct census_log_block_list_struct* next; - struct census_log_block_list_struct* prev; - struct census_log_block* block; -} cl_block_list_struct; - -typedef struct census_log_block { - // Pointer to underlying buffer. - char* buffer; - gpr_atm writer_lock; - gpr_atm reader_lock; - // Keeps completely written bytes. Declared atomic because accessed - // simultaneously by reader and writer. - gpr_atm bytes_committed; - // Bytes already read. - size_t bytes_read; - // Links for list. - cl_block_list_struct link; -// We want this structure to be cacheline aligned. We assume the following -// sizes for the various parts on 32/64bit systems: -// type 32b size 64b size -// char* 4 8 -// 3x gpr_atm 12 24 -// size_t 4 8 -// cl_block_list_struct 12 24 -// TOTAL 32 64 -// -// Depending on the size of our cacheline and the architecture, we -// selectively add char buffering to this structure. The size is checked -// via assert in census_log_initialize(). -#if defined(GPR_ARCH_64) -#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64) -#else -#if defined(GPR_ARCH_32) -#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32) -#else -#error "Unknown architecture" -#endif -#endif -#if CL_BLOCK_PAD_SIZE > 0 - char padding[CL_BLOCK_PAD_SIZE]; -#endif -} cl_block; - -// A list of cl_blocks, doubly-linked through cl_block::link. -typedef struct census_log_block_list { - int32_t count; // Number of items in list. - cl_block_list_struct ht; // head/tail of linked list. -} cl_block_list; - -// Cacheline aligned block pointers to avoid false sharing. Block pointer must -// be initialized via set_block(), before calling other functions -typedef struct census_log_core_local_block { - gpr_atm block; -// Ensure cachline alignment: we assume sizeof(gpr_atm) == 4 or 8 -#if defined(GPR_ARCH_64) -#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8) -#else -#if defined(GPR_ARCH_32) -#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4) -#else -#error "Unknown architecture" -#endif -#endif -#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0 - char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE]; -#endif -} cl_core_local_block; - -struct census_log { - int discard_old_records; - // Number of cores (aka hardware-contexts) - unsigned num_cores; - // number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log - uint32_t num_blocks; - cl_block* blocks; // Block metadata. - cl_core_local_block* core_local_blocks; // Keeps core to block mappings. - gpr_mu lock; - int initialized; // has log been initialized? - // Keeps the state of the reader iterator. A value of 0 indicates that - // iterator has reached the end. census_log_init_reader() resets the value - // to num_core to restart iteration. - uint32_t read_iterator_state; - // Points to the block being read. If non-NULL, the block is locked for - // reading(block_being_read_->reader_lock is held). 
- cl_block* block_being_read; - char* buffer; - cl_block_list free_block_list; - cl_block_list dirty_block_list; - gpr_atm out_of_space_count; -}; - -// Single internal log. -static struct census_log g_log; - -// Functions that operate on an atomic memory location used as a lock. - -// Returns non-zero if lock is acquired. -static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); } - -static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); } - -// Functions that operate on cl_core_local_block's. - -static void cl_core_local_block_set_block(cl_core_local_block* clb, - cl_block* block) { - gpr_atm_rel_store(&clb->block, (gpr_atm)block); -} - -static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) { - return (cl_block*)gpr_atm_acq_load(&clb->block); -} - -// Functions that operate on cl_block_list_struct's. - -static void cl_block_list_struct_initialize(cl_block_list_struct* bls, - cl_block* block) { - bls->next = bls->prev = bls; - bls->block = block; -} - -// Functions that operate on cl_block_list's. - -static void cl_block_list_initialize(cl_block_list* list) { - list->count = 0; - cl_block_list_struct_initialize(&list->ht, NULL); -} - -// Returns head of *this, or NULL if empty. -static cl_block* cl_block_list_head(cl_block_list* list) { - return list->ht.next->block; -} - -// Insert element *e after *pos. -static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos, - cl_block_list_struct* e) { - list->count++; - e->next = pos->next; - e->prev = pos; - e->next->prev = e; - e->prev->next = e; -} - -// Insert block at the head of the list -static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) { - cl_block_list_insert(list, &list->ht, &block->link); -} - -// Insert block at the tail of the list. -static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) { - cl_block_list_insert(list, list->ht.prev, &block->link); -} - -// Removes block *b. Requires *b be in the list. -static void cl_block_list_remove(cl_block_list* list, cl_block* b) { - list->count--; - b->link.next->prev = b->link.prev; - b->link.prev->next = b->link.next; -} - -// Functions that operate on cl_block's - -static void cl_block_initialize(cl_block* block, char* buffer) { - block->buffer = buffer; - gpr_atm_rel_store(&block->writer_lock, 0); - gpr_atm_rel_store(&block->reader_lock, 0); - gpr_atm_rel_store(&block->bytes_committed, 0); - block->bytes_read = 0; - cl_block_list_struct_initialize(&block->link, block); -} - -// Guards against exposing partially written buffer to the reader. -static void cl_block_set_bytes_committed(cl_block* block, - size_t bytes_committed) { - gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed); -} - -static size_t cl_block_get_bytes_committed(cl_block* block) { - return (size_t)gpr_atm_acq_load(&block->bytes_committed); -} - -// Tries to disable future read/write access to this block. Succeeds if: -// - no in-progress write AND -// - no in-progress read AND -// - 'discard_data' set to true OR no unread data -// On success, clears the block state and returns with writer_lock_ and -// reader_lock_ held. These locks are released by a subsequent -// cl_block_access_enable() call. 
-static bool cl_block_try_disable_access(cl_block* block, int discard_data) { - if (!cl_try_lock(&block->writer_lock)) { - return false; - } - if (!cl_try_lock(&block->reader_lock)) { - cl_unlock(&block->writer_lock); - return false; - } - if (!discard_data && - (block->bytes_read != cl_block_get_bytes_committed(block))) { - cl_unlock(&block->reader_lock); - cl_unlock(&block->writer_lock); - return false; - } - cl_block_set_bytes_committed(block, 0); - block->bytes_read = 0; - return true; -} - -static void cl_block_enable_access(cl_block* block) { - cl_unlock(&block->reader_lock); - cl_unlock(&block->writer_lock); -} - -// Returns with writer_lock held. -static void* cl_block_start_write(cl_block* block, size_t size) { - if (!cl_try_lock(&block->writer_lock)) { - return NULL; - } - size_t bytes_committed = cl_block_get_bytes_committed(block); - if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) { - cl_unlock(&block->writer_lock); - return NULL; - } - return block->buffer + bytes_committed; -} - -// Releases writer_lock and increments committed bytes by 'bytes_written'. -// 'bytes_written' must be <= 'size' specified in the corresponding -// StartWrite() call. This function is thread-safe. -static void cl_block_end_write(cl_block* block, size_t bytes_written) { - cl_block_set_bytes_committed( - block, cl_block_get_bytes_committed(block) + bytes_written); - cl_unlock(&block->writer_lock); -} - -// Returns a pointer to the first unread byte in buffer. The number of bytes -// available are returned in 'bytes_available'. Acquires reader lock that is -// released by a subsequent cl_block_end_read() call. Returns NULL if: -// - read in progress -// - no data available -static void* cl_block_start_read(cl_block* block, size_t* bytes_available) { - if (!cl_try_lock(&block->reader_lock)) { - return NULL; - } - // bytes_committed may change from under us. Use bytes_available to update - // bytes_read below. - size_t bytes_committed = cl_block_get_bytes_committed(block); - GPR_ASSERT(bytes_committed >= block->bytes_read); - *bytes_available = bytes_committed - block->bytes_read; - if (*bytes_available == 0) { - cl_unlock(&block->reader_lock); - return NULL; - } - void* record = block->buffer + block->bytes_read; - block->bytes_read += *bytes_available; - return record; -} - -static void cl_block_end_read(cl_block* block) { - cl_unlock(&block->reader_lock); -} - -// Internal functions operating on g_log - -// Allocates a new free block (or recycles an available dirty block if log is -// configured to discard old records). Returns NULL if out-of-space. -static cl_block* cl_allocate_block(void) { - cl_block* block = cl_block_list_head(&g_log.free_block_list); - if (block != NULL) { - cl_block_list_remove(&g_log.free_block_list, block); - return block; - } - if (!g_log.discard_old_records) { - // No free block and log is configured to keep old records. - return NULL; - } - // Recycle dirty block. Start from the oldest. - for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL; - block = block->link.next->block) { - if (cl_block_try_disable_access(block, 1 /* discard data */)) { - cl_block_list_remove(&g_log.dirty_block_list, block); - return block; - } - } - return NULL; -} - -// Allocates a new block and updates core id => block mapping. 'old_block' -// points to the block that the caller thinks is attached to -// 'core_id'. 'old_block' may be NULL. 
Returns true if: -// - allocated a new block OR -// - 'core_id' => 'old_block' mapping changed (another thread allocated a -// block before lock was acquired). -static bool cl_allocate_core_local_block(uint32_t core_id, - cl_block* old_block) { - // Now that we have the lock, check if core-local mapping has changed. - cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id]; - cl_block* block = cl_core_local_block_get_block(core_local_block); - if ((block != NULL) && (block != old_block)) { - return true; - } - if (block != NULL) { - cl_core_local_block_set_block(core_local_block, NULL); - cl_block_list_insert_at_tail(&g_log.dirty_block_list, block); - } - block = cl_allocate_block(); - if (block == NULL) { - return false; - } - cl_core_local_block_set_block(core_local_block, block); - cl_block_enable_access(block); - return true; -} - -static cl_block* cl_get_block(void* record) { - uintptr_t p = (uintptr_t)((char*)record - g_log.buffer); - uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE; - return &g_log.blocks[index]; -} - -// Gets the next block to read and tries to free 'prev' block (if not NULL). -// Returns NULL if reached the end. -static cl_block* cl_next_block_to_read(cl_block* prev) { - cl_block* block = NULL; - if (g_log.read_iterator_state == g_log.num_cores) { - // We are traversing dirty list; find the next dirty block. - if (prev != NULL) { - // Try to free the previous block if there is no unread data. This - // block - // may have unread data if previously incomplete record completed - // between - // read_next() calls. - block = prev->link.next->block; - if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) { - cl_block_list_remove(&g_log.dirty_block_list, prev); - cl_block_list_insert_at_head(&g_log.free_block_list, prev); - } - } else { - block = cl_block_list_head(&g_log.dirty_block_list); - } - if (block != NULL) { - return block; - } - // We are done with the dirty list; moving on to core-local blocks. - } - while (g_log.read_iterator_state > 0) { - g_log.read_iterator_state--; - block = cl_core_local_block_get_block( - &g_log.core_local_blocks[g_log.read_iterator_state]); - if (block != NULL) { - return block; - } - } - return NULL; -} - -#define CL_LOG_2_MB 20 // 2^20 = 1MB - -// External functions: primary stats_log interface -void census_log_initialize(size_t size_in_mb, int discard_old_records) { - // Check cacheline alignment. - GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0); - GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0); - GPR_ASSERT(!g_log.initialized); - g_log.discard_old_records = discard_old_records; - g_log.num_cores = gpr_cpu_num_cores(); - // Ensure that we will not get any overflow in calaculating num_blocks - GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE); - GPR_ASSERT(size_in_mb < 1000); - // Ensure at least 2x as many blocks as there are cores. 
- g_log.num_blocks = - (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >> - CENSUS_LOG_2_MAX_RECORD_SIZE); - gpr_mu_init(&g_log.lock); - g_log.read_iterator_state = 0; - g_log.block_being_read = NULL; - g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned( - g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG); - memset(g_log.core_local_blocks, 0, - g_log.num_cores * sizeof(cl_core_local_block)); - g_log.blocks = (cl_block*)gpr_malloc_aligned( - g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG); - memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block)); - g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); - memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); - cl_block_list_initialize(&g_log.free_block_list); - cl_block_list_initialize(&g_log.dirty_block_list); - for (uint32_t i = 0; i < g_log.num_blocks; ++i) { - cl_block* block = g_log.blocks + i; - cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i)); - cl_block_try_disable_access(block, 1 /* discard data */); - cl_block_list_insert_at_tail(&g_log.free_block_list, block); - } - gpr_atm_rel_store(&g_log.out_of_space_count, 0); - g_log.initialized = 1; -} - -void census_log_shutdown(void) { - GPR_ASSERT(g_log.initialized); - gpr_mu_destroy(&g_log.lock); - gpr_free_aligned(g_log.core_local_blocks); - g_log.core_local_blocks = NULL; - gpr_free_aligned(g_log.blocks); - g_log.blocks = NULL; - gpr_free(g_log.buffer); - g_log.buffer = NULL; - g_log.initialized = 0; -} - -void* census_log_start_write(size_t size) { - // Used to bound number of times block allocation is attempted. - GPR_ASSERT(size > 0); - GPR_ASSERT(g_log.initialized); - if (size > CENSUS_LOG_MAX_RECORD_SIZE) { - return NULL; - } - uint32_t attempts_remaining = g_log.num_blocks; - uint32_t core_id = gpr_cpu_current_cpu(); - do { - void* record = NULL; - cl_block* block = - cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]); - if (block && (record = cl_block_start_write(block, size))) { - return record; - } - // Need to allocate a new block. We are here if: - // - No block associated with the core OR - // - Write in-progress on the block OR - // - block is out of space - gpr_mu_lock(&g_log.lock); - bool allocated = cl_allocate_core_local_block(core_id, block); - gpr_mu_unlock(&g_log.lock); - if (!allocated) { - gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); - return NULL; - } - } while (attempts_remaining--); - // Give up. - gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); - return NULL; -} - -void census_log_end_write(void* record, size_t bytes_written) { - GPR_ASSERT(g_log.initialized); - cl_block_end_write(cl_get_block(record), bytes_written); -} - -void census_log_init_reader(void) { - GPR_ASSERT(g_log.initialized); - gpr_mu_lock(&g_log.lock); - // If a block is locked for reading unlock it. 
- if (g_log.block_being_read != NULL) { - cl_block_end_read(g_log.block_being_read); - g_log.block_being_read = NULL; - } - g_log.read_iterator_state = g_log.num_cores; - gpr_mu_unlock(&g_log.lock); -} - -const void* census_log_read_next(size_t* bytes_available) { - GPR_ASSERT(g_log.initialized); - gpr_mu_lock(&g_log.lock); - if (g_log.block_being_read != NULL) { - cl_block_end_read(g_log.block_being_read); - } - do { - g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read); - if (g_log.block_being_read != NULL) { - void* record = - cl_block_start_read(g_log.block_being_read, bytes_available); - if (record != NULL) { - gpr_mu_unlock(&g_log.lock); - return record; - } - } - } while (g_log.block_being_read != NULL); - gpr_mu_unlock(&g_log.lock); - return NULL; -} - -size_t census_log_remaining_space(void) { - GPR_ASSERT(g_log.initialized); - size_t space = 0; - gpr_mu_lock(&g_log.lock); - if (g_log.discard_old_records) { - // Remaining space is not meaningful; just return the entire log space. - space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE; - } else { - GPR_ASSERT(g_log.free_block_list.count >= 0); - space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE; - } - gpr_mu_unlock(&g_log.lock); - return space; -} - -int64_t census_log_out_of_space_count(void) { - GPR_ASSERT(g_log.initialized); - return gpr_atm_acq_load(&g_log.out_of_space_count); -} diff --git a/src/core/census/log.h b/src/core/census/log.h deleted file mode 100644 index 05daea066f..0000000000 --- a/src/core/census/log.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2015-2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef GRPC_INTERNAL_CORE_CENSUS_LOG_H -#define GRPC_INTERNAL_CORE_CENSUS_LOG_H - -#include -#include - -/* Maximum record size, in bytes. */ -#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */ -#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE) - -/* Initialize the statistics logging subsystem with the given log size. 
A log - size of 0 will result in the smallest possible log for the platform - (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If - discard_old_records is non-zero, then new records will displace older ones - when the log is full. This function must be called before any other - census_log functions. -*/ -void census_log_initialize(size_t size_in_mb, int discard_old_records); - -/* Shutdown the logging subsystem. Caller must ensure that: - - no in progress or future call to any census_log functions - - no incomplete records -*/ -void census_log_shutdown(void); - -/* Allocates and returns a 'size' bytes record and marks it in use. A - subsequent census_log_end_write() marks the record complete. The - 'bytes_written' census_log_end_write() argument must be <= - 'size'. Returns NULL if out-of-space AND: - - log is configured to keep old records OR - - all blocks are pinned by incomplete records. -*/ -void* census_log_start_write(size_t size); - -void census_log_end_write(void* record, size_t bytes_written); - -void census_log_init_reader(void); - -/* census_log_read_next() iterates over blocks with data and for each block - returns a pointer to the first unread byte. The number of bytes that can be - read are returned in 'bytes_available'. Reader is expected to read all - available data. Reading the data consumes it i.e. it cannot be read again. - census_log_read_next() returns NULL if the end is reached i.e last block - is read. census_log_init_reader() starts the iteration or aborts the - current iteration. -*/ -const void* census_log_read_next(size_t* bytes_available); - -/* Returns estimated remaining space across all blocks, in bytes. If log is - configured to discard old records, returns total log space. Otherwise, - returns space available in empty blocks (partially filled blocks are - treated as full). -*/ -size_t census_log_remaining_space(void); - -/* Returns the number of times gprc_stats_log_start_write() failed due to - out-of-space. */ -int64_t census_log_out_of_space_count(void); - -#endif /* GRPC_INTERNAL_CORE_CENSUS_LOG_H */ diff --git a/src/core/census/mlog.c b/src/core/census/mlog.c new file mode 100644 index 0000000000..a2cc46d3f2 --- /dev/null +++ b/src/core/census/mlog.c @@ -0,0 +1,600 @@ +/* + * + * Copyright 2015-2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Implements an efficient in-memory log, optimized for multiple writers and +// a single reader. Available log space is divided up in blocks of +// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following +// three data structures: +// - Free blocks (free_block_list) +// - Blocks with unread data (dirty_block_list) +// - Blocks currently attached to cores (core_local_blocks[]) +// +// census_log_start_write() moves a block from core_local_blocks[] to the end of +// dirty_block_list when block: +// - is out-of-space OR +// - has an incomplete record (an incomplete record occurs when a thread calls +// census_log_start_write() and is context-switched before calling +// census_log_end_write() +// So, blocks in dirty_block_list are ordered, from oldest to newest, by the +// time when block is detached from the core. +// +// census_log_read_next() first iterates over dirty_block_list and then +// core_local_blocks[]. It moves completely read blocks from dirty_block_list +// to free_block_list. Blocks in core_local_blocks[] are not freed, even when +// completely read. +// +// If the log is configured to discard old records and free_block_list is empty, +// census_log_start_write() iterates over dirty_block_list to allocate a +// new block. It moves the oldest available block (no pending read/write) to +// core_local_blocks[]. +// +// core_local_block_struct is used to implement a map from core id to the block +// associated with that core. This mapping is advisory. It is possible that the +// block returned by this mapping is no longer associated with that core. This +// mapping is updated, lazily, by census_log_start_write(). +// +// Locking in block struct: +// +// Exclusive g_log.lock must be held before calling any functions operating on +// block structs except census_log_start_write() and census_log_end_write(). +// +// Writes to a block are serialized via writer_lock. census_log_start_write() +// acquires this lock and census_log_end_write() releases it. On failure to +// acquire the lock, writer allocates a new block for the current core and +// updates core_local_block accordingly. +// +// Simultaneous read and write access is allowed. Readers can safely read up to +// committed bytes (bytes_committed). +// +// reader_lock protects the block, currently being read, from getting recycled. +// start_read() acquires reader_lock and end_read() releases the lock. +// +// Read/write access to a block is disabled via try_disable_access(). It returns +// with both writer_lock and reader_lock held. These locks are subsequently +// released by enable_access() to enable access to the block. +// +// A note on naming: Most function/struct names are prepended by cl_ +// (shorthand for census_log). Further, functions that manipulate structures +// include the name of the structure, which will be passed as the first +// argument. E.g. cl_block_initialize() will initialize a cl_block. 
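Seen from outside the block machinery described above, the log reduces to a writer pair (start/end write) and a reader iterator. A minimal round-trip sketch, assuming a 1 MB log configured to discard old records; the example_usage() wrapper and its payload are illustrative only, not part of this file:

    #include <string.h>
    #include "src/core/census/mlog.h"

    static void example_usage(void) {
      census_log_initialize(1 /* size_in_mb */, 1 /* discard_old_records */);
      /* Writer: reserve space, fill it, then commit what was actually written. */
      const char payload[] = "example record";
      void* slot = census_log_start_write(sizeof(payload));
      if (slot != NULL) {
        memcpy(slot, payload, sizeof(payload));
        census_log_end_write(slot, sizeof(payload));
      }
      /* Reader: restart the iteration, then drain every committed byte. */
      census_log_init_reader();
      size_t available;
      for (const void* data = census_log_read_next(&available); data != NULL;
           data = census_log_read_next(&available)) {
        /* consume 'available' bytes starting at 'data' */
      }
      census_log_shutdown();
    }
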
+ +#include "src/core/census/mlog.h" +#include +#include +#include +#include +#include +#include +#include +#include + +// End of platform specific code + +typedef struct census_log_block_list_struct { + struct census_log_block_list_struct* next; + struct census_log_block_list_struct* prev; + struct census_log_block* block; +} cl_block_list_struct; + +typedef struct census_log_block { + // Pointer to underlying buffer. + char* buffer; + gpr_atm writer_lock; + gpr_atm reader_lock; + // Keeps completely written bytes. Declared atomic because accessed + // simultaneously by reader and writer. + gpr_atm bytes_committed; + // Bytes already read. + size_t bytes_read; + // Links for list. + cl_block_list_struct link; +// We want this structure to be cacheline aligned. We assume the following +// sizes for the various parts on 32/64bit systems: +// type 32b size 64b size +// char* 4 8 +// 3x gpr_atm 12 24 +// size_t 4 8 +// cl_block_list_struct 12 24 +// TOTAL 32 64 +// +// Depending on the size of our cacheline and the architecture, we +// selectively add char buffering to this structure. The size is checked +// via assert in census_log_initialize(). +#if defined(GPR_ARCH_64) +#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64) +#else +#if defined(GPR_ARCH_32) +#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32) +#else +#error "Unknown architecture" +#endif +#endif +#if CL_BLOCK_PAD_SIZE > 0 + char padding[CL_BLOCK_PAD_SIZE]; +#endif +} cl_block; + +// A list of cl_blocks, doubly-linked through cl_block::link. +typedef struct census_log_block_list { + int32_t count; // Number of items in list. + cl_block_list_struct ht; // head/tail of linked list. +} cl_block_list; + +// Cacheline aligned block pointers to avoid false sharing. Block pointer must +// be initialized via set_block(), before calling other functions +typedef struct census_log_core_local_block { + gpr_atm block; +// Ensure cachline alignment: we assume sizeof(gpr_atm) == 4 or 8 +#if defined(GPR_ARCH_64) +#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8) +#else +#if defined(GPR_ARCH_32) +#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4) +#else +#error "Unknown architecture" +#endif +#endif +#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0 + char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE]; +#endif +} cl_core_local_block; + +struct census_log { + int discard_old_records; + // Number of cores (aka hardware-contexts) + unsigned num_cores; + // number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log + uint32_t num_blocks; + cl_block* blocks; // Block metadata. + cl_core_local_block* core_local_blocks; // Keeps core to block mappings. + gpr_mu lock; + int initialized; // has log been initialized? + // Keeps the state of the reader iterator. A value of 0 indicates that + // iterator has reached the end. census_log_init_reader() resets the value + // to num_core to restart iteration. + uint32_t read_iterator_state; + // Points to the block being read. If non-NULL, the block is locked for + // reading(block_being_read_->reader_lock is held). + cl_block* block_being_read; + char* buffer; + cl_block_list free_block_list; + cl_block_list dirty_block_list; + gpr_atm out_of_space_count; +}; + +// Single internal log. +static struct census_log g_log; + +// Functions that operate on an atomic memory location used as a lock. + +// Returns non-zero if lock is acquired. 
+static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); } + +static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); } + +// Functions that operate on cl_core_local_block's. + +static void cl_core_local_block_set_block(cl_core_local_block* clb, + cl_block* block) { + gpr_atm_rel_store(&clb->block, (gpr_atm)block); +} + +static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) { + return (cl_block*)gpr_atm_acq_load(&clb->block); +} + +// Functions that operate on cl_block_list_struct's. + +static void cl_block_list_struct_initialize(cl_block_list_struct* bls, + cl_block* block) { + bls->next = bls->prev = bls; + bls->block = block; +} + +// Functions that operate on cl_block_list's. + +static void cl_block_list_initialize(cl_block_list* list) { + list->count = 0; + cl_block_list_struct_initialize(&list->ht, NULL); +} + +// Returns head of *this, or NULL if empty. +static cl_block* cl_block_list_head(cl_block_list* list) { + return list->ht.next->block; +} + +// Insert element *e after *pos. +static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos, + cl_block_list_struct* e) { + list->count++; + e->next = pos->next; + e->prev = pos; + e->next->prev = e; + e->prev->next = e; +} + +// Insert block at the head of the list +static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) { + cl_block_list_insert(list, &list->ht, &block->link); +} + +// Insert block at the tail of the list. +static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) { + cl_block_list_insert(list, list->ht.prev, &block->link); +} + +// Removes block *b. Requires *b be in the list. +static void cl_block_list_remove(cl_block_list* list, cl_block* b) { + list->count--; + b->link.next->prev = b->link.prev; + b->link.prev->next = b->link.next; +} + +// Functions that operate on cl_block's + +static void cl_block_initialize(cl_block* block, char* buffer) { + block->buffer = buffer; + gpr_atm_rel_store(&block->writer_lock, 0); + gpr_atm_rel_store(&block->reader_lock, 0); + gpr_atm_rel_store(&block->bytes_committed, 0); + block->bytes_read = 0; + cl_block_list_struct_initialize(&block->link, block); +} + +// Guards against exposing partially written buffer to the reader. +static void cl_block_set_bytes_committed(cl_block* block, + size_t bytes_committed) { + gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed); +} + +static size_t cl_block_get_bytes_committed(cl_block* block) { + return (size_t)gpr_atm_acq_load(&block->bytes_committed); +} + +// Tries to disable future read/write access to this block. Succeeds if: +// - no in-progress write AND +// - no in-progress read AND +// - 'discard_data' set to true OR no unread data +// On success, clears the block state and returns with writer_lock_ and +// reader_lock_ held. These locks are released by a subsequent +// cl_block_access_enable() call. 
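In practice the two calls behave like a combined try-lock over the reader and writer locks: the caller that wins detaches the block, and access is re-enabled only after the block has been handed to a new owner. A condensed sketch of the recycle path used by cl_allocate_block() and cl_allocate_core_local_block() further down (g_log.lock assumed held, the dirty-list scan and error handling trimmed):

    /* Sketch only: core_id is the caller's current core. */
    cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
    cl_block* block = cl_block_list_head(&g_log.dirty_block_list);
    if (block != NULL &&
        cl_block_try_disable_access(block, 1 /* discard data */)) {
      cl_block_list_remove(&g_log.dirty_block_list, block);
      cl_core_local_block_set_block(core_local_block, block);
      cl_block_enable_access(block); /* releases both locks again */
    }
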
+static bool cl_block_try_disable_access(cl_block* block, int discard_data) { + if (!cl_try_lock(&block->writer_lock)) { + return false; + } + if (!cl_try_lock(&block->reader_lock)) { + cl_unlock(&block->writer_lock); + return false; + } + if (!discard_data && + (block->bytes_read != cl_block_get_bytes_committed(block))) { + cl_unlock(&block->reader_lock); + cl_unlock(&block->writer_lock); + return false; + } + cl_block_set_bytes_committed(block, 0); + block->bytes_read = 0; + return true; +} + +static void cl_block_enable_access(cl_block* block) { + cl_unlock(&block->reader_lock); + cl_unlock(&block->writer_lock); +} + +// Returns with writer_lock held. +static void* cl_block_start_write(cl_block* block, size_t size) { + if (!cl_try_lock(&block->writer_lock)) { + return NULL; + } + size_t bytes_committed = cl_block_get_bytes_committed(block); + if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) { + cl_unlock(&block->writer_lock); + return NULL; + } + return block->buffer + bytes_committed; +} + +// Releases writer_lock and increments committed bytes by 'bytes_written'. +// 'bytes_written' must be <= 'size' specified in the corresponding +// StartWrite() call. This function is thread-safe. +static void cl_block_end_write(cl_block* block, size_t bytes_written) { + cl_block_set_bytes_committed( + block, cl_block_get_bytes_committed(block) + bytes_written); + cl_unlock(&block->writer_lock); +} + +// Returns a pointer to the first unread byte in buffer. The number of bytes +// available are returned in 'bytes_available'. Acquires reader lock that is +// released by a subsequent cl_block_end_read() call. Returns NULL if: +// - read in progress +// - no data available +static void* cl_block_start_read(cl_block* block, size_t* bytes_available) { + if (!cl_try_lock(&block->reader_lock)) { + return NULL; + } + // bytes_committed may change from under us. Use bytes_available to update + // bytes_read below. + size_t bytes_committed = cl_block_get_bytes_committed(block); + GPR_ASSERT(bytes_committed >= block->bytes_read); + *bytes_available = bytes_committed - block->bytes_read; + if (*bytes_available == 0) { + cl_unlock(&block->reader_lock); + return NULL; + } + void* record = block->buffer + block->bytes_read; + block->bytes_read += *bytes_available; + return record; +} + +static void cl_block_end_read(cl_block* block) { + cl_unlock(&block->reader_lock); +} + +// Internal functions operating on g_log + +// Allocates a new free block (or recycles an available dirty block if log is +// configured to discard old records). Returns NULL if out-of-space. +static cl_block* cl_allocate_block(void) { + cl_block* block = cl_block_list_head(&g_log.free_block_list); + if (block != NULL) { + cl_block_list_remove(&g_log.free_block_list, block); + return block; + } + if (!g_log.discard_old_records) { + // No free block and log is configured to keep old records. + return NULL; + } + // Recycle dirty block. Start from the oldest. + for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL; + block = block->link.next->block) { + if (cl_block_try_disable_access(block, 1 /* discard data */)) { + cl_block_list_remove(&g_log.dirty_block_list, block); + return block; + } + } + return NULL; +} + +// Allocates a new block and updates core id => block mapping. 'old_block' +// points to the block that the caller thinks is attached to +// 'core_id'. 'old_block' may be NULL. 
Returns true if: +// - allocated a new block OR +// - 'core_id' => 'old_block' mapping changed (another thread allocated a +// block before lock was acquired). +static bool cl_allocate_core_local_block(uint32_t core_id, + cl_block* old_block) { + // Now that we have the lock, check if core-local mapping has changed. + cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id]; + cl_block* block = cl_core_local_block_get_block(core_local_block); + if ((block != NULL) && (block != old_block)) { + return true; + } + if (block != NULL) { + cl_core_local_block_set_block(core_local_block, NULL); + cl_block_list_insert_at_tail(&g_log.dirty_block_list, block); + } + block = cl_allocate_block(); + if (block == NULL) { + return false; + } + cl_core_local_block_set_block(core_local_block, block); + cl_block_enable_access(block); + return true; +} + +static cl_block* cl_get_block(void* record) { + uintptr_t p = (uintptr_t)((char*)record - g_log.buffer); + uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE; + return &g_log.blocks[index]; +} + +// Gets the next block to read and tries to free 'prev' block (if not NULL). +// Returns NULL if reached the end. +static cl_block* cl_next_block_to_read(cl_block* prev) { + cl_block* block = NULL; + if (g_log.read_iterator_state == g_log.num_cores) { + // We are traversing dirty list; find the next dirty block. + if (prev != NULL) { + // Try to free the previous block if there is no unread data. This + // block + // may have unread data if previously incomplete record completed + // between + // read_next() calls. + block = prev->link.next->block; + if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) { + cl_block_list_remove(&g_log.dirty_block_list, prev); + cl_block_list_insert_at_head(&g_log.free_block_list, prev); + } + } else { + block = cl_block_list_head(&g_log.dirty_block_list); + } + if (block != NULL) { + return block; + } + // We are done with the dirty list; moving on to core-local blocks. + } + while (g_log.read_iterator_state > 0) { + g_log.read_iterator_state--; + block = cl_core_local_block_get_block( + &g_log.core_local_blocks[g_log.read_iterator_state]); + if (block != NULL) { + return block; + } + } + return NULL; +} + +#define CL_LOG_2_MB 20 // 2^20 = 1MB + +// External functions: primary stats_log interface +void census_log_initialize(size_t size_in_mb, int discard_old_records) { + // Check cacheline alignment. + GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0); + GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0); + GPR_ASSERT(!g_log.initialized); + g_log.discard_old_records = discard_old_records; + g_log.num_cores = gpr_cpu_num_cores(); + // Ensure that we will not get any overflow in calaculating num_blocks + GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE); + GPR_ASSERT(size_in_mb < 1000); + // Ensure at least 2x as many blocks as there are cores. 
+ g_log.num_blocks = + (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >> + CENSUS_LOG_2_MAX_RECORD_SIZE); + gpr_mu_init(&g_log.lock); + g_log.read_iterator_state = 0; + g_log.block_being_read = NULL; + g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned( + g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG); + memset(g_log.core_local_blocks, 0, + g_log.num_cores * sizeof(cl_core_local_block)); + g_log.blocks = (cl_block*)gpr_malloc_aligned( + g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG); + memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block)); + g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); + memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); + cl_block_list_initialize(&g_log.free_block_list); + cl_block_list_initialize(&g_log.dirty_block_list); + for (uint32_t i = 0; i < g_log.num_blocks; ++i) { + cl_block* block = g_log.blocks + i; + cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i)); + cl_block_try_disable_access(block, 1 /* discard data */); + cl_block_list_insert_at_tail(&g_log.free_block_list, block); + } + gpr_atm_rel_store(&g_log.out_of_space_count, 0); + g_log.initialized = 1; +} + +void census_log_shutdown(void) { + GPR_ASSERT(g_log.initialized); + gpr_mu_destroy(&g_log.lock); + gpr_free_aligned(g_log.core_local_blocks); + g_log.core_local_blocks = NULL; + gpr_free_aligned(g_log.blocks); + g_log.blocks = NULL; + gpr_free(g_log.buffer); + g_log.buffer = NULL; + g_log.initialized = 0; +} + +void* census_log_start_write(size_t size) { + // Used to bound number of times block allocation is attempted. + GPR_ASSERT(size > 0); + GPR_ASSERT(g_log.initialized); + if (size > CENSUS_LOG_MAX_RECORD_SIZE) { + return NULL; + } + uint32_t attempts_remaining = g_log.num_blocks; + uint32_t core_id = gpr_cpu_current_cpu(); + do { + void* record = NULL; + cl_block* block = + cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]); + if (block && (record = cl_block_start_write(block, size))) { + return record; + } + // Need to allocate a new block. We are here if: + // - No block associated with the core OR + // - Write in-progress on the block OR + // - block is out of space + gpr_mu_lock(&g_log.lock); + bool allocated = cl_allocate_core_local_block(core_id, block); + gpr_mu_unlock(&g_log.lock); + if (!allocated) { + gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); + return NULL; + } + } while (attempts_remaining--); + // Give up. + gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); + return NULL; +} + +void census_log_end_write(void* record, size_t bytes_written) { + GPR_ASSERT(g_log.initialized); + cl_block_end_write(cl_get_block(record), bytes_written); +} + +void census_log_init_reader(void) { + GPR_ASSERT(g_log.initialized); + gpr_mu_lock(&g_log.lock); + // If a block is locked for reading unlock it. 
+ if (g_log.block_being_read != NULL) { + cl_block_end_read(g_log.block_being_read); + g_log.block_being_read = NULL; + } + g_log.read_iterator_state = g_log.num_cores; + gpr_mu_unlock(&g_log.lock); +} + +const void* census_log_read_next(size_t* bytes_available) { + GPR_ASSERT(g_log.initialized); + gpr_mu_lock(&g_log.lock); + if (g_log.block_being_read != NULL) { + cl_block_end_read(g_log.block_being_read); + } + do { + g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read); + if (g_log.block_being_read != NULL) { + void* record = + cl_block_start_read(g_log.block_being_read, bytes_available); + if (record != NULL) { + gpr_mu_unlock(&g_log.lock); + return record; + } + } + } while (g_log.block_being_read != NULL); + gpr_mu_unlock(&g_log.lock); + return NULL; +} + +size_t census_log_remaining_space(void) { + GPR_ASSERT(g_log.initialized); + size_t space = 0; + gpr_mu_lock(&g_log.lock); + if (g_log.discard_old_records) { + // Remaining space is not meaningful; just return the entire log space. + space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE; + } else { + GPR_ASSERT(g_log.free_block_list.count >= 0); + space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE; + } + gpr_mu_unlock(&g_log.lock); + return space; +} + +int64_t census_log_out_of_space_count(void) { + GPR_ASSERT(g_log.initialized); + return gpr_atm_acq_load(&g_log.out_of_space_count); +} diff --git a/src/core/census/mlog.h b/src/core/census/mlog.h new file mode 100644 index 0000000000..aaba9e1535 --- /dev/null +++ b/src/core/census/mlog.h @@ -0,0 +1,95 @@ +/* + * + * Copyright 2015-2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* A very fast in-memory log, optimized for multiple writers. */ + +#ifndef GRPC_INTERNAL_CORE_CENSUS_MLOG_H +#define GRPC_INTERNAL_CORE_CENSUS_MLOG_H + +#include +#include + +/* Maximum record size, in bytes. 
*/
+#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
+#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
+
+/* Initialize the statistics logging subsystem with the given log size. A log
+ size of 0 will result in the smallest possible log for the platform
+ (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
+ discard_old_records is non-zero, then new records will displace older ones
+ when the log is full. This function must be called before any other
+ census_log functions.
+*/
+void census_log_initialize(size_t size_in_mb, int discard_old_records);
+
+/* Shutdown the logging subsystem. Caller must ensure that:
+ - there are no in-progress or future calls to any census_log functions
+ - there are no incomplete records
+*/
+void census_log_shutdown(void);
+
+/* Allocates and returns a 'size' bytes record and marks it in use. A
+ subsequent census_log_end_write() marks the record complete. The
+ 'bytes_written' census_log_end_write() argument must be <=
+ 'size'. Returns NULL if out-of-space AND:
+ - log is configured to keep old records OR
+ - all blocks are pinned by incomplete records.
+*/
+void* census_log_start_write(size_t size);
+
+void census_log_end_write(void* record, size_t bytes_written);
+
+void census_log_init_reader(void);
+
+/* census_log_read_next() iterates over blocks with data and for each block
+ returns a pointer to the first unread byte. The number of bytes that can be
+ read is returned in 'bytes_available'. Reader is expected to read all
+ available data. Reading the data consumes it, i.e. it cannot be read again.
+ census_log_read_next() returns NULL if the end is reached, i.e. the last
+ block is read. census_log_init_reader() starts the iteration or aborts the
+ current iteration.
+*/
+const void* census_log_read_next(size_t* bytes_available);
+
+/* Returns estimated remaining space across all blocks, in bytes. If log is
+ configured to discard old records, returns total log space. Otherwise,
+ returns space available in empty blocks (partially filled blocks are
+ treated as full).
+*/
+size_t census_log_remaining_space(void);
+
+/* Returns the number of times census_log_start_write() failed due to
+ out-of-space. */
+int64_t census_log_out_of_space_count(void);
+
+#endif /* GRPC_INTERNAL_CORE_CENSUS_MLOG_H */
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index f43ac5c0a5..a002a5a0a8 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -224,7 +224,7 @@ CORE_SOURCE_FILES = [
 'src/core/tsi/transport_security.c',
 'src/core/census/context.c',
 'src/core/census/initialize.c',
- 'src/core/census/log.c',
+ 'src/core/census/mlog.c',
 'src/core/census/operation.c',
 'src/core/census/placeholders.c',
 'src/core/census/tracing.c',
diff --git a/test/core/census/log_test.c b/test/core/census/log_test.c
deleted file mode 100644
index b68ca11504..0000000000
--- a/test/core/census/log_test.c
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- *
- * Copyright 2015-2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include "src/core/census/log.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "test/core/util/test_config.h" - -// Change this to non-zero if you want more output. -#define VERBOSE 0 - -// Log size to use for all tests. -#define LOG_SIZE_IN_MB 1 -#define LOG_SIZE_IN_BYTES (LOG_SIZE_IN_MB << 20) - -// Fills in 'record' of size 'size'. Each byte in record is filled in with the -// same value. The value is extracted from 'record' pointer. -static void write_record(char* record, size_t size) { - char data = (char)((uintptr_t)record % 255); - memset(record, data, size); -} - -// Reads fixed size records. Returns the number of records read in -// 'num_records'. -static void read_records(size_t record_size, const char* buffer, - size_t buffer_size, int* num_records) { - GPR_ASSERT(buffer_size >= record_size); - GPR_ASSERT(buffer_size % record_size == 0); - *num_records = (int)(buffer_size / record_size); - for (int i = 0; i < *num_records; ++i) { - const char* record = buffer + (record_size * (size_t)i); - char data = (char)((uintptr_t)record % 255); - for (size_t j = 0; j < record_size; ++j) { - GPR_ASSERT(data == record[j]); - } - } -} - -// Tries to write the specified number of records. Stops when the log gets -// full. Returns the number of records written. Spins for random -// number of times, up to 'max_spin_count', between writes. -static int write_records_to_log(int writer_id, size_t record_size, - int num_records, int max_spin_count) { - int counter = 0; - for (int i = 0; i < num_records; ++i) { - int spin_count = max_spin_count ? rand() % max_spin_count : 0; - if (VERBOSE && (counter++ == num_records / 10)) { - printf(" Writer %d: %d out of %d written\n", writer_id, i, num_records); - counter = 0; - } - char* record = (char*)(census_log_start_write(record_size)); - if (record == NULL) { - return i; - } - write_record(record, record_size); - census_log_end_write(record, record_size); - for (int j = 0; j < spin_count; ++j) { - GPR_ASSERT(j >= 0); - } - } - return num_records; -} - -// Performs a single read iteration. Returns the number of records read. 
-static int perform_read_iteration(size_t record_size) { - const void* read_buffer = NULL; - size_t bytes_available; - int records_read = 0; - census_log_init_reader(); - while ((read_buffer = census_log_read_next(&bytes_available))) { - int num_records = 0; - read_records(record_size, (const char*)read_buffer, bytes_available, - &num_records); - records_read += num_records; - } - return records_read; -} - -// Asserts that the log is empty. -static void assert_log_empty(void) { - census_log_init_reader(); - size_t bytes_available; - GPR_ASSERT(census_log_read_next(&bytes_available) == NULL); -} - -// Fills the log and verifies data. If 'no fragmentation' is true, records -// are sized such that CENSUS_LOG_2_MAX_RECORD_SIZE is a multiple of record -// size. If not a circular log, verifies that the number of records written -// match the number of records read. -static void fill_log(size_t log_size, int no_fragmentation, int circular_log) { - size_t size; - if (no_fragmentation) { - int log2size = rand() % (CENSUS_LOG_2_MAX_RECORD_SIZE + 1); - size = ((size_t)1 << log2size); - } else { - while (1) { - size = 1 + ((size_t)rand() % CENSUS_LOG_MAX_RECORD_SIZE); - if (CENSUS_LOG_MAX_RECORD_SIZE % size) { - break; - } - } - } - int records_written = - write_records_to_log(0 /* writer id */, size, - (int)((log_size / size) * 2), 0 /* spin count */); - int records_read = perform_read_iteration(size); - if (!circular_log) { - GPR_ASSERT(records_written == records_read); - } - assert_log_empty(); -} - -// Structure to pass args to writer_thread -typedef struct writer_thread_args { - // Index of this thread in the writers vector. - int index; - // Record size. - size_t record_size; - // Number of records to write. - int num_records; - // Used to signal when writer is complete - gpr_cv* done; - gpr_mu* mu; - int* count; -} writer_thread_args; - -// Writes the given number of records of random size (up to kMaxRecordSize) and -// random data to the specified log. -static void writer_thread(void* arg) { - writer_thread_args* args = (writer_thread_args*)arg; - // Maximum number of times to spin between writes. - static const int MAX_SPIN_COUNT = 50; - int records_written = 0; - if (VERBOSE) { - printf(" Writer %d starting\n", args->index); - } - while (records_written < args->num_records) { - records_written += write_records_to_log(args->index, args->record_size, - args->num_records - records_written, - MAX_SPIN_COUNT); - if (records_written < args->num_records) { - // Ran out of log space. Sleep for a bit and let the reader catch up. - // This should never happen for circular logs. - if (VERBOSE) { - printf( - " Writer %d stalled due to out-of-space: %d out of %d " - "written\n", - args->index, records_written, args->num_records); - } - gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10)); - } - } - // Done. Decrement count and signal. - gpr_mu_lock(args->mu); - (*args->count)--; - gpr_cv_signal(args->done); - if (VERBOSE) { - printf(" Writer %d done\n", args->index); - } - gpr_mu_unlock(args->mu); -} - -// struct to pass args to reader_thread -typedef struct reader_thread_args { - // Record size. - size_t record_size; - // Interval between read iterations. - int read_iteration_interval_in_msec; - // Total number of records. - int total_records; - // Signalled when reader should stop. - gpr_cv stop; - int stop_flag; - // Used to signal when reader has finished - gpr_cv* done; - gpr_mu* mu; - int running; -} reader_thread_args; - -// Reads and verifies the specified number of records. 
Reader can also be -// stopped via gpr_cv_signal(&args->stop). Sleeps for 'read_interval_in_msec' -// between read iterations. -static void reader_thread(void* arg) { - reader_thread_args* args = (reader_thread_args*)arg; - if (VERBOSE) { - printf(" Reader starting\n"); - } - gpr_timespec interval = gpr_time_from_micros( - args->read_iteration_interval_in_msec * 1000, GPR_TIMESPAN); - gpr_mu_lock(args->mu); - int records_read = 0; - int num_iterations = 0; - int counter = 0; - while (!args->stop_flag && records_read < args->total_records) { - gpr_cv_wait(&args->stop, args->mu, interval); - if (!args->stop_flag) { - records_read += perform_read_iteration(args->record_size); - GPR_ASSERT(records_read <= args->total_records); - if (VERBOSE && (counter++ == 100000)) { - printf(" Reader: %d out of %d read\n", records_read, - args->total_records); - counter = 0; - } - ++num_iterations; - } - } - // Done - args->running = 0; - gpr_cv_signal(args->done); - if (VERBOSE) { - printf(" Reader: records: %d, iterations: %d\n", records_read, - num_iterations); - } - gpr_mu_unlock(args->mu); -} - -// Creates NUM_WRITERS writers where each writer writes NUM_RECORDS_PER_WRITER -// records. Also, starts a reader that iterates over and reads blocks every -// READ_ITERATION_INTERVAL_IN_MSEC. -// Number of writers. -#define NUM_WRITERS 5 -static void multiple_writers_single_reader(int circular_log) { - // Sleep interval between read iterations. - static const int READ_ITERATION_INTERVAL_IN_MSEC = 10; - // Maximum record size. - static const size_t MAX_RECORD_SIZE = 20; - // Number of records written by each writer. This is sized such that we - // will write through the entire log ~10 times. - const int NUM_RECORDS_PER_WRITER = - (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) / - NUM_WRITERS; - size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1; - // Create and start writers. - writer_thread_args writers[NUM_WRITERS]; - int writers_count = NUM_WRITERS; - gpr_cv writers_done; - gpr_mu writers_mu; // protects writers_done and writers_count - gpr_cv_init(&writers_done); - gpr_mu_init(&writers_mu); - gpr_thd_id id; - for (int i = 0; i < NUM_WRITERS; ++i) { - writers[i].index = i; - writers[i].record_size = record_size; - writers[i].num_records = NUM_RECORDS_PER_WRITER; - writers[i].done = &writers_done; - writers[i].count = &writers_count; - writers[i].mu = &writers_mu; - gpr_thd_new(&id, &writer_thread, &writers[i], NULL); - } - // Start reader. - gpr_cv reader_done; - gpr_mu reader_mu; // protects reader_done and reader.running - reader_thread_args reader; - reader.record_size = record_size; - reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC; - reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER; - reader.stop_flag = 0; - gpr_cv_init(&reader.stop); - gpr_cv_init(&reader_done); - reader.done = &reader_done; - gpr_mu_init(&reader_mu); - reader.mu = &reader_mu; - reader.running = 1; - gpr_thd_new(&id, &reader_thread, &reader, NULL); - // Wait for writers to finish. - gpr_mu_lock(&writers_mu); - while (writers_count != 0) { - gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); - } - gpr_mu_unlock(&writers_mu); - gpr_mu_destroy(&writers_mu); - gpr_cv_destroy(&writers_done); - gpr_mu_lock(&reader_mu); - if (circular_log) { - // Stop reader. 
- reader.stop_flag = 1; - gpr_cv_signal(&reader.stop); - } - // wait for reader to finish - while (reader.running) { - gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); - } - if (circular_log) { - // Assert that there were no out-of-space errors. - GPR_ASSERT(0 == census_log_out_of_space_count()); - } - gpr_mu_unlock(&reader_mu); - gpr_mu_destroy(&reader_mu); - gpr_cv_destroy(&reader_done); - if (VERBOSE) { - printf(" Reader: finished\n"); - } -} - -static void setup_test(int circular_log) { - census_log_initialize(LOG_SIZE_IN_MB, circular_log); - GPR_ASSERT(census_log_remaining_space() == LOG_SIZE_IN_BYTES); -} - -// Attempts to create a record of invalid size (size > -// CENSUS_LOG_MAX_RECORD_SIZE). -void test_invalid_record_size(void) { - static const size_t INVALID_SIZE = CENSUS_LOG_MAX_RECORD_SIZE + 1; - static const size_t VALID_SIZE = 1; - printf("Starting test: invalid record size\n"); - setup_test(0); - void* record = census_log_start_write(INVALID_SIZE); - GPR_ASSERT(record == NULL); - // Now try writing a valid record. - record = census_log_start_write(VALID_SIZE); - GPR_ASSERT(record != NULL); - census_log_end_write(record, VALID_SIZE); - // Verifies that available space went down by one block. In theory, this - // check can fail if the thread is context switched to a new CPU during the - // start_write execution (multiple blocks get allocated), but this has not - // been observed in practice. - GPR_ASSERT(LOG_SIZE_IN_BYTES - CENSUS_LOG_MAX_RECORD_SIZE == - census_log_remaining_space()); - census_log_shutdown(); -} - -// Tests end_write() with a different size than what was specified in -// start_write(). -void test_end_write_with_different_size(void) { - static const size_t START_WRITE_SIZE = 10; - static const size_t END_WRITE_SIZE = 7; - printf("Starting test: end write with different size\n"); - setup_test(0); - void* record_written = census_log_start_write(START_WRITE_SIZE); - GPR_ASSERT(record_written != NULL); - census_log_end_write(record_written, END_WRITE_SIZE); - census_log_init_reader(); - size_t bytes_available; - const void* record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(record_written == record_read); - GPR_ASSERT(END_WRITE_SIZE == bytes_available); - assert_log_empty(); - census_log_shutdown(); -} - -// Verifies that pending records are not available via read_next(). -void test_read_pending_record(void) { - static const size_t PR_RECORD_SIZE = 1024; - printf("Starting test: read pending record\n"); - setup_test(0); - // Start a write. - void* record_written = census_log_start_write(PR_RECORD_SIZE); - GPR_ASSERT(record_written != NULL); - // As write is pending, read should fail. - census_log_init_reader(); - size_t bytes_available; - const void* record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(record_read == NULL); - // A read followed by end_write() should succeed. - census_log_end_write(record_written, PR_RECORD_SIZE); - census_log_init_reader(); - record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(record_written == record_read); - GPR_ASSERT(PR_RECORD_SIZE == bytes_available); - assert_log_empty(); - census_log_shutdown(); -} - -// Tries reading beyond pending write. -void test_read_beyond_pending_record(void) { - printf("Starting test: read beyond pending record\n"); - setup_test(0); - // Start a write. 
- const size_t incomplete_record_size = 10; - void* incomplete_record = census_log_start_write(incomplete_record_size); - GPR_ASSERT(incomplete_record != NULL); - const size_t complete_record_size = 20; - void* complete_record = census_log_start_write(complete_record_size); - GPR_ASSERT(complete_record != NULL); - GPR_ASSERT(complete_record != incomplete_record); - census_log_end_write(complete_record, complete_record_size); - // Now iterate over blocks to read completed records. - census_log_init_reader(); - size_t bytes_available; - const void* record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(complete_record == record_read); - GPR_ASSERT(complete_record_size == bytes_available); - // Complete first record. - census_log_end_write(incomplete_record, incomplete_record_size); - // Have read past the incomplete record, so read_next() should return NULL. - // NB: this test also assumes our thread did not get switched to a different - // CPU between the two start_write calls - record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(record_read == NULL); - // Reset reader to get the newly completed record. - census_log_init_reader(); - record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(incomplete_record == record_read); - GPR_ASSERT(incomplete_record_size == bytes_available); - assert_log_empty(); - census_log_shutdown(); -} - -// Tests scenario where block being read is detached from a core and put on the -// dirty list. -void test_detached_while_reading(void) { - printf("Starting test: detached while reading\n"); - setup_test(0); - // Start a write. - static const size_t DWR_RECORD_SIZE = 10; - void* record_written = census_log_start_write(DWR_RECORD_SIZE); - GPR_ASSERT(record_written != NULL); - census_log_end_write(record_written, DWR_RECORD_SIZE); - // Read this record. - census_log_init_reader(); - size_t bytes_available; - const void* record_read = census_log_read_next(&bytes_available); - GPR_ASSERT(record_read != NULL); - GPR_ASSERT(DWR_RECORD_SIZE == bytes_available); - // Now fill the log. This will move the block being read from core-local - // array to the dirty list. - while ((record_written = census_log_start_write(DWR_RECORD_SIZE))) { - census_log_end_write(record_written, DWR_RECORD_SIZE); - } - - // In this iteration, read_next() should only traverse blocks in the - // core-local array. Therefore, we expect at most gpr_cpu_num_cores() more - // blocks. As log is full, if read_next() is traversing the dirty list, we - // will get more than gpr_cpu_num_cores() blocks. - int block_read = 0; - while ((record_read = census_log_read_next(&bytes_available))) { - ++block_read; - GPR_ASSERT(block_read <= (int)gpr_cpu_num_cores()); - } - census_log_shutdown(); -} - -// Fills non-circular log with records sized such that size is a multiple of -// CENSUS_LOG_MAX_RECORD_SIZE (no per-block fragmentation). -void test_fill_log_no_fragmentation(void) { - printf("Starting test: fill log no fragmentation\n"); - const int circular = 0; - setup_test(circular); - fill_log(LOG_SIZE_IN_BYTES, 1 /* no fragmentation */, circular); - census_log_shutdown(); -} - -// Fills circular log with records sized such that size is a multiple of -// CENSUS_LOG_MAX_RECORD_SIZE (no per-block fragmentation). 
-void test_fill_circular_log_no_fragmentation(void) { - printf("Starting test: fill circular log no fragmentation\n"); - const int circular = 1; - setup_test(circular); - fill_log(LOG_SIZE_IN_BYTES, 1 /* no fragmentation */, circular); - census_log_shutdown(); -} - -// Fills non-circular log with records that may straddle end of a block. -void test_fill_log_with_straddling_records(void) { - printf("Starting test: fill log with straddling records\n"); - const int circular = 0; - setup_test(circular); - fill_log(LOG_SIZE_IN_BYTES, 0 /* block straddling records */, circular); - census_log_shutdown(); -} - -// Fills circular log with records that may straddle end of a block. -void test_fill_circular_log_with_straddling_records(void) { - printf("Starting test: fill circular log with straddling records\n"); - const int circular = 1; - setup_test(circular); - fill_log(LOG_SIZE_IN_BYTES, 0 /* block straddling records */, circular); - census_log_shutdown(); -} - -// Tests scenario where multiple writers and a single reader are using a log -// that is configured to discard old records. -void test_multiple_writers_circular_log(void) { - printf("Starting test: multiple writers circular log\n"); - const int circular = 1; - setup_test(circular); - multiple_writers_single_reader(circular); - census_log_shutdown(); -} - -// Tests scenario where multiple writers and a single reader are using a log -// that is configured to discard old records. -void test_multiple_writers(void) { - printf("Starting test: multiple writers\n"); - const int circular = 0; - setup_test(circular); - multiple_writers_single_reader(circular); - census_log_shutdown(); -} - -// Repeat the straddling records and multiple writers tests with a small log. -void test_small_log(void) { - printf("Starting test: small log\n"); - const int circular = 0; - census_log_initialize(0, circular); - size_t log_size = census_log_remaining_space(); - GPR_ASSERT(log_size > 0); - fill_log(log_size, 0, circular); - census_log_shutdown(); - census_log_initialize(0, circular); - multiple_writers_single_reader(circular); - census_log_shutdown(); -} - -void test_performance(void) { - for (size_t write_size = 1; write_size < CENSUS_LOG_MAX_RECORD_SIZE; - write_size *= 2) { - setup_test(0); - gpr_timespec start_time = gpr_now(GPR_CLOCK_REALTIME); - int nrecords = 0; - while (1) { - void* record = census_log_start_write(write_size); - if (record == NULL) { - break; - } - census_log_end_write(record, write_size); - nrecords++; - } - gpr_timespec write_time = - gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), start_time); - double write_time_micro = - (double)write_time.tv_sec * 1000000 + (double)write_time.tv_nsec / 1000; - census_log_shutdown(); - printf( - "Wrote %d %d byte records in %.3g microseconds: %g records/us " - "(%g ns/record), %g gigabytes/s\n", - nrecords, (int)write_size, write_time_micro, - nrecords / write_time_micro, 1000 * write_time_micro / nrecords, - (double)((int)write_size * nrecords) / write_time_micro / 1000); - } -} - -int main(int argc, char** argv) { - grpc_test_init(argc, argv); - gpr_time_init(); - srand((unsigned)gpr_now(GPR_CLOCK_REALTIME).tv_nsec); - test_invalid_record_size(); - test_end_write_with_different_size(); - test_read_pending_record(); - test_read_beyond_pending_record(); - test_detached_while_reading(); - test_fill_log_no_fragmentation(); - test_fill_circular_log_no_fragmentation(); - test_fill_log_with_straddling_records(); - test_fill_circular_log_with_straddling_records(); - test_small_log(); - test_multiple_writers(); - 
test_multiple_writers_circular_log(); - test_performance(); - return 0; -} diff --git a/test/core/census/mlog_test.c b/test/core/census/mlog_test.c new file mode 100644 index 0000000000..5b6c5946ab --- /dev/null +++ b/test/core/census/mlog_test.c @@ -0,0 +1,589 @@ +/* + * + * Copyright 2015-2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/census/mlog.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test/core/util/test_config.h" + +// Change this to non-zero if you want more output. +#define VERBOSE 0 + +// Log size to use for all tests. +#define LOG_SIZE_IN_MB 1 +#define LOG_SIZE_IN_BYTES (LOG_SIZE_IN_MB << 20) + +// Fills in 'record' of size 'size'. Each byte in record is filled in with the +// same value. The value is extracted from 'record' pointer. +static void write_record(char* record, size_t size) { + char data = (char)((uintptr_t)record % 255); + memset(record, data, size); +} + +// Reads fixed size records. Returns the number of records read in +// 'num_records'. +static void read_records(size_t record_size, const char* buffer, + size_t buffer_size, int* num_records) { + GPR_ASSERT(buffer_size >= record_size); + GPR_ASSERT(buffer_size % record_size == 0); + *num_records = (int)(buffer_size / record_size); + for (int i = 0; i < *num_records; ++i) { + const char* record = buffer + (record_size * (size_t)i); + char data = (char)((uintptr_t)record % 255); + for (size_t j = 0; j < record_size; ++j) { + GPR_ASSERT(data == record[j]); + } + } +} + +// Tries to write the specified number of records. Stops when the log gets +// full. Returns the number of records written. Spins for random +// number of times, up to 'max_spin_count', between writes. +static int write_records_to_log(int writer_id, size_t record_size, + int num_records, int max_spin_count) { + int counter = 0; + for (int i = 0; i < num_records; ++i) { + int spin_count = max_spin_count ? 
rand() % max_spin_count : 0; + if (VERBOSE && (counter++ == num_records / 10)) { + printf(" Writer %d: %d out of %d written\n", writer_id, i, num_records); + counter = 0; + } + char* record = (char*)(census_log_start_write(record_size)); + if (record == NULL) { + return i; + } + write_record(record, record_size); + census_log_end_write(record, record_size); + for (int j = 0; j < spin_count; ++j) { + GPR_ASSERT(j >= 0); + } + } + return num_records; +} + +// Performs a single read iteration. Returns the number of records read. +static int perform_read_iteration(size_t record_size) { + const void* read_buffer = NULL; + size_t bytes_available; + int records_read = 0; + census_log_init_reader(); + while ((read_buffer = census_log_read_next(&bytes_available))) { + int num_records = 0; + read_records(record_size, (const char*)read_buffer, bytes_available, + &num_records); + records_read += num_records; + } + return records_read; +} + +// Asserts that the log is empty. +static void assert_log_empty(void) { + census_log_init_reader(); + size_t bytes_available; + GPR_ASSERT(census_log_read_next(&bytes_available) == NULL); +} + +// Fills the log and verifies data. If 'no fragmentation' is true, records +// are sized such that CENSUS_LOG_2_MAX_RECORD_SIZE is a multiple of record +// size. If not a circular log, verifies that the number of records written +// match the number of records read. +static void fill_log(size_t log_size, int no_fragmentation, int circular_log) { + size_t size; + if (no_fragmentation) { + int log2size = rand() % (CENSUS_LOG_2_MAX_RECORD_SIZE + 1); + size = ((size_t)1 << log2size); + } else { + while (1) { + size = 1 + ((size_t)rand() % CENSUS_LOG_MAX_RECORD_SIZE); + if (CENSUS_LOG_MAX_RECORD_SIZE % size) { + break; + } + } + } + int records_written = + write_records_to_log(0 /* writer id */, size, + (int)((log_size / size) * 2), 0 /* spin count */); + int records_read = perform_read_iteration(size); + if (!circular_log) { + GPR_ASSERT(records_written == records_read); + } + assert_log_empty(); +} + +// Structure to pass args to writer_thread +typedef struct writer_thread_args { + // Index of this thread in the writers vector. + int index; + // Record size. + size_t record_size; + // Number of records to write. + int num_records; + // Used to signal when writer is complete + gpr_cv* done; + gpr_mu* mu; + int* count; +} writer_thread_args; + +// Writes the given number of records of random size (up to kMaxRecordSize) and +// random data to the specified log. +static void writer_thread(void* arg) { + writer_thread_args* args = (writer_thread_args*)arg; + // Maximum number of times to spin between writes. + static const int MAX_SPIN_COUNT = 50; + int records_written = 0; + if (VERBOSE) { + printf(" Writer %d starting\n", args->index); + } + while (records_written < args->num_records) { + records_written += write_records_to_log(args->index, args->record_size, + args->num_records - records_written, + MAX_SPIN_COUNT); + if (records_written < args->num_records) { + // Ran out of log space. Sleep for a bit and let the reader catch up. + // This should never happen for circular logs. + if (VERBOSE) { + printf( + " Writer %d stalled due to out-of-space: %d out of %d " + "written\n", + args->index, records_written, args->num_records); + } + gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10)); + } + } + // Done. Decrement count and signal. 
+ gpr_mu_lock(args->mu); + (*args->count)--; + gpr_cv_signal(args->done); + if (VERBOSE) { + printf(" Writer %d done\n", args->index); + } + gpr_mu_unlock(args->mu); +} + +// struct to pass args to reader_thread +typedef struct reader_thread_args { + // Record size. + size_t record_size; + // Interval between read iterations. + int read_iteration_interval_in_msec; + // Total number of records. + int total_records; + // Signalled when reader should stop. + gpr_cv stop; + int stop_flag; + // Used to signal when reader has finished + gpr_cv* done; + gpr_mu* mu; + int running; +} reader_thread_args; + +// Reads and verifies the specified number of records. Reader can also be +// stopped via gpr_cv_signal(&args->stop). Sleeps for 'read_interval_in_msec' +// between read iterations. +static void reader_thread(void* arg) { + reader_thread_args* args = (reader_thread_args*)arg; + if (VERBOSE) { + printf(" Reader starting\n"); + } + gpr_timespec interval = gpr_time_from_micros( + args->read_iteration_interval_in_msec * 1000, GPR_TIMESPAN); + gpr_mu_lock(args->mu); + int records_read = 0; + int num_iterations = 0; + int counter = 0; + while (!args->stop_flag && records_read < args->total_records) { + gpr_cv_wait(&args->stop, args->mu, interval); + if (!args->stop_flag) { + records_read += perform_read_iteration(args->record_size); + GPR_ASSERT(records_read <= args->total_records); + if (VERBOSE && (counter++ == 100000)) { + printf(" Reader: %d out of %d read\n", records_read, + args->total_records); + counter = 0; + } + ++num_iterations; + } + } + // Done + args->running = 0; + gpr_cv_signal(args->done); + if (VERBOSE) { + printf(" Reader: records: %d, iterations: %d\n", records_read, + num_iterations); + } + gpr_mu_unlock(args->mu); +} + +// Creates NUM_WRITERS writers where each writer writes NUM_RECORDS_PER_WRITER +// records. Also, starts a reader that iterates over and reads blocks every +// READ_ITERATION_INTERVAL_IN_MSEC. +// Number of writers. +#define NUM_WRITERS 5 +static void multiple_writers_single_reader(int circular_log) { + // Sleep interval between read iterations. + static const int READ_ITERATION_INTERVAL_IN_MSEC = 10; + // Maximum record size. + static const size_t MAX_RECORD_SIZE = 20; + // Number of records written by each writer. This is sized such that we + // will write through the entire log ~10 times. + const int NUM_RECORDS_PER_WRITER = + (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) / + NUM_WRITERS; + size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1; + // Create and start writers. + writer_thread_args writers[NUM_WRITERS]; + int writers_count = NUM_WRITERS; + gpr_cv writers_done; + gpr_mu writers_mu; // protects writers_done and writers_count + gpr_cv_init(&writers_done); + gpr_mu_init(&writers_mu); + gpr_thd_id id; + for (int i = 0; i < NUM_WRITERS; ++i) { + writers[i].index = i; + writers[i].record_size = record_size; + writers[i].num_records = NUM_RECORDS_PER_WRITER; + writers[i].done = &writers_done; + writers[i].count = &writers_count; + writers[i].mu = &writers_mu; + gpr_thd_new(&id, &writer_thread, &writers[i], NULL); + } + // Start reader. 
+ gpr_cv reader_done; + gpr_mu reader_mu; // protects reader_done and reader.running + reader_thread_args reader; + reader.record_size = record_size; + reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC; + reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER; + reader.stop_flag = 0; + gpr_cv_init(&reader.stop); + gpr_cv_init(&reader_done); + reader.done = &reader_done; + gpr_mu_init(&reader_mu); + reader.mu = &reader_mu; + reader.running = 1; + gpr_thd_new(&id, &reader_thread, &reader, NULL); + // Wait for writers to finish. + gpr_mu_lock(&writers_mu); + while (writers_count != 0) { + gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); + } + gpr_mu_unlock(&writers_mu); + gpr_mu_destroy(&writers_mu); + gpr_cv_destroy(&writers_done); + gpr_mu_lock(&reader_mu); + if (circular_log) { + // Stop reader. + reader.stop_flag = 1; + gpr_cv_signal(&reader.stop); + } + // wait for reader to finish + while (reader.running) { + gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); + } + if (circular_log) { + // Assert that there were no out-of-space errors. + GPR_ASSERT(0 == census_log_out_of_space_count()); + } + gpr_mu_unlock(&reader_mu); + gpr_mu_destroy(&reader_mu); + gpr_cv_destroy(&reader_done); + if (VERBOSE) { + printf(" Reader: finished\n"); + } +} + +static void setup_test(int circular_log) { + census_log_initialize(LOG_SIZE_IN_MB, circular_log); + GPR_ASSERT(census_log_remaining_space() == LOG_SIZE_IN_BYTES); +} + +// Attempts to create a record of invalid size (size > +// CENSUS_LOG_MAX_RECORD_SIZE). +void test_invalid_record_size(void) { + static const size_t INVALID_SIZE = CENSUS_LOG_MAX_RECORD_SIZE + 1; + static const size_t VALID_SIZE = 1; + printf("Starting test: invalid record size\n"); + setup_test(0); + void* record = census_log_start_write(INVALID_SIZE); + GPR_ASSERT(record == NULL); + // Now try writing a valid record. + record = census_log_start_write(VALID_SIZE); + GPR_ASSERT(record != NULL); + census_log_end_write(record, VALID_SIZE); + // Verifies that available space went down by one block. In theory, this + // check can fail if the thread is context switched to a new CPU during the + // start_write execution (multiple blocks get allocated), but this has not + // been observed in practice. + GPR_ASSERT(LOG_SIZE_IN_BYTES - CENSUS_LOG_MAX_RECORD_SIZE == + census_log_remaining_space()); + census_log_shutdown(); +} + +// Tests end_write() with a different size than what was specified in +// start_write(). +void test_end_write_with_different_size(void) { + static const size_t START_WRITE_SIZE = 10; + static const size_t END_WRITE_SIZE = 7; + printf("Starting test: end write with different size\n"); + setup_test(0); + void* record_written = census_log_start_write(START_WRITE_SIZE); + GPR_ASSERT(record_written != NULL); + census_log_end_write(record_written, END_WRITE_SIZE); + census_log_init_reader(); + size_t bytes_available; + const void* record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(record_written == record_read); + GPR_ASSERT(END_WRITE_SIZE == bytes_available); + assert_log_empty(); + census_log_shutdown(); +} + +// Verifies that pending records are not available via read_next(). +void test_read_pending_record(void) { + static const size_t PR_RECORD_SIZE = 1024; + printf("Starting test: read pending record\n"); + setup_test(0); + // Start a write. 
+ void* record_written = census_log_start_write(PR_RECORD_SIZE); + GPR_ASSERT(record_written != NULL); + // As write is pending, read should fail. + census_log_init_reader(); + size_t bytes_available; + const void* record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(record_read == NULL); + // A read followed by end_write() should succeed. + census_log_end_write(record_written, PR_RECORD_SIZE); + census_log_init_reader(); + record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(record_written == record_read); + GPR_ASSERT(PR_RECORD_SIZE == bytes_available); + assert_log_empty(); + census_log_shutdown(); +} + +// Tries reading beyond pending write. +void test_read_beyond_pending_record(void) { + printf("Starting test: read beyond pending record\n"); + setup_test(0); + // Start a write. + const size_t incomplete_record_size = 10; + void* incomplete_record = census_log_start_write(incomplete_record_size); + GPR_ASSERT(incomplete_record != NULL); + const size_t complete_record_size = 20; + void* complete_record = census_log_start_write(complete_record_size); + GPR_ASSERT(complete_record != NULL); + GPR_ASSERT(complete_record != incomplete_record); + census_log_end_write(complete_record, complete_record_size); + // Now iterate over blocks to read completed records. + census_log_init_reader(); + size_t bytes_available; + const void* record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(complete_record == record_read); + GPR_ASSERT(complete_record_size == bytes_available); + // Complete first record. + census_log_end_write(incomplete_record, incomplete_record_size); + // Have read past the incomplete record, so read_next() should return NULL. + // NB: this test also assumes our thread did not get switched to a different + // CPU between the two start_write calls + record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(record_read == NULL); + // Reset reader to get the newly completed record. + census_log_init_reader(); + record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(incomplete_record == record_read); + GPR_ASSERT(incomplete_record_size == bytes_available); + assert_log_empty(); + census_log_shutdown(); +} + +// Tests scenario where block being read is detached from a core and put on the +// dirty list. +void test_detached_while_reading(void) { + printf("Starting test: detached while reading\n"); + setup_test(0); + // Start a write. + static const size_t DWR_RECORD_SIZE = 10; + void* record_written = census_log_start_write(DWR_RECORD_SIZE); + GPR_ASSERT(record_written != NULL); + census_log_end_write(record_written, DWR_RECORD_SIZE); + // Read this record. + census_log_init_reader(); + size_t bytes_available; + const void* record_read = census_log_read_next(&bytes_available); + GPR_ASSERT(record_read != NULL); + GPR_ASSERT(DWR_RECORD_SIZE == bytes_available); + // Now fill the log. This will move the block being read from core-local + // array to the dirty list. + while ((record_written = census_log_start_write(DWR_RECORD_SIZE))) { + census_log_end_write(record_written, DWR_RECORD_SIZE); + } + + // In this iteration, read_next() should only traverse blocks in the + // core-local array. Therefore, we expect at most gpr_cpu_num_cores() more + // blocks. As log is full, if read_next() is traversing the dirty list, we + // will get more than gpr_cpu_num_cores() blocks. 
+ int block_read = 0; + while ((record_read = census_log_read_next(&bytes_available))) { + ++block_read; + GPR_ASSERT(block_read <= (int)gpr_cpu_num_cores()); + } + census_log_shutdown(); +} + +// Fills non-circular log with records sized such that size is a multiple of +// CENSUS_LOG_MAX_RECORD_SIZE (no per-block fragmentation). +void test_fill_log_no_fragmentation(void) { + printf("Starting test: fill log no fragmentation\n"); + const int circular = 0; + setup_test(circular); + fill_log(LOG_SIZE_IN_BYTES, 1 /* no fragmentation */, circular); + census_log_shutdown(); +} + +// Fills circular log with records sized such that size is a multiple of +// CENSUS_LOG_MAX_RECORD_SIZE (no per-block fragmentation). +void test_fill_circular_log_no_fragmentation(void) { + printf("Starting test: fill circular log no fragmentation\n"); + const int circular = 1; + setup_test(circular); + fill_log(LOG_SIZE_IN_BYTES, 1 /* no fragmentation */, circular); + census_log_shutdown(); +} + +// Fills non-circular log with records that may straddle end of a block. +void test_fill_log_with_straddling_records(void) { + printf("Starting test: fill log with straddling records\n"); + const int circular = 0; + setup_test(circular); + fill_log(LOG_SIZE_IN_BYTES, 0 /* block straddling records */, circular); + census_log_shutdown(); +} + +// Fills circular log with records that may straddle end of a block. +void test_fill_circular_log_with_straddling_records(void) { + printf("Starting test: fill circular log with straddling records\n"); + const int circular = 1; + setup_test(circular); + fill_log(LOG_SIZE_IN_BYTES, 0 /* block straddling records */, circular); + census_log_shutdown(); +} + +// Tests scenario where multiple writers and a single reader are using a log +// that is configured to discard old records. +void test_multiple_writers_circular_log(void) { + printf("Starting test: multiple writers circular log\n"); + const int circular = 1; + setup_test(circular); + multiple_writers_single_reader(circular); + census_log_shutdown(); +} + +// Tests scenario where multiple writers and a single reader are using a log +// that is configured to discard old records. +void test_multiple_writers(void) { + printf("Starting test: multiple writers\n"); + const int circular = 0; + setup_test(circular); + multiple_writers_single_reader(circular); + census_log_shutdown(); +} + +// Repeat the straddling records and multiple writers tests with a small log. 
+void test_small_log(void) { + printf("Starting test: small log\n"); + const int circular = 0; + census_log_initialize(0, circular); + size_t log_size = census_log_remaining_space(); + GPR_ASSERT(log_size > 0); + fill_log(log_size, 0, circular); + census_log_shutdown(); + census_log_initialize(0, circular); + multiple_writers_single_reader(circular); + census_log_shutdown(); +} + +void test_performance(void) { + for (size_t write_size = 1; write_size < CENSUS_LOG_MAX_RECORD_SIZE; + write_size *= 2) { + setup_test(0); + gpr_timespec start_time = gpr_now(GPR_CLOCK_REALTIME); + int nrecords = 0; + while (1) { + void* record = census_log_start_write(write_size); + if (record == NULL) { + break; + } + census_log_end_write(record, write_size); + nrecords++; + } + gpr_timespec write_time = + gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), start_time); + double write_time_micro = + (double)write_time.tv_sec * 1000000 + (double)write_time.tv_nsec / 1000; + census_log_shutdown(); + printf( + "Wrote %d %d byte records in %.3g microseconds: %g records/us " + "(%g ns/record), %g gigabytes/s\n", + nrecords, (int)write_size, write_time_micro, + nrecords / write_time_micro, 1000 * write_time_micro / nrecords, + (double)((int)write_size * nrecords) / write_time_micro / 1000); + } +} + +int main(int argc, char** argv) { + grpc_test_init(argc, argv); + gpr_time_init(); + srand((unsigned)gpr_now(GPR_CLOCK_REALTIME).tv_nsec); + test_invalid_record_size(); + test_end_write_with_different_size(); + test_read_pending_record(); + test_read_beyond_pending_record(); + test_detached_while_reading(); + test_fill_log_no_fragmentation(); + test_fill_circular_log_no_fragmentation(); + test_fill_log_with_straddling_records(); + test_fill_circular_log_with_straddling_records(); + test_small_log(); + test_multiple_writers(); + test_multiple_writers_circular_log(); + test_performance(); + return 0; +} diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 84250e74c5..b48901e910 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -902,7 +902,7 @@ src/core/tsi/ssl_types.h \ src/core/tsi/transport_security.h \ src/core/tsi/transport_security_interface.h \ src/core/census/aggregation.h \ -src/core/census/log.h \ +src/core/census/mlog.h \ src/core/census/rpc_metric_id.h \ src/core/census/grpc_context.c \ src/core/census/grpc_filter.c \ @@ -1055,7 +1055,7 @@ src/core/tsi/ssl_transport_security.c \ src/core/tsi/transport_security.c \ src/core/census/context.c \ src/core/census/initialize.c \ -src/core/census/log.c \ +src/core/census/mlog.c \ src/core/census/operation.c \ src/core/census/placeholders.c \ src/core/census/tracing.c \ diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 3cdd7b453c..b22a5a135f 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -92,7 +92,7 @@ "language": "c", "name": "census_log_test", "src": [ - "test/core/census/log_test.c" + "test/core/census/mlog_test.c" ] }, { @@ -2983,7 +2983,7 @@ "include/grpc/status.h", "src/core/census/aggregation.h", "src/core/census/grpc_filter.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", "src/core/channel/channel_args.h", "src/core/channel/channel_stack.h", @@ -3135,8 +3135,8 @@ "src/core/census/grpc_filter.c", "src/core/census/grpc_filter.h", "src/core/census/initialize.c", - "src/core/census/log.c", - "src/core/census/log.h", + 
"src/core/census/mlog.c", + "src/core/census/mlog.h", "src/core/census/operation.c", "src/core/census/placeholders.c", "src/core/census/rpc_metric_id.h", @@ -3524,7 +3524,7 @@ "include/grpc/status.h", "src/core/census/aggregation.h", "src/core/census/grpc_filter.h", - "src/core/census/log.h", + "src/core/census/mlog.h", "src/core/census/rpc_metric_id.h", "src/core/channel/channel_args.h", "src/core/channel/channel_stack.h", @@ -3661,8 +3661,8 @@ "src/core/census/grpc_filter.c", "src/core/census/grpc_filter.h", "src/core/census/initialize.c", - "src/core/census/log.c", - "src/core/census/log.h", + "src/core/census/mlog.c", + "src/core/census/mlog.h", "src/core/census/operation.c", "src/core/census/placeholders.c", "src/core/census/rpc_metric_id.h", diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj b/vsprojects/vcxproj/grpc/grpc.vcxproj index a426e41d0a..ebab196fcb 100644 --- a/vsprojects/vcxproj/grpc/grpc.vcxproj +++ b/vsprojects/vcxproj/grpc/grpc.vcxproj @@ -411,7 +411,7 @@ - + @@ -717,7 +717,7 @@ - + diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters index 0b99cba077..68fc952fd6 100644 --- a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters +++ b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters @@ -454,7 +454,7 @@ src\core\census - + src\core\census @@ -896,7 +896,7 @@ src\core\census - + src\core\census diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj index 2092397136..5daa7900f8 100644 --- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj @@ -387,7 +387,7 @@ - + @@ -653,7 +653,7 @@ - + diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters index f6e5275b79..68f136398b 100644 --- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters +++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters @@ -394,7 +394,7 @@ src\core\census - + src\core\census @@ -791,7 +791,7 @@ src\core\census - + src\core\census diff --git a/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj b/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj index 851086d663..9f2480085b 100644 --- a/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj +++ b/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj @@ -158,7 +158,7 @@ - + diff --git a/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj.filters b/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj.filters index 135c77847f..8b70dcb2a4 100644 --- a/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj.filters +++ b/vsprojects/vcxproj/test/census_log_test/census_log_test.vcxproj.filters @@ -1,7 +1,7 @@ - + test\core\census -- cgit v1.2.3