author    Craig Tiller <ctiller@google.com>  2016-02-18 07:21:17 -0800
committer Craig Tiller <ctiller@google.com>  2016-02-18 07:21:17 -0800
commit    9dc184fd9531e5e584047c05c829897611af261d (patch)
tree      3411a49d106f384dd703e34a35a0c3c0dfcff1e1 /src/core
parent    ccdea1900fdad3d507617c8b1b639c7f5914d06b (diff)
parent    c15cd723ebabbab4825480032b56a2ddd9a8b76b (diff)
Merge github.com:grpc/grpc into cleaner-posix
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/census/log.c                      600
-rw-r--r--  src/core/census/log.h                       93
-rw-r--r--  src/core/channel/channel_args.c             73
-rw-r--r--  src/core/channel/channel_args.h              8
-rw-r--r--  src/core/client_config/connector.c           5
-rw-r--r--  src/core/client_config/connector.h           4
-rw-r--r--  src/core/client_config/subchannel.c         48
-rw-r--r--  src/core/client_config/subchannel.h         21
-rw-r--r--  src/core/client_config/subchannel_index.c  259
-rw-r--r--  src/core/client_config/subchannel_index.h   77
-rw-r--r--  src/core/iomgr/udp_server.c                  9
-rw-r--r--  src/core/security/credentials.c             11
-rw-r--r--  src/core/security/security_connector.c       9
-rw-r--r--  src/core/security/security_context.c        11
-rw-r--r--  src/core/support/avl.c                       4
-rw-r--r--  src/core/surface/alarm.c                     2
-rw-r--r--  src/core/surface/channel_create.c            2
-rw-r--r--  src/core/surface/init.c                      4
-rw-r--r--  src/core/surface/secure_channel_create.c     2
19 files changed, 1202 insertions, 40 deletions
diff --git a/src/core/census/log.c b/src/core/census/log.c
new file mode 100644
index 0000000000..91b26941b8
--- /dev/null
+++ b/src/core/census/log.c
@@ -0,0 +1,600 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Implements an efficient in-memory log, optimized for multiple writers and
+// a single reader. Available log space is divided into blocks of
+// CENSUS_LOG_MAX_RECORD_SIZE bytes. A block can be in one of the following
+// three data structures:
+// - Free blocks (free_block_list)
+// - Blocks with unread data (dirty_block_list)
+// - Blocks currently attached to cores (core_local_blocks[])
+//
+// census_log_start_write() moves a block from core_local_blocks[] to the end
+// of dirty_block_list when the block:
+// - is out of space, OR
+// - has an incomplete record (an incomplete record occurs when a thread calls
+//   census_log_start_write() and is context-switched before calling
+//   census_log_end_write()).
+// Blocks in dirty_block_list are therefore ordered, oldest to newest, by the
+// time at which each block was detached from its core.
+//
+// census_log_read_next() first iterates over dirty_block_list and then
+// core_local_blocks[]. It moves completely read blocks from dirty_block_list
+// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
+// completely read.
+//
+// If the log is configured to discard old records and free_block_list is empty,
+// census_log_start_write() iterates over dirty_block_list to allocate a
+// new block. It moves the oldest available block (no pending read/write) to
+// core_local_blocks[].
+//
+// core_local_block_struct is used to implement a map from core id to the block
+// associated with that core. This mapping is advisory. It is possible that the
+// block returned by this mapping is no longer associated with that core. This
+// mapping is updated, lazily, by census_log_start_write().
+//
+// Locking in block struct:
+//
+// Exclusive g_log.lock must be held before calling any functions operating on
+// block structs except census_log_start_write() and census_log_end_write().
+//
+// Writes to a block are serialized via writer_lock. census_log_start_write()
+// acquires this lock and census_log_end_write() releases it. On failure to
+// acquire the lock, the writer allocates a new block for the current core and
+// updates core_local_block accordingly.
+//
+// Simultaneous read and write access is allowed. Readers can safely read up to
+// committed bytes (bytes_committed).
+//
+// reader_lock protects the block currently being read from being recycled.
+// start_read() acquires reader_lock and end_read() releases the lock.
+//
+// Read/write access to a block is disabled via try_disable_access(). It returns
+// with both writer_lock and reader_lock held. These locks are subsequently
+// released by enable_access() to enable access to the block.
+//
+// A note on naming: most function/struct names are prefixed with cl_
+// (shorthand for census_log). Further, functions that manipulate a structure
+// include the name of that structure, which is passed as the first
+// argument. E.g. cl_block_initialize() initializes a cl_block.
+
+#include "src/core/census/log.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+#include <stdbool.h>
+#include <string.h>
+
+// End of platform specific code
+
+typedef struct census_log_block_list_struct {
+ struct census_log_block_list_struct* next;
+ struct census_log_block_list_struct* prev;
+ struct census_log_block* block;
+} cl_block_list_struct;
+
+typedef struct census_log_block {
+ // Pointer to underlying buffer.
+ char* buffer;
+ gpr_atm writer_lock;
+ gpr_atm reader_lock;
+  // Number of completely written bytes. Declared atomic because it is
+  // accessed simultaneously by reader and writer.
+ gpr_atm bytes_committed;
+ // Bytes already read.
+ size_t bytes_read;
+ // Links for list.
+ cl_block_list_struct link;
+// We want this structure to be cacheline aligned. We assume the following
+// sizes for the various parts on 32/64-bit systems:
+// type 32b size 64b size
+// char* 4 8
+// 3x gpr_atm 12 24
+// size_t 4 8
+// cl_block_list_struct 12 24
+// TOTAL 32 64
+//
+// Depending on the size of our cacheline and the architecture, we
+// selectively add char padding to this structure. The size is checked
+// via assert in census_log_initialize().
+#if defined(GPR_ARCH_64)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_BLOCK_PAD_SIZE > 0
+ char padding[CL_BLOCK_PAD_SIZE];
+#endif
+} cl_block;
+
+// A list of cl_blocks, doubly-linked through cl_block::link.
+typedef struct census_log_block_list {
+ int32_t count; // Number of items in list.
+ cl_block_list_struct ht; // head/tail of linked list.
+} cl_block_list;
+
+// Cacheline-aligned block pointer to avoid false sharing. The block pointer
+// must be initialized via cl_core_local_block_set_block() before calling
+// other functions.
+typedef struct census_log_core_local_block {
+ gpr_atm block;
+// Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8.
+#if defined(GPR_ARCH_64)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
+ char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
+#endif
+} cl_core_local_block;
+
+struct census_log {
+ int discard_old_records;
+ // Number of cores (aka hardware-contexts)
+ unsigned num_cores;
+  // Number of CENSUS_LOG_MAX_RECORD_SIZE-sized blocks in the log.
+ uint32_t num_blocks;
+ cl_block* blocks; // Block metadata.
+ cl_core_local_block* core_local_blocks; // Keeps core to block mappings.
+ gpr_mu lock;
+ int initialized; // has log been initialized?
+  // Keeps the state of the reader iterator. A value of 0 indicates that the
+  // iterator has reached the end. census_log_init_reader() resets the value
+  // to num_cores to restart iteration.
+ uint32_t read_iterator_state;
+  // Points to the block being read. If non-NULL, the block is locked for
+  // reading (block_being_read->reader_lock is held).
+ cl_block* block_being_read;
+ char* buffer;
+ cl_block_list free_block_list;
+ cl_block_list dirty_block_list;
+ gpr_atm out_of_space_count;
+};
+
+// Single internal log.
+static struct census_log g_log;
+
+// Functions that operate on an atomic memory location used as a lock.
+
+// Returns non-zero if lock is acquired.
+static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
+
+static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
+
+// Functions that operate on cl_core_local_block's.
+
+static void cl_core_local_block_set_block(cl_core_local_block* clb,
+ cl_block* block) {
+ gpr_atm_rel_store(&clb->block, (gpr_atm)block);
+}
+
+static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
+ return (cl_block*)gpr_atm_acq_load(&clb->block);
+}
+
+// Functions that operate on cl_block_list_struct's.
+
+static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
+ cl_block* block) {
+ bls->next = bls->prev = bls;
+ bls->block = block;
+}
+
+// Functions that operate on cl_block_list's.
+
+static void cl_block_list_initialize(cl_block_list* list) {
+ list->count = 0;
+ cl_block_list_struct_initialize(&list->ht, NULL);
+}
+
+// Returns head of *this, or NULL if empty.
+static cl_block* cl_block_list_head(cl_block_list* list) {
+ return list->ht.next->block;
+}
+
+// Insert element *e after *pos.
+static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
+ cl_block_list_struct* e) {
+ list->count++;
+ e->next = pos->next;
+ e->prev = pos;
+ e->next->prev = e;
+ e->prev->next = e;
+}
+
+// Insert block at the head of the list.
+static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
+ cl_block_list_insert(list, &list->ht, &block->link);
+}
+
+// Insert block at the tail of the list.
+static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
+ cl_block_list_insert(list, list->ht.prev, &block->link);
+}
+
+// Removes block *b. Requires *b be in the list.
+static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
+ list->count--;
+ b->link.next->prev = b->link.prev;
+ b->link.prev->next = b->link.next;
+}
+
+// Functions that operate on cl_block's
+
+static void cl_block_initialize(cl_block* block, char* buffer) {
+ block->buffer = buffer;
+ gpr_atm_rel_store(&block->writer_lock, 0);
+ gpr_atm_rel_store(&block->reader_lock, 0);
+ gpr_atm_rel_store(&block->bytes_committed, 0);
+ block->bytes_read = 0;
+ cl_block_list_struct_initialize(&block->link, block);
+}
+
+// Guards against exposing partially written buffer to the reader.
+static void cl_block_set_bytes_committed(cl_block* block,
+ size_t bytes_committed) {
+ gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
+}
+
+static size_t cl_block_get_bytes_committed(cl_block* block) {
+ return (size_t)gpr_atm_acq_load(&block->bytes_committed);
+}
+
+// Tries to disable future read/write access to this block. Succeeds if:
+// - there is no in-progress write, AND
+// - there is no in-progress read, AND
+// - 'discard_data' is true OR there is no unread data.
+// On success, clears the block state and returns with writer_lock and
+// reader_lock held. These locks are released by a subsequent
+// cl_block_enable_access() call.
+static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
+ if (!cl_try_lock(&block->writer_lock)) {
+ return false;
+ }
+ if (!cl_try_lock(&block->reader_lock)) {
+ cl_unlock(&block->writer_lock);
+ return false;
+ }
+ if (!discard_data &&
+ (block->bytes_read != cl_block_get_bytes_committed(block))) {
+ cl_unlock(&block->reader_lock);
+ cl_unlock(&block->writer_lock);
+ return false;
+ }
+ cl_block_set_bytes_committed(block, 0);
+ block->bytes_read = 0;
+ return true;
+}
+
+static void cl_block_enable_access(cl_block* block) {
+ cl_unlock(&block->reader_lock);
+ cl_unlock(&block->writer_lock);
+}
+
+// Reserves 'size' bytes in the block for a new record. On success, returns a
+// pointer to the reserved space with writer_lock held; returns NULL (without
+// holding the lock) if the lock is unavailable or the block is out of space.
+static void* cl_block_start_write(cl_block* block, size_t size) {
+ if (!cl_try_lock(&block->writer_lock)) {
+ return NULL;
+ }
+ size_t bytes_committed = cl_block_get_bytes_committed(block);
+ if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
+ cl_unlock(&block->writer_lock);
+ return NULL;
+ }
+ return block->buffer + bytes_committed;
+}
+
+// Releases writer_lock and increments committed bytes by 'bytes_written'.
+// 'bytes_written' must be <= the 'size' specified in the corresponding
+// cl_block_start_write() call. This function is thread-safe.
+static void cl_block_end_write(cl_block* block, size_t bytes_written) {
+ cl_block_set_bytes_committed(
+ block, cl_block_get_bytes_committed(block) + bytes_written);
+ cl_unlock(&block->writer_lock);
+}
+
+// Returns a pointer to the first unread byte in the buffer. The number of
+// bytes available is returned in 'bytes_available'. Acquires the reader lock,
+// which is released by a subsequent cl_block_end_read() call. Returns NULL if:
+// - a read is already in progress, OR
+// - no data is available.
+static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
+ if (!cl_try_lock(&block->reader_lock)) {
+ return NULL;
+ }
+ // bytes_committed may change from under us. Use bytes_available to update
+ // bytes_read below.
+ size_t bytes_committed = cl_block_get_bytes_committed(block);
+ GPR_ASSERT(bytes_committed >= block->bytes_read);
+ *bytes_available = bytes_committed - block->bytes_read;
+ if (*bytes_available == 0) {
+ cl_unlock(&block->reader_lock);
+ return NULL;
+ }
+ void* record = block->buffer + block->bytes_read;
+ block->bytes_read += *bytes_available;
+ return record;
+}
+
+static void cl_block_end_read(cl_block* block) {
+ cl_unlock(&block->reader_lock);
+}
+
+// Internal functions operating on g_log
+
+// Allocates a new free block (or recycles an available dirty block if the log
+// is configured to discard old records). Returns NULL if out of space.
+static cl_block* cl_allocate_block(void) {
+ cl_block* block = cl_block_list_head(&g_log.free_block_list);
+ if (block != NULL) {
+ cl_block_list_remove(&g_log.free_block_list, block);
+ return block;
+ }
+ if (!g_log.discard_old_records) {
+ // No free block and log is configured to keep old records.
+ return NULL;
+ }
+ // Recycle dirty block. Start from the oldest.
+ for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
+ block = block->link.next->block) {
+ if (cl_block_try_disable_access(block, 1 /* discard data */)) {
+ cl_block_list_remove(&g_log.dirty_block_list, block);
+ return block;
+ }
+ }
+ return NULL;
+}
+
+// Allocates a new block and updates the core id => block mapping. 'old_block'
+// points to the block that the caller thinks is attached to 'core_id'.
+// 'old_block' may be NULL. Returns true if:
+// - a new block was allocated, OR
+// - the 'core_id' => 'old_block' mapping changed (another thread allocated a
+//   block before the lock was acquired).
+static bool cl_allocate_core_local_block(uint32_t core_id,
+ cl_block* old_block) {
+ // Now that we have the lock, check if core-local mapping has changed.
+ cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
+ cl_block* block = cl_core_local_block_get_block(core_local_block);
+ if ((block != NULL) && (block != old_block)) {
+ return true;
+ }
+ if (block != NULL) {
+ cl_core_local_block_set_block(core_local_block, NULL);
+ cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
+ }
+ block = cl_allocate_block();
+ if (block == NULL) {
+ return false;
+ }
+ cl_core_local_block_set_block(core_local_block, block);
+ cl_block_enable_access(block);
+ return true;
+}
+
+static cl_block* cl_get_block(void* record) {
+ uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
+ uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
+ return &g_log.blocks[index];
+}
+
+// Gets the next block to read and tries to free the 'prev' block (if not
+// NULL). Returns NULL when the end is reached.
+static cl_block* cl_next_block_to_read(cl_block* prev) {
+ cl_block* block = NULL;
+ if (g_log.read_iterator_state == g_log.num_cores) {
+ // We are traversing dirty list; find the next dirty block.
+    if (prev != NULL) {
+      // Try to free the previous block if there is no unread data. This block
+      // may have unread data if a previously incomplete record completed
+      // between read_next() calls.
+      block = prev->link.next->block;
+ if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
+ cl_block_list_remove(&g_log.dirty_block_list, prev);
+ cl_block_list_insert_at_head(&g_log.free_block_list, prev);
+ }
+ } else {
+ block = cl_block_list_head(&g_log.dirty_block_list);
+ }
+ if (block != NULL) {
+ return block;
+ }
+ // We are done with the dirty list; moving on to core-local blocks.
+ }
+ while (g_log.read_iterator_state > 0) {
+ g_log.read_iterator_state--;
+ block = cl_core_local_block_get_block(
+ &g_log.core_local_blocks[g_log.read_iterator_state]);
+ if (block != NULL) {
+ return block;
+ }
+ }
+ return NULL;
+}
+
+#define CL_LOG_2_MB 20 // 2^20 = 1MB
+
+// External functions: primary census_log interface.
+void census_log_initialize(size_t size_in_mb, int discard_old_records) {
+ // Check cacheline alignment.
+ GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
+ GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
+ GPR_ASSERT(!g_log.initialized);
+ g_log.discard_old_records = discard_old_records;
+ g_log.num_cores = gpr_cpu_num_cores();
+  // Ensure that we will not get any overflow in calculating num_blocks.
+ GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
+ GPR_ASSERT(size_in_mb < 1000);
+ // Ensure at least 2x as many blocks as there are cores.
+ g_log.num_blocks =
+ (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
+ CENSUS_LOG_2_MAX_RECORD_SIZE);
+ gpr_mu_init(&g_log.lock);
+ g_log.read_iterator_state = 0;
+ g_log.block_being_read = NULL;
+ g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
+ g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
+ memset(g_log.core_local_blocks, 0,
+ g_log.num_cores * sizeof(cl_core_local_block));
+ g_log.blocks = (cl_block*)gpr_malloc_aligned(
+ g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
+ memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
+ g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ cl_block_list_initialize(&g_log.free_block_list);
+ cl_block_list_initialize(&g_log.dirty_block_list);
+ for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
+ cl_block* block = g_log.blocks + i;
+ cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
+ cl_block_try_disable_access(block, 1 /* discard data */);
+ cl_block_list_insert_at_tail(&g_log.free_block_list, block);
+ }
+ gpr_atm_rel_store(&g_log.out_of_space_count, 0);
+ g_log.initialized = 1;
+}
+
+void census_log_shutdown(void) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_destroy(&g_log.lock);
+ gpr_free_aligned(g_log.core_local_blocks);
+ g_log.core_local_blocks = NULL;
+ gpr_free_aligned(g_log.blocks);
+ g_log.blocks = NULL;
+ gpr_free(g_log.buffer);
+ g_log.buffer = NULL;
+ g_log.initialized = 0;
+}
+
+void* census_log_start_write(size_t size) {
+  GPR_ASSERT(size > 0);
+  GPR_ASSERT(g_log.initialized);
+  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
+    return NULL;
+  }
+  // Bounds the number of times block allocation is attempted.
+  uint32_t attempts_remaining = g_log.num_blocks;
+ uint32_t core_id = gpr_cpu_current_cpu();
+ do {
+ void* record = NULL;
+ cl_block* block =
+ cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
+ if (block && (record = cl_block_start_write(block, size))) {
+ return record;
+ }
+    // Need to allocate a new block. We are here if:
+    // - no block is associated with the core, OR
+    // - a write is in progress on the block, OR
+    // - the block is out of space.
+ gpr_mu_lock(&g_log.lock);
+ bool allocated = cl_allocate_core_local_block(core_id, block);
+ gpr_mu_unlock(&g_log.lock);
+ if (!allocated) {
+ gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+ return NULL;
+ }
+ } while (attempts_remaining--);
+ // Give up.
+ gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+ return NULL;
+}
+
+void census_log_end_write(void* record, size_t bytes_written) {
+ GPR_ASSERT(g_log.initialized);
+ cl_block_end_write(cl_get_block(record), bytes_written);
+}
+
+void census_log_init_reader(void) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_lock(&g_log.lock);
+  // If a block is locked for reading, unlock it.
+ if (g_log.block_being_read != NULL) {
+ cl_block_end_read(g_log.block_being_read);
+ g_log.block_being_read = NULL;
+ }
+ g_log.read_iterator_state = g_log.num_cores;
+ gpr_mu_unlock(&g_log.lock);
+}
+
+const void* census_log_read_next(size_t* bytes_available) {
+ GPR_ASSERT(g_log.initialized);
+ gpr_mu_lock(&g_log.lock);
+ if (g_log.block_being_read != NULL) {
+ cl_block_end_read(g_log.block_being_read);
+ }
+ do {
+ g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
+ if (g_log.block_being_read != NULL) {
+ void* record =
+ cl_block_start_read(g_log.block_being_read, bytes_available);
+ if (record != NULL) {
+ gpr_mu_unlock(&g_log.lock);
+ return record;
+ }
+ }
+ } while (g_log.block_being_read != NULL);
+ gpr_mu_unlock(&g_log.lock);
+ return NULL;
+}
+
+size_t census_log_remaining_space(void) {
+ GPR_ASSERT(g_log.initialized);
+ size_t space = 0;
+ gpr_mu_lock(&g_log.lock);
+ if (g_log.discard_old_records) {
+ // Remaining space is not meaningful; just return the entire log space.
+ space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
+ } else {
+ GPR_ASSERT(g_log.free_block_list.count >= 0);
+ space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
+ }
+ gpr_mu_unlock(&g_log.lock);
+ return space;
+}
+
+int64_t census_log_out_of_space_count(void) {
+ GPR_ASSERT(g_log.initialized);
+ return gpr_atm_acq_load(&g_log.out_of_space_count);
+}
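
Taken together, the implementation above reduces to a start/end pair on the writer side and an init/next loop on the single reader. A minimal usage sketch of the census_log API, assuming fixed-format records and a periodic drain (the record contents and helper names are illustrative, not part of the API):

#include <string.h>
#include "src/core/census/log.h"

// Writer side: reserve space, fill it, then commit what was written.
static void log_event(const char* msg, size_t len) {
  void* record = census_log_start_write(len);  // len <= CENSUS_LOG_MAX_RECORD_SIZE
  if (record == NULL) return;  // out of space
  memcpy(record, msg, len);
  census_log_end_write(record, len);
}

// Reader side: the single reader drains all currently readable data.
static void drain_log(void (*consume)(const void* data, size_t size)) {
  census_log_init_reader();
  size_t bytes_available;
  const void* data;
  while ((data = census_log_read_next(&bytes_available)) != NULL) {
    consume(data, bytes_available);
  }
}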
diff --git a/src/core/census/log.h b/src/core/census/log.h
new file mode 100644
index 0000000000..05daea066f
--- /dev/null
+++ b/src/core/census/log.h
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_CENSUS_LOG_H
+#define GRPC_INTERNAL_CORE_CENSUS_LOG_H
+
+#include <grpc/support/port_platform.h>
+#include <stddef.h>
+
+/* Maximum record size, in bytes. */
+#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
+#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
+
+/* Initialize the statistics logging subsystem with the given log size. A log
+ size of 0 will result in the smallest possible log for the platform
+ (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
+ discard_old_records is non-zero, then new records will displace older ones
+ when the log is full. This function must be called before any other
+ census_log functions.
+*/
+void census_log_initialize(size_t size_in_mb, int discard_old_records);
+
+/* Shut down the logging subsystem. The caller must ensure that:
+   - there are no in-progress or future calls to any census_log functions, and
+   - there are no incomplete records.
+*/
+void census_log_shutdown(void);
+
+/* Allocates and returns a 'size'-byte record and marks it in use. A
+   subsequent census_log_end_write() marks the record complete. The
+   'bytes_written' census_log_end_write() argument must be <= 'size'.
+   Returns NULL if out of space AND either:
+   - the log is configured to keep old records, OR
+   - all blocks are pinned by incomplete records.
+*/
+void* census_log_start_write(size_t size);
+
+void census_log_end_write(void* record, size_t bytes_written);
+
+void census_log_init_reader(void);
+
+/* census_log_read_next() iterates over blocks with data and for each block
+   returns a pointer to the first unread byte. The number of bytes that can be
+   read is returned in 'bytes_available'. The reader is expected to read all
+   available data. Reading the data consumes it, i.e. it cannot be read again.
+   census_log_read_next() returns NULL once the end is reached, i.e. the last
+   block has been read. census_log_init_reader() starts the iteration, or
+   aborts the current iteration.
+*/
+const void* census_log_read_next(size_t* bytes_available);
+
+/* Returns the estimated remaining space across all blocks, in bytes. If the
+   log is configured to discard old records, returns the total log space.
+   Otherwise, returns the space available in empty blocks (partially filled
+   blocks are treated as full).
+*/
+size_t census_log_remaining_space(void);
+
+/* Returns the number of times census_log_start_write() failed due to
+   out-of-space. */
+int64_t census_log_out_of_space_count(void);
+
+#endif /* GRPC_INTERNAL_CORE_CENSUS_LOG_H */
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index 0427ce0b8d..bae7a90a01 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -37,6 +37,7 @@
#include <grpc/census.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -55,9 +56,8 @@ static grpc_arg copy_arg(const grpc_arg *src) {
break;
case GRPC_ARG_POINTER:
dst.value.pointer = src->value.pointer;
- dst.value.pointer.p = src->value.pointer.copy
- ? src->value.pointer.copy(src->value.pointer.p)
- : src->value.pointer.p;
+ dst.value.pointer.p =
+ src->value.pointer.vtable->copy(src->value.pointer.p);
break;
}
return dst;
@@ -94,6 +94,58 @@ grpc_channel_args *grpc_channel_args_merge(const grpc_channel_args *a,
return grpc_channel_args_copy_and_add(a, b->args, b->num_args);
}
+static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
+ int c = GPR_ICMP(a->type, b->type);
+ if (c != 0) return c;
+ c = strcmp(a->key, b->key);
+ if (c != 0) return c;
+ switch (a->type) {
+ case GRPC_ARG_STRING:
+ return strcmp(a->value.string, b->value.string);
+ case GRPC_ARG_INTEGER:
+ return GPR_ICMP(a->value.integer, b->value.integer);
+ case GRPC_ARG_POINTER:
+ c = GPR_ICMP(a->value.pointer.p, b->value.pointer.p);
+ if (c != 0) {
+ c = GPR_ICMP(a->value.pointer.vtable, b->value.pointer.vtable);
+ if (c == 0) {
+ c = a->value.pointer.vtable->cmp(a->value.pointer.p,
+ b->value.pointer.p);
+ }
+ }
+ return c;
+ }
+ GPR_UNREACHABLE_CODE(return 0);
+}
+
+/* stabilizing comparison function: since channel_args ordering matters for
+ * keys with the same name, we need to preserve that ordering */
+static int cmp_key_stable(const void *ap, const void *bp) {
+ const grpc_arg *const *a = ap;
+ const grpc_arg *const *b = bp;
+ int c = strcmp((*a)->key, (*b)->key);
+ if (c == 0) c = GPR_ICMP(*a, *b);
+ return c;
+}
+
+grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
+ grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ for (size_t i = 0; i < a->num_args; i++) {
+ args[i] = &a->args[i];
+ }
+ qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
+
+ grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ b->num_args = a->num_args;
+ b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ for (size_t i = 0; i < a->num_args; i++) {
+ b->args[i] = copy_arg(args[i]);
+ }
+
+ gpr_free(args);
+ return b;
+}
+
void grpc_channel_args_destroy(grpc_channel_args *a) {
size_t i;
for (i = 0; i < a->num_args; i++) {
@@ -104,9 +156,7 @@ void grpc_channel_args_destroy(grpc_channel_args *a) {
case GRPC_ARG_INTEGER:
break;
case GRPC_ARG_POINTER:
- if (a->args[i].value.pointer.destroy) {
- a->args[i].value.pointer.destroy(a->args[i].value.pointer.p);
- }
+ a->args[i].value.pointer.vtable->destroy(a->args[i].value.pointer.p);
break;
}
gpr_free(a->args[i].key);
@@ -208,3 +258,14 @@ int grpc_channel_args_compression_algorithm_get_states(
return (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; /* All algs. enabled */
}
}
+
+int grpc_channel_args_compare(const grpc_channel_args *a,
+ const grpc_channel_args *b) {
+ int c = GPR_ICMP(a->num_args, b->num_args);
+ if (c != 0) return c;
+ for (size_t i = 0; i < a->num_args; i++) {
+ c = cmp_arg(&a->args[i], &b->args[i]);
+ if (c != 0) return c;
+ }
+ return 0;
+}
diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h
index 480cc9aec2..b3a7c9f434 100644
--- a/src/core/channel/channel_args.h
+++ b/src/core/channel/channel_args.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,6 +40,9 @@
/* Copy some arguments */
grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src);
+/* Copy some arguments, stably sorting keys */
+grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a);
+
/** Copy some arguments and add the to_add parameter in the end.
If to_add is NULL, it is equivalent to call grpc_channel_args_copy. */
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
@@ -85,4 +88,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
int grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a);
+int grpc_channel_args_compare(const grpc_channel_args *a,
+ const grpc_channel_args *b);
+
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */
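
The point of pairing grpc_channel_args_normalize() with grpc_channel_args_compare() is that two argument sets differing only in the order of distinct keys compare equal once normalized, which is what keying subchannels on their args requires. A hedged sketch (the arg keys and values here are made up for illustration):

#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include "src/core/channel/channel_args.h"

void normalize_example(void) {
  grpc_arg a0 = {GRPC_ARG_INTEGER, "grpc.example.max_len", {.integer = 1024}};
  grpc_arg a1 = {GRPC_ARG_STRING, "grpc.example.authority",
                 {.string = "example.com"}};
  grpc_arg fwd[2] = {a0, a1};
  grpc_arg rev[2] = {a1, a0};  // same args, opposite order
  grpc_channel_args a = {2, fwd};
  grpc_channel_args b = {2, rev};

  grpc_channel_args *na = grpc_channel_args_normalize(&a);
  grpc_channel_args *nb = grpc_channel_args_normalize(&b);
  GPR_ASSERT(grpc_channel_args_compare(na, nb) == 0);  // order no longer matters
  grpc_channel_args_destroy(na);
  grpc_channel_args_destroy(nb);
}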
diff --git a/src/core/client_config/connector.c b/src/core/client_config/connector.c
index 1603ffb8be..aa34aa7fab 100644
--- a/src/core/client_config/connector.c
+++ b/src/core/client_config/connector.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,8 +33,9 @@
#include "src/core/client_config/connector.h"
-void grpc_connector_ref(grpc_connector* connector) {
+grpc_connector* grpc_connector_ref(grpc_connector* connector) {
connector->vtable->ref(connector);
+ return connector;
}
void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector) {
diff --git a/src/core/client_config/connector.h b/src/core/client_config/connector.h
index b4482fa2ee..b91eb512c3 100644
--- a/src/core/client_config/connector.h
+++ b/src/core/client_config/connector.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,7 +81,7 @@ struct grpc_connector_vtable {
grpc_connect_out_args *out_args, grpc_closure *notify);
};
-void grpc_connector_ref(grpc_connector *connector);
+grpc_connector *grpc_connector_ref(grpc_connector *connector);
void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
/** Connect using the connector: max one outstanding call at a time */
void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c
index 0a996a1e8b..6599c75dba 100644
--- a/src/core/client_config/subchannel.c
+++ b/src/core/client_config/subchannel.c
@@ -36,16 +36,17 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/avl.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/client_config/initial_connect_string.h"
+#include "src/core/client_config/subchannel_index.h"
#include "src/core/iomgr/timer.h"
#include "src/core/profiling/timers.h"
#include "src/core/surface/channel.h"
#include "src/core/transport/connectivity_state.h"
-#include "src/core/transport/connectivity_state.h"
#define INTERNAL_REF_BITS 16
#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
@@ -94,6 +95,8 @@ struct grpc_subchannel {
struct sockaddr *addr;
size_t addr_len;
+ grpc_subchannel_key *key;
+
/** initial string to send to peer */
gpr_slice initial_connect_string;
@@ -207,6 +210,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
grpc_pollset_set_destroy(&c->pollset_set);
+ grpc_subchannel_key_destroy(exec_ctx, c->key);
gpr_free(c);
}
@@ -222,22 +226,42 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
return old_val;
}
-void grpc_subchannel_ref(grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel *grpc_subchannel_ref(grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
0 REF_MUTATE_PURPOSE("STRONG_REF"));
GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0);
+ return c;
}
-void grpc_subchannel_weak_ref(grpc_subchannel *c
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel *grpc_subchannel_weak_ref(grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
GPR_ASSERT(old_refs != 0);
+ return c;
+}
+
+grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ if (!c) return NULL;
+ for (;;) {
+ gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair);
+ if (old_refs >= (1 << INTERNAL_REF_BITS)) {
+ gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS);
+ if (gpr_atm_rel_cas(&c->ref_pair, old_refs, new_refs)) {
+ return c;
+ }
+ } else {
+ return NULL;
+ }
+ }
}
static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_connected_subchannel *con;
+ grpc_subchannel_index_unregister(exec_ctx, c->key, c);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
c->disconnected = 1;
@@ -276,10 +300,19 @@ static uint32_t random_seed() {
return (uint32_t)(gpr_time_to_millis(gpr_now(GPR_CLOCK_MONOTONIC)));
}
-grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
+grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector,
grpc_subchannel_args *args) {
- grpc_subchannel *c = gpr_malloc(sizeof(*c));
+ grpc_subchannel_key *key = grpc_subchannel_key_create(connector, args);
+ grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
+ if (c) {
+ grpc_subchannel_key_destroy(exec_ctx, key);
+ return c;
+ }
+
+ c = gpr_malloc(sizeof(*c));
memset(c, 0, sizeof(*c));
+ c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
@@ -305,7 +338,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel");
gpr_mu_init(&c->mu);
- return c;
+
+ return grpc_subchannel_index_register(exec_ctx, key, c);
}
static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
diff --git a/src/core/client_config/subchannel.h b/src/core/client_config/subchannel.h
index 57c7c9dc67..313e63c75c 100644
--- a/src/core/client_config/subchannel.h
+++ b/src/core/client_config/subchannel.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,8 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
+ grpc_subchannel_ref_from_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
@@ -66,6 +68,8 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
, const char *file, int line, const char *reason
#else
#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
+ grpc_subchannel_ref_from_weak_ref((p))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
@@ -79,13 +83,15 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif
-void grpc_subchannel_ref(grpc_subchannel *channel
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel *grpc_subchannel_ref(grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_weak_ref(grpc_subchannel *channel
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel *grpc_subchannel_weak_ref(grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
@@ -146,6 +152,8 @@ grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call);
struct grpc_subchannel_args {
+ /* When updating this struct, also update subchannel_index.c */
+
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */
const grpc_channel_filter **filters;
@@ -159,7 +167,8 @@ struct grpc_subchannel_args {
};
/** create a subchannel given a connector */
-grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
+grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector,
grpc_subchannel_args *args);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_H */
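
The ref functions now return the subchannel so call sites can ref-and-assign in one expression. grpc_subchannel_ref_from_weak_ref() is the interesting addition: it upgrades a weak reference to a strong one, failing (returning NULL) once the strong count has already hit zero, which is exactly what a cache holding only weak references needs. A small sketch of the intended call pattern (the helper name is hypothetical):

// Try to hand out a strong ref from a weakly-held cache entry. If this
// returns NULL, the subchannel is already being torn down and the caller
// must construct a fresh one instead of reusing the cached pointer.
static grpc_subchannel *upgrade(grpc_subchannel *weakly_held) {
  return GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(weakly_held, "cache_upgrade");
}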
diff --git a/src/core/client_config/subchannel_index.c b/src/core/client_config/subchannel_index.c
new file mode 100644
index 0000000000..f78a7fd588
--- /dev/null
+++ b/src/core/client_config/subchannel_index.c
@@ -0,0 +1,259 @@
+//
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+//
+
+#include "src/core/client_config/subchannel_index.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/avl.h>
+#include <grpc/support/tls.h>
+
+#include "src/core/channel/channel_args.h"
+
+// A map of subchannel_key --> subchannel, used for detecting connections
+// to the same destination in order to share them.
+static gpr_avl g_subchannel_index;
+
+static gpr_mu g_mu;
+
+struct grpc_subchannel_key {
+ grpc_connector *connector;
+ grpc_subchannel_args args;
+};
+
+GPR_TLS_DECL(subchannel_index_exec_ctx);
+
+static void enter_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_ASSERT(gpr_tls_get(&subchannel_index_exec_ctx) == 0);
+ gpr_tls_set(&subchannel_index_exec_ctx, (intptr_t)exec_ctx);
+}
+
+static void leave_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_ASSERT(gpr_tls_get(&subchannel_index_exec_ctx) == (intptr_t)exec_ctx);
+ gpr_tls_set(&subchannel_index_exec_ctx, 0);
+}
+
+static grpc_exec_ctx *current_ctx(void) {
+ grpc_exec_ctx *c = (grpc_exec_ctx *)gpr_tls_get(&subchannel_index_exec_ctx);
+ GPR_ASSERT(c != NULL);
+ return c;
+}
+
+static grpc_subchannel_key *create_key(
+ grpc_connector *connector, grpc_subchannel_args *args,
+ grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
+ grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
+ k->connector = grpc_connector_ref(connector);
+ k->args.filter_count = args->filter_count;
+ k->args.filters = gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
+ memcpy((grpc_channel_filter *)k->args.filters, args->filters,
+ sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.addr_len = args->addr_len;
+ k->args.addr = gpr_malloc(args->addr_len);
+ memcpy(k->args.addr, args->addr, k->args.addr_len);
+ k->args.args = copy_channel_args(args->args);
+ return k;
+}
+
+grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *connector,
+ grpc_subchannel_args *args) {
+ return create_key(connector, args, grpc_channel_args_normalize);
+}
+
+static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) {
+ return create_key(k->connector, &k->args, grpc_channel_args_copy);
+}
+
+static int subchannel_key_compare(grpc_subchannel_key *a,
+ grpc_subchannel_key *b) {
+ int c = GPR_ICMP(a->connector, b->connector);
+ if (c != 0) return c;
+ c = GPR_ICMP(a->args.addr_len, b->args.addr_len);
+ if (c != 0) return c;
+ c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
+ if (c != 0) return c;
+ c = memcmp(a->args.addr, b->args.addr, a->args.addr_len);
+ if (c != 0) return c;
+  c = memcmp(a->args.filters, b->args.filters,
+             a->args.filter_count * sizeof(*a->args.filters));
+  if (c != 0) return c;
+  return grpc_channel_args_compare(a->args.args, b->args.args);
+}
+
+void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *k) {
+ grpc_connector_unref(exec_ctx, k->connector);
+ gpr_free(k->args.addr);
+  gpr_free((void *)k->args.filters);
+ grpc_channel_args_destroy((grpc_channel_args *)k->args.args);
+ gpr_free(k);
+}
+
+static void sck_avl_destroy(void *p) {
+ grpc_subchannel_key_destroy(current_ctx(), p);
+}
+
+static void *sck_avl_copy(void *p) { return subchannel_key_copy(p); }
+
+static long sck_avl_compare(void *a, void *b) {
+ return subchannel_key_compare(a, b);
+}
+
+static void scv_avl_destroy(void *p) {
+ GRPC_SUBCHANNEL_WEAK_UNREF(current_ctx(), p, "subchannel_index");
+}
+
+static void *scv_avl_copy(void *p) {
+ GRPC_SUBCHANNEL_WEAK_REF(p, "subchannel_index");
+ return p;
+}
+
+static const gpr_avl_vtable subchannel_avl_vtable = {
+ .destroy_key = sck_avl_destroy,
+ .copy_key = sck_avl_copy,
+ .compare_keys = sck_avl_compare,
+ .destroy_value = scv_avl_destroy,
+ .copy_value = scv_avl_copy};
+
+void grpc_subchannel_index_init(void) {
+ g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
+ gpr_mu_init(&g_mu);
+}
+
+void grpc_subchannel_index_shutdown(void) {
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_subchannel_index);
+}
+
+grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key) {
+ enter_ctx(exec_ctx);
+
+ // Lock, and take a reference to the subchannel index.
+ // We don't need to do the search under a lock as avl's are immutable.
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ grpc_subchannel *c =
+ GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(gpr_avl_get(index, key), "index_find");
+ gpr_avl_unref(index);
+
+ leave_ctx(exec_ctx);
+ return c;
+}
+
+grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed) {
+ enter_ctx(exec_ctx);
+
+ grpc_subchannel *c = NULL;
+
+ while (c == NULL) {
+ // Compare and swap loop:
+ // - take a reference to the current index
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ // - Check to see if a subchannel already exists
+ c = gpr_avl_get(index, key);
+ if (c != NULL) {
+ // yes -> we're done
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, constructed, "index_register");
+ } else {
+ // no -> update the avl and compare/swap
+ gpr_avl updated =
+ gpr_avl_add(gpr_avl_ref(index), subchannel_key_copy(key),
+ GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"));
+
+ // it may happen (but it's expected to be unlikely)
+ // that some other thread has changed the index:
+ // compare/swap here to check that, and retry as necessary
+ gpr_mu_lock(&g_mu);
+ if (index.root == g_subchannel_index.root) {
+ GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ c = constructed;
+ }
+ gpr_mu_unlock(&g_mu);
+
+ gpr_avl_unref(updated);
+ }
+ gpr_avl_unref(index);
+ }
+
+ leave_ctx(exec_ctx);
+
+ return c;
+}
+
+void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed) {
+ enter_ctx(exec_ctx);
+
+ bool done = false;
+ while (!done) {
+ // Compare and swap loop:
+ // - take a reference to the current index
+ gpr_mu_lock(&g_mu);
+ gpr_avl index = gpr_avl_ref(g_subchannel_index);
+ gpr_mu_unlock(&g_mu);
+
+ // Check to see if this key still refers to the previously
+ // registered subchannel
+ grpc_subchannel *c = gpr_avl_get(index, key);
+ if (c != constructed) {
+ gpr_avl_unref(index);
+ break;
+ }
+
+ // compare and swap the update (some other thread may have
+ // mutated the index behind us)
+ gpr_avl updated = gpr_avl_remove(gpr_avl_ref(index), key);
+
+ gpr_mu_lock(&g_mu);
+ if (index.root == g_subchannel_index.root) {
+ GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ done = true;
+ }
+ gpr_mu_unlock(&g_mu);
+
+ gpr_avl_unref(updated);
+ gpr_avl_unref(index);
+ }
+
+ leave_ctx(exec_ctx);
+}
diff --git a/src/core/client_config/subchannel_index.h b/src/core/client_config/subchannel_index.h
new file mode 100644
index 0000000000..095ef17819
--- /dev/null
+++ b/src/core/client_config/subchannel_index.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+
+#include "src/core/client_config/connector.h"
+#include "src/core/client_config/subchannel.h"
+
+/** \file Provides an index of active subchannels so that they can be
+ shared amongst channels */
+
+typedef struct grpc_subchannel_key grpc_subchannel_key;
+
+/** Create a key that can be used to uniquely identify a subchannel */
+grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *con,
+ grpc_subchannel_args *args);
+
+/** Destroy a subchannel key */
+void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key);
+
+/** Given a subchannel key, find the subchannel registered for it.
+ Returns NULL if no such channel exists.
+ Thread-safe. */
+grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key);
+
+/** Register a subchannel against a key.
+ Takes ownership of \a constructed.
+ Returns the registered subchannel. This may be different from
+ \a constructed in the case of a registration race. */
+grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed);
+
+/** Remove \a constructed as the registered subchannel for \a key. */
+void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_key *key,
+ grpc_subchannel *constructed);
+
+/** Initialize the subchannel index (global) */
+void grpc_subchannel_index_init(void);
+/** Shutdown the subchannel index (global) */
+void grpc_subchannel_index_shutdown(void);
+
+#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_INDEX_H */
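
The intended flow is the one grpc_subchannel_create() now follows: build a key, probe the index, and only construct and register on a miss. A condensed get-or-create sketch of that pattern (construct_new_subchannel is a hypothetical stand-in for the real construction code; error handling elided):

grpc_subchannel *get_or_create(grpc_exec_ctx *exec_ctx,
                               grpc_connector *connector,
                               grpc_subchannel_args *args) {
  grpc_subchannel_key *key = grpc_subchannel_key_create(connector, args);
  grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
  if (c != NULL) {
    // Hit: share the existing subchannel; the key is no longer needed.
    grpc_subchannel_key_destroy(exec_ctx, key);
    return c;
  }
  // Miss: construct, then register. Registration may return a different
  // subchannel if another thread registered one for the same key first.
  c = construct_new_subchannel(exec_ctx, connector, args, key);
  return grpc_subchannel_index_register(exec_ctx, key, c);
}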
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
index fe006c603c..ef548cfe4d 100644
--- a/src/core/iomgr/udp_server.c
+++ b/src/core/iomgr/udp_server.c
@@ -137,7 +137,7 @@ grpc_udp_server *grpc_udp_server_create(void) {
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
- grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1, NULL);
gpr_mu_destroy(&s->mu);
gpr_cv_destroy(&s->cv);
@@ -146,7 +146,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
gpr_free(s);
}
-static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
+static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
+ bool success) {
grpc_udp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
@@ -263,10 +264,10 @@ error:
}
/* event manager callback when reads are ready */
-static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
server_port *sp = arg;
- if (success == 0) {
+ if (!success) {
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index afba0079f5..c58574bd6d 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -196,14 +196,21 @@ static void *server_credentials_pointer_arg_copy(void *p) {
return grpc_server_credentials_ref(p);
}
+static int server_credentials_pointer_cmp(void *a, void *b) {
+ return GPR_ICMP(a, b);
+}
+
+static const grpc_arg_pointer_vtable cred_ptr_vtable = {
+ server_credentials_pointer_arg_copy, server_credentials_pointer_arg_destroy,
+ server_credentials_pointer_cmp};
+
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
grpc_arg arg;
memset(&arg, 0, sizeof(grpc_arg));
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_SERVER_CREDENTIALS_ARG;
arg.value.pointer.p = p;
- arg.value.pointer.copy = server_credentials_pointer_arg_copy;
- arg.value.pointer.destroy = server_credentials_pointer_arg_destroy;
+ arg.value.pointer.vtable = &cred_ptr_vtable;
return arg;
}
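
The same migration repeats in the next two files: pointer-valued channel args now carry a grpc_arg_pointer_vtable (copy, destroy, cmp) instead of bare copy/destroy function pointers, which is what lets cmp_arg() above compare them. A generic sketch of wrapping a ref-counted object this way (the my_obj_* names and arg key are hypothetical):

static void *my_obj_arg_copy(void *p) { return my_obj_ref(p); }
static void my_obj_arg_destroy(void *p) { my_obj_unref(p); }
static int my_obj_arg_cmp(void *a, void *b) { return GPR_ICMP(a, b); }

static const grpc_arg_pointer_vtable my_obj_arg_vtable = {
    my_obj_arg_copy, my_obj_arg_destroy, my_obj_arg_cmp};

grpc_arg my_obj_to_arg(my_obj *obj) {
  grpc_arg arg;
  memset(&arg, 0, sizeof(arg));
  arg.type = GRPC_ARG_POINTER;
  arg.key = "my.obj";  // hypothetical key
  arg.value.pointer.p = obj;
  arg.value.pointer.vtable = &my_obj_arg_vtable;
  return arg;
}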
diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c
index bdccbabfea..b46205323b 100644
--- a/src/core/security/security_connector.c
+++ b/src/core/security/security_connector.c
@@ -202,12 +202,17 @@ static void *connector_pointer_arg_copy(void *p) {
return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
}
+static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+static const grpc_arg_pointer_vtable connector_pointer_vtable = {
+ connector_pointer_arg_copy, connector_pointer_arg_destroy,
+ connector_pointer_cmp};
+
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) {
grpc_arg result;
result.type = GRPC_ARG_POINTER;
result.key = GRPC_SECURITY_CONNECTOR_ARG;
- result.value.pointer.destroy = connector_pointer_arg_destroy;
- result.value.pointer.copy = connector_pointer_arg_copy;
+ result.value.pointer.vtable = &connector_pointer_vtable;
result.value.pointer.p = sc;
return result;
}
diff --git a/src/core/security/security_context.c b/src/core/security/security_context.c
index 2068c97d78..a71b3bc915 100644
--- a/src/core/security/security_context.c
+++ b/src/core/security/security_context.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -309,14 +309,19 @@ static void *auth_context_pointer_arg_copy(void *p) {
return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg");
}
+static int auth_context_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
+ auth_context_pointer_arg_copy, auth_context_pointer_arg_destroy,
+ auth_context_pointer_cmp};
+
grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
grpc_arg arg;
memset(&arg, 0, sizeof(grpc_arg));
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_AUTH_CONTEXT_ARG;
arg.value.pointer.p = p;
- arg.value.pointer.copy = auth_context_pointer_arg_copy;
- arg.value.pointer.destroy = auth_context_pointer_arg_destroy;
+ arg.value.pointer.vtable = &auth_context_pointer_vtable;
return arg;
}
diff --git a/src/core/support/avl.c b/src/core/support/avl.c
index 9734c9987f..f378b3ee17 100644
--- a/src/core/support/avl.c
+++ b/src/core/support/avl.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -167,7 +167,7 @@ static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key,
vtable->copy_key(right->left->key),
vtable->copy_value(right->left->value),
new_node(key, value, left, ref_node(right->left->left)),
- new_node(vtable->copy_key(right->key), vtable->copy_key(right->value),
+ new_node(vtable->copy_key(right->key), vtable->copy_value(right->value),
ref_node(right->left->right), ref_node(right->right)));
unref_node(vtable, right);
return n;
diff --git a/src/core/surface/alarm.c b/src/core/surface/alarm.c
index d753023ca9..fb496f6c47 100644
--- a/src/core/surface/alarm.c
+++ b/src/core/surface/alarm.c
@@ -63,9 +63,9 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
alarm->cq = cq;
alarm->tag = tag;
+ grpc_cq_begin_op(cq, tag);
grpc_timer_init(&exec_ctx, &alarm->alarm, deadline, alarm_cb, alarm,
gpr_now(GPR_CLOCK_MONOTONIC));
- grpc_cq_begin_op(cq, tag);
grpc_exec_ctx_finish(&exec_ctx);
return alarm;
}
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index 4d4337d288..fd7e20e9cc 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -172,7 +172,7 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
c->base.vtable = &connector_vtable;
gpr_ref_init(&c->refs, 1);
args->args = final_args;
- s = grpc_subchannel_create(&c->base, args);
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
return s;
diff --git a/src/core/surface/init.c b/src/core/surface/init.c
index e3ab70dba7..a4a53d3ec1 100644
--- a/src/core/surface/init.c
+++ b/src/core/surface/init.c
@@ -46,6 +46,8 @@
#include "src/core/client_config/resolver_registry.h"
#include "src/core/client_config/resolvers/dns_resolver.h"
#include "src/core/client_config/resolvers/sockaddr_resolver.h"
+#include "src/core/client_config/subchannel.h"
+#include "src/core/client_config/subchannel_index.h"
#include "src/core/debug/trace.h"
#include "src/core/iomgr/executor.h"
#include "src/core/iomgr/iomgr.h"
@@ -127,6 +129,7 @@ void grpc_init(void) {
}
gpr_timers_global_init();
grpc_cq_global_init();
+ grpc_subchannel_index_init();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != NULL) {
g_all_of_the_plugins[i].init();
@@ -145,6 +148,7 @@ void grpc_shutdown(void) {
grpc_executor_shutdown();
grpc_cq_global_shutdown();
grpc_iomgr_shutdown();
+ grpc_subchannel_index_shutdown();
census_shutdown();
gpr_timers_global_destroy();
grpc_tracer_shutdown();
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index dd1441ad67..9c04426d87 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -238,7 +238,7 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
args->args = final_args;
- s = grpc_subchannel_create(&c->base, args);
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
return s;