path: root/src/core/ext
Diffstat (limited to 'src/core/ext')
-rw-r--r--  src/core/ext/census/README.md | 61
-rw-r--r--  src/core/ext/census/aggregation.h | 51
-rw-r--r--  src/core/ext/census/base_resources.cc | 56
-rw-r--r--  src/core/ext/census/base_resources.h | 32
-rw-r--r--  src/core/ext/census/census_init.cc | 33
-rw-r--r--  src/core/ext/census/census_interface.h | 69
-rw-r--r--  src/core/ext/census/census_log.cc | 588
-rw-r--r--  src/core/ext/census/census_log.h | 84
-rw-r--r--  src/core/ext/census/census_rpc_stats.cc | 238
-rw-r--r--  src/core/ext/census/census_rpc_stats.h | 86
-rw-r--r--  src/core/ext/census/census_tracing.cc | 226
-rw-r--r--  src/core/ext/census/census_tracing.h | 81
-rw-r--r--  src/core/ext/census/context.cc | 496
-rw-r--r--  src/core/ext/census/gen/README.md | 10
-rw-r--r--  src/core/ext/census/gen/census.pb.c | 161
-rw-r--r--  src/core/ext/census/gen/census.pb.h | 280
-rw-r--r--  src/core/ext/census/gen/trace_context.pb.c | 39
-rw-r--r--  src/core/ext/census/gen/trace_context.pb.h | 78
-rw-r--r--  src/core/ext/census/grpc_context.cc | 3
-rw-r--r--  src/core/ext/census/grpc_filter.cc | 196
-rw-r--r--  src/core/ext/census/grpc_plugin.cc | 70
-rw-r--r--  src/core/ext/census/hash_table.cc | 288
-rw-r--r--  src/core/ext/census/hash_table.h | 124
-rw-r--r--  src/core/ext/census/initialize.cc | 51
-rw-r--r--  src/core/ext/census/intrusive_hash_map.cc | 305
-rw-r--r--  src/core/ext/census/intrusive_hash_map.h | 160
-rw-r--r--  src/core/ext/census/intrusive_hash_map_internal.h | 48
-rw-r--r--  src/core/ext/census/mlog.cc | 586
-rw-r--r--  src/core/ext/census/mlog.h | 88
-rw-r--r--  src/core/ext/census/operation.cc | 48
-rw-r--r--  src/core/ext/census/placeholders.cc | 49
-rw-r--r--  src/core/ext/census/resource.cc | 303
-rw-r--r--  src/core/ext/census/resource.h | 56
-rw-r--r--  src/core/ext/census/rpc_metric_id.h | 36
-rw-r--r--  src/core/ext/census/trace_context.cc | 71
-rw-r--r--  src/core/ext/census/trace_context.h | 64
-rw-r--r--  src/core/ext/census/trace_label.h | 46
-rw-r--r--  src/core/ext/census/trace_propagation.h | 56
-rw-r--r--  src/core/ext/census/trace_status.h | 30
-rw-r--r--  src/core/ext/census/trace_string.h | 35
-rw-r--r--  src/core/ext/census/tracing.cc | 55
-rw-r--r--  src/core/ext/census/tracing.h | 117
-rw-r--r--  src/core/ext/census/window_stats.cc | 301
-rw-r--r--  src/core/ext/census/window_stats.h | 166
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc | 158
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.h (renamed from src/core/ext/census/grpc_filter.h) | 25
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 44
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 19
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 715
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 467
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 265
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h | 153
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 4
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.cc | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.cc | 153
-rw-r--r--  src/core/ext/transport/chttp2/transport/flow_control.cc | 573
-rw-r--r--  src/core/ext/transport/chttp2/transport/flow_control.h | 336
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.cc | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.cc | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.cc | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 153
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.cc | 37
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.cc | 29
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.cc | 2
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.cc | 6
65 files changed, 1755 insertions, 7433 deletions
diff --git a/src/core/ext/census/README.md b/src/core/ext/census/README.md
deleted file mode 100644
index a9826fe889..0000000000
--- a/src/core/ext/census/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-<!---
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
--->
-
-# Census - a resource measurement and tracing system
-
-This directory contains code for Census, which will ultimately provide the
-following features for any gRPC-using system:
-* A [dapper](http://research.google.com/pubs/pub36356.html)-like tracing
- system, enabling tracing across a distributed infrastructure.
-* RPC statistics and measurements for key metrics, such as latency, bytes
- transferred, number of errors etc.
-* Resource measurement framework which can be used for measuring custom
- metrics. Through the use of [tags](#Tags), these can be broken down across
- the entire distributed stack.
-* Easy integration of the above with
- [Google Cloud Trace](https://cloud.google.com/tools/cloud-trace) and
- [Google Cloud Monitoring](https://cloud.google.com/monitoring/).
-
-## Concepts
-
-### Context
-
-### Operations
-
-### Tags
-
-### Metrics
-
-## API
-
-### Internal/RPC API
-
-### External/Client API
-
-### RPC API
-
-## Files in this directory
-
-Note that files and functions in this directory can be split into two
-categories:
-* Files that define core census library functions. Functions etc. in these
- files are named census\_\*, and constitute the core census library
- functionality. At some time in the future, these will become a standalone
- library.
-* Files that define functions etc. that provide a convenient interface between
- grpc and the core census functionality. These files are all named
- grpc\_\*.{c,h}, and define function names beginning with grpc\_census\_\*.
-
diff --git a/src/core/ext/census/aggregation.h b/src/core/ext/census/aggregation.h
deleted file mode 100644
index 1ba7953ecc..0000000000
--- a/src/core/ext/census/aggregation.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <stddef.h>
-
-#ifndef GRPC_CORE_EXT_CENSUS_AGGREGATION_H
-#define GRPC_CORE_EXT_CENSUS_AGGREGATION_H
-
-/** Structure used to describe an aggregation type. */
-struct census_aggregation_ops {
- /* Create a new aggregation. The pointer returned can be used in future calls
- to clone(), free(), record(), data() and reset(). */
- void *(*create)(const void *create_arg);
- /* Make a copy of an aggregation created by create() */
- void *(*clone)(const void *aggregation);
- /* Destroy an aggregation created by create() */
- void (*free)(void *aggregation);
- /* Record a new value against aggregation. */
- void (*record)(void *aggregation, double value);
- /* Return current aggregation data. The caller must cast this object into
- the correct type for the aggregation result. The object returned can be
- freed by using free_data(). */
- void *(*data)(const void *aggregation);
- /* free data returned by data() */
- void (*free_data)(void *data);
- /* Reset an aggregation to default (zero) values. */
- void (*reset)(void *aggregation);
- /* Merge 'from' aggregation into 'to'. Both aggregations must be compatible */
- void (*merge)(void *to, const void *from);
- /* Fill buffer with printable string version of aggregation contents. For
- debugging only. Returns the number of bytes added to buffer (a value == n
- implies the buffer was of insufficient size). */
- size_t (*print)(const void *aggregation, char *buffer, size_t n);
-};
-
-#endif /* GRPC_CORE_EXT_CENSUS_AGGREGATION_H */
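
Note: the census_aggregation_ops vtable removed above describes an aggregation entirely through function pointers. For reference, a minimal sketch of one such aggregation is shown below. The "running sum" aggregation, the sum_aggregation type, and all sum_* function names are hypothetical and introduced only for illustration; the sketch is written against the header as it existed before this deletion and is not part of this change.

/* Illustrative sketch only: a hypothetical "running sum" aggregation that
   fills in the census_aggregation_ops interface deleted above. Assumes the
   pre-deletion aggregation.h is still on the include path. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "src/core/ext/census/aggregation.h"

typedef struct {
  double sum;
} sum_aggregation;

static void *sum_create(const void *create_arg) {
  (void)create_arg; /* this aggregation needs no per-instance configuration */
  return calloc(1, sizeof(sum_aggregation));
}

static void *sum_clone(const void *aggregation) {
  void *copy = malloc(sizeof(sum_aggregation));
  memcpy(copy, aggregation, sizeof(sum_aggregation));
  return copy;
}

static void sum_free(void *aggregation) { free(aggregation); }

static void sum_record(void *aggregation, double value) {
  ((sum_aggregation *)aggregation)->sum += value;
}

/* data() hands back a heap copy; the caller releases it with free_data(). */
static void *sum_data(const void *aggregation) { return sum_clone(aggregation); }

static void sum_free_data(void *data) { free(data); }

static void sum_reset(void *aggregation) {
  ((sum_aggregation *)aggregation)->sum = 0.0;
}

static void sum_merge(void *to, const void *from) {
  ((sum_aggregation *)to)->sum += ((const sum_aggregation *)from)->sum;
}

static size_t sum_print(const void *aggregation, char *buffer, size_t n) {
  int written =
      snprintf(buffer, n, "sum=%g", ((const sum_aggregation *)aggregation)->sum);
  return written < 0 ? 0 : (size_t)written;
}

/* Members in declaration order: create, clone, free, record, data,
   free_data, reset, merge, print. */
static const struct census_aggregation_ops sum_ops = {
    sum_create, sum_clone,     sum_free,  sum_record, sum_data,
    sum_free_data, sum_reset, sum_merge, sum_print};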
diff --git a/src/core/ext/census/base_resources.cc b/src/core/ext/census/base_resources.cc
deleted file mode 100644
index 3697c6f0e0..0000000000
--- a/src/core/ext/census/base_resources.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/base_resources.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/census.h>
-#include <grpc/support/log.h>
-
-#include "src/core/ext/census/resource.h"
-
-// Add base RPC resource definitions for use by RPC runtime.
-//
-// TODO(aveitch): All of these are currently hardwired definitions encoded in
-// the code in this file. These should be converted to use an external
-// configuration mechanism, in which these resources are defined in a text
-// file, which is compiled to .pb format and read by still-to-be-written
-// configuration functions.
-
-// Define all base resources. This should be called by census initialization.
-void define_base_resources() {
- google_census_Resource_BasicUnit numerator =
- google_census_Resource_BasicUnit_SECS;
- resource r = {(char *)"client_rpc_latency", // name
- (char *)"Client RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
- define_resource(&r);
- r = {(char *)"server_rpc_latency", // name
- (char *)"Server RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
- define_resource(&r);
-}
diff --git a/src/core/ext/census/base_resources.h b/src/core/ext/census/base_resources.h
deleted file mode 100644
index d24923b597..0000000000
--- a/src/core/ext/census/base_resources.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
-#define GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Define all base resources. This should be called by census initialization. */
-void define_base_resources();
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */
diff --git a/src/core/ext/census/census_init.cc b/src/core/ext/census/census_init.cc
deleted file mode 100644
index d7f719ff8c..0000000000
--- a/src/core/ext/census/census_init.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/census_interface.h"
-
-#include <grpc/support/log.h>
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/ext/census/census_tracing.h"
-
-void census_init(void) {
- census_tracing_init();
- census_stats_store_init();
-}
-
-void census_shutdown(void) {
- census_stats_store_shutdown();
- census_tracing_shutdown();
-}
diff --git a/src/core/ext/census/census_interface.h b/src/core/ext/census/census_interface.h
deleted file mode 100644
index 113c2b16ef..0000000000
--- a/src/core/ext/census/census_interface.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
-
-#include <grpc/support/port_platform.h>
-
-/* Maximum length of an individual census trace annotation. */
-#define CENSUS_MAX_ANNOTATION_LENGTH 200
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Structure of a census op id. Define as structure because 64bit integer is not
- available on every platform for C89. */
-typedef struct census_op_id {
- uint32_t upper;
- uint32_t lower;
-} census_op_id;
-
-typedef struct census_rpc_stats census_rpc_stats;
-
-/* Initializes Census library. No-op if Census is already initialized. */
-void census_init(void);
-
-/* Shutdown Census Library. */
-void census_shutdown(void);
-
-/* Annotates grpc method name on a census_op_id. The method name has the format
- of <full quantified rpc service name>/<rpc function name>. Returns 0 iff
- op_id and method_name are all valid. op_id is valid after its creation and
- before calling census_tracing_end_op().
-
- TODO(hongyu): Figure out valid characters set for service name and command
- name and document requirements here.*/
-int census_add_method_tag(census_op_id op_id, const char *method_name);
-
-/* Annotates tracing information to a specific op_id.
- Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */
-void census_tracing_print(census_op_id op_id, const char *annotation);
-
-/* Starts tracing for an RPC. Returns a locally unique census_op_id */
-census_op_id census_tracing_start_op(void);
-
-/* Ends tracing. Calling this function will invalidate the input op_id. */
-void census_tracing_end_op(census_op_id op_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */
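
Note: taken together, the functions declared in the deleted census_interface.h describe a simple per-RPC tracing lifecycle. The sketch below uses only the declarations shown above; the method name and annotation text are placeholders, and the sketch is illustrative rather than part of this change.

/* Illustrative sketch only: the per-RPC tracing lifecycle described by the
   deleted census_interface.h. */
#include "src/core/ext/census/census_interface.h"

static void traced_rpc_sketch(void) {
  census_op_id id;
  census_init(); /* no-op if the library is already initialized */
  id = census_tracing_start_op();
  /* Per the header, 0 means both the op id and the method name were valid. */
  if (census_add_method_tag(id, "/example.Service/Method") == 0) {
    census_tracing_print(id, "request sent"); /* truncated at
                                                 CENSUS_MAX_ANNOTATION_LENGTH */
    /* ... run the RPC ... */
  }
  census_tracing_end_op(id); /* invalidates id */
  census_shutdown();
}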
diff --git a/src/core/ext/census/census_log.cc b/src/core/ext/census/census_log.cc
deleted file mode 100644
index 100047f12b..0000000000
--- a/src/core/ext/census/census_log.cc
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Available log space is divided up in blocks of
- CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the
- following three data structures:
- - Free blocks (free_block_list)
- - Blocks with unread data (dirty_block_list)
- - Blocks currently attached to cores (core_local_blocks[])
-
- census_log_start_write() moves a block from core_local_blocks[] to the
- end of dirty_block_list when block:
- - is out-of-space OR
- - has an incomplete record (an incomplete record occurs when a thread calls
- census_log_start_write() and is context-switched before calling
- census_log_end_write()
- So, blocks in dirty_block_list are ordered, from oldest to newest, by time
- when block is detached from the core.
-
- census_log_read_next() first iterates over dirty_block_list and then
- core_local_blocks[]. It moves completely read blocks from dirty_block_list
- to free_block_list. Blocks in core_local_blocks[] are not freed, even when
- completely read.
-
- If log is configured to discard old records and free_block_list is empty,
- census_log_start_write() iterates over dirty_block_list to allocate a
- new block. It moves the oldest available block (no pending read/write) to
- core_local_blocks[].
-
- core_local_block_struct is used to implement a map from core id to the block
- associated with that core. This mapping is advisory. It is possible that the
- block returned by this mapping is no longer associated with that core. This
- mapping is updated, lazily, by census_log_start_write().
-
- Locking in block struct:
-
- Exclusive g_log.lock must be held before calling any functions operatong on
- block structs except census_log_start_write() and
- census_log_end_write().
-
- Writes to a block are serialized via writer_lock.
- census_log_start_write() acquires this lock and
- census_log_end_write() releases it. On failure to acquire the lock,
- writer allocates a new block for the current core and updates
- core_local_block accordingly.
-
- Simultaneous read and write access is allowed. Reader can safely read up to
- committed bytes (bytes_committed).
-
- reader_lock protects the block, currently being read, from getting recycled.
- start_read() acquires reader_lock and end_read() releases the lock.
-
- Read/write access to a block is disabled via try_disable_access(). It returns
- with both writer_lock and reader_lock held. These locks are subsequently
- released by enable_access() to enable access to the block.
-
- A note on naming: Most function/struct names are prepended by cl_
- (shorthand for census_log). Further, functions that manipulate structures
- include the name of the structure, which will be passed as the first
- argument. E.g. cl_block_initialize() will initialize a cl_block.
-*/
-#include "src/core/ext/census/census_log.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
-#include <string.h>
-
-/* End of platform specific code */
-
-typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct *next;
- struct census_log_block_list_struct *prev;
- struct census_log_block *block;
-} cl_block_list_struct;
-
-typedef struct census_log_block {
- /* Pointer to underlying buffer */
- char *buffer;
- gpr_atm writer_lock;
- gpr_atm reader_lock;
- /* Keeps completely written bytes. Declared atomic because accessed
- simultaneously by reader and writer. */
- gpr_atm bytes_committed;
- /* Bytes already read */
- int32_t bytes_read;
- /* Links for list */
- cl_block_list_struct link;
-/* We want this structure to be cacheline aligned. We assume the following
- sizes for the various parts on 32/64bit systems:
- type 32b size 64b size
- char* 4 8
- 3x gpr_atm 12 24
- int32_t 4 8 (assumes padding)
- cl_block_list_struct 12 24
- TOTAL 32 64
-
- Depending on the size of our cacheline and the architecture, we
- selectively add char buffering to this structure. The size is checked
- via assert in census_log_initialize(). */
-#if defined(GPR_ARCH_64)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_BLOCK_PAD_SIZE > 0
- char padding[CL_BLOCK_PAD_SIZE];
-#endif
-} cl_block;
-
-/* A list of cl_blocks, doubly-linked through cl_block::link. */
-typedef struct census_log_block_list {
- int32_t count; /* Number of items in list. */
- cl_block_list_struct ht; /* head/tail of linked list. */
-} cl_block_list;
-
-/* Cacheline aligned block pointers to avoid false sharing. Block pointer must
- be initialized via set_block(), before calling other functions */
-typedef struct census_log_core_local_block {
- gpr_atm block;
-/* Ensure cachline alignment: we assume sizeof(gpr_atm) == 4 or 8 */
-#if defined(GPR_ARCH_64)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
- char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
-#endif
-} cl_core_local_block;
-
-struct census_log {
- int discard_old_records;
- /* Number of cores (aka hardware-contexts) */
- unsigned num_cores;
- /* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
- int32_t num_blocks;
- cl_block *blocks; /* Block metadata. */
- cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
- gpr_mu lock;
- int initialized; /* has log been initialized? */
- /* Keeps the state of the reader iterator. A value of 0 indicates that
- iterator has reached the end. census_log_init_reader() resets the
- value to num_core to restart iteration. */
- uint32_t read_iterator_state;
- /* Points to the block being read. If non-NULL, the block is locked for
- reading (block_being_read_->reader_lock is held). */
- cl_block *block_being_read;
- /* A non-zero value indicates that log is full. */
- gpr_atm is_full;
- char *buffer;
- cl_block_list free_block_list;
- cl_block_list dirty_block_list;
- gpr_atm out_of_space_count;
-};
-
-/* Single internal log */
-static struct census_log g_log;
-
-/* Functions that operate on an atomic memory location used as a lock */
-
-/* Returns non-zero if lock is acquired */
-static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-
-static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
-
-/* Functions that operate on cl_core_local_block's */
-
-static void cl_core_local_block_set_block(cl_core_local_block *clb,
- cl_block *block) {
- gpr_atm_rel_store(&clb->block, (gpr_atm)block);
-}
-
-static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
- return (cl_block *)gpr_atm_acq_load(&clb->block);
-}
-
-/* Functions that operate on cl_block_list_struct's */
-
-static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
- cl_block *block) {
- bls->next = bls->prev = bls;
- bls->block = block;
-}
-
-/* Functions that operate on cl_block_list's */
-
-static void cl_block_list_initialize(cl_block_list *list) {
- list->count = 0;
- cl_block_list_struct_initialize(&list->ht, NULL);
-}
-
-/* Returns head of *this, or NULL if empty. */
-static cl_block *cl_block_list_head(cl_block_list *list) {
- return list->ht.next->block;
-}
-
-/* Insert element *e after *pos. */
-static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
- cl_block_list_struct *e) {
- list->count++;
- e->next = pos->next;
- e->prev = pos;
- e->next->prev = e;
- e->prev->next = e;
-}
-
-/* Insert block at the head of the list */
-static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
- cl_block_list_insert(list, &list->ht, &block->link);
-}
-
-/* Insert block at the tail of the list */
-static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
- cl_block_list_insert(list, list->ht.prev, &block->link);
-}
-
-/* Removes block *b. Requires *b be in the list. */
-static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
- list->count--;
- b->link.next->prev = b->link.prev;
- b->link.prev->next = b->link.next;
-}
-
-/* Functions that operate on cl_block's */
-
-static void cl_block_initialize(cl_block *block, char *buffer) {
- block->buffer = buffer;
- gpr_atm_rel_store(&block->writer_lock, 0);
- gpr_atm_rel_store(&block->reader_lock, 0);
- gpr_atm_rel_store(&block->bytes_committed, 0);
- block->bytes_read = 0;
- cl_block_list_struct_initialize(&block->link, block);
-}
-
-/* Guards against exposing partially written buffer to the reader. */
-static void cl_block_set_bytes_committed(cl_block *block,
- int32_t bytes_committed) {
- gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
-}
-
-static int32_t cl_block_get_bytes_committed(cl_block *block) {
- return gpr_atm_acq_load(&block->bytes_committed);
-}
-
-/* Tries to disable future read/write access to this block. Succeeds if:
- - no in-progress write AND
- - no in-progress read AND
- - 'discard_data' set to true OR no unread data
- On success, clears the block state and returns with writer_lock_ and
- reader_lock_ held. These locks are released by a subsequent
- cl_block_access_enable() call. */
-static int cl_block_try_disable_access(cl_block *block, int discard_data) {
- if (!cl_try_lock(&block->writer_lock)) {
- return 0;
- }
- if (!cl_try_lock(&block->reader_lock)) {
- cl_unlock(&block->writer_lock);
- return 0;
- }
- if (!discard_data &&
- (block->bytes_read != cl_block_get_bytes_committed(block))) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
- return 0;
- }
- cl_block_set_bytes_committed(block, 0);
- block->bytes_read = 0;
- return 1;
-}
-
-static void cl_block_enable_access(cl_block *block) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
-}
-
-/* Returns with writer_lock held. */
-static void *cl_block_start_write(cl_block *block, size_t size) {
- int32_t bytes_committed;
- if (!cl_try_lock(&block->writer_lock)) {
- return NULL;
- }
- bytes_committed = cl_block_get_bytes_committed(block);
- if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
- cl_unlock(&block->writer_lock);
- return NULL;
- }
- return block->buffer + bytes_committed;
-}
-
-/* Releases writer_lock and increments committed bytes by 'bytes_written'.
- 'bytes_written' must be <= 'size' specified in the corresponding
- StartWrite() call. This function is thread-safe. */
-static void cl_block_end_write(cl_block *block, size_t bytes_written) {
- cl_block_set_bytes_committed(
- block, cl_block_get_bytes_committed(block) + bytes_written);
- cl_unlock(&block->writer_lock);
-}
-
-/* Returns a pointer to the first unread byte in buffer. The number of bytes
- available are returned in 'bytes_available'. Acquires reader lock that is
- released by a subsequent cl_block_end_read() call. Returns NULL if:
- - read in progress
- - no data available */
-static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
- void *record;
- if (!cl_try_lock(&block->reader_lock)) {
- return NULL;
- }
- /* bytes_committed may change from under us. Use bytes_available to update
- bytes_read below. */
- *bytes_available = cl_block_get_bytes_committed(block) - block->bytes_read;
- if (*bytes_available == 0) {
- cl_unlock(&block->reader_lock);
- return NULL;
- }
- record = block->buffer + block->bytes_read;
- block->bytes_read += *bytes_available;
- return record;
-}
-
-static void cl_block_end_read(cl_block *block) {
- cl_unlock(&block->reader_lock);
-}
-
-/* Internal functions operating on g_log */
-
-/* Allocates a new free block (or recycles an available dirty block if log is
- configured to discard old records). Returns NULL if out-of-space. */
-static cl_block *cl_allocate_block(void) {
- cl_block *block = cl_block_list_head(&g_log.free_block_list);
- if (block != NULL) {
- cl_block_list_remove(&g_log.free_block_list, block);
- return block;
- }
- if (!g_log.discard_old_records) {
- /* No free block and log is configured to keep old records. */
- return NULL;
- }
- /* Recycle dirty block. Start from the oldest. */
- for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
- block = block->link.next->block) {
- if (cl_block_try_disable_access(block, 1 /* discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, block);
- return block;
- }
- }
- return NULL;
-}
-
-/* Allocates a new block and updates core id => block mapping. 'old_block'
- points to the block that the caller thinks is attached to
- 'core_id'. 'old_block' may be NULL. Returns non-zero if:
- - allocated a new block OR
- - 'core_id' => 'old_block' mapping changed (another thread allocated a
- block before lock was acquired). */
-static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
- /* Now that we have the lock, check if core-local mapping has changed. */
- cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
- cl_block *block = cl_core_local_block_get_block(core_local_block);
- if ((block != NULL) && (block != old_block)) {
- return 1;
- }
- if (block != NULL) {
- cl_core_local_block_set_block(core_local_block, NULL);
- cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
- }
- block = cl_allocate_block();
- if (block == NULL) {
- gpr_atm_rel_store(&g_log.is_full, 1);
- return 0;
- }
- cl_core_local_block_set_block(core_local_block, block);
- cl_block_enable_access(block);
- return 1;
-}
-
-static cl_block *cl_get_block(void *record) {
- uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
- uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
- return &g_log.blocks[index];
-}
-
-/* Gets the next block to read and tries to free 'prev' block (if not NULL).
- Returns NULL if reached the end. */
-static cl_block *cl_next_block_to_read(cl_block *prev) {
- cl_block *block = NULL;
- if (g_log.read_iterator_state == g_log.num_cores) {
- /* We are traversing dirty list; find the next dirty block. */
- if (prev != NULL) {
- /* Try to free the previous block if there is no unread data. This block
- may have unread data if previously incomplete record completed between
- read_next() calls. */
- block = prev->link.next->block;
- if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, prev);
- cl_block_list_insert_at_head(&g_log.free_block_list, prev);
- gpr_atm_rel_store(&g_log.is_full, 0);
- }
- } else {
- block = cl_block_list_head(&g_log.dirty_block_list);
- }
- if (block != NULL) {
- return block;
- }
- /* We are done with the dirty list; moving on to core-local blocks. */
- }
- while (g_log.read_iterator_state > 0) {
- g_log.read_iterator_state--;
- block = cl_core_local_block_get_block(
- &g_log.core_local_blocks[g_log.read_iterator_state]);
- if (block != NULL) {
- return block;
- }
- }
- return NULL;
-}
-
-/* External functions: primary stats_log interface */
-void census_log_initialize(size_t size_in_mb, int discard_old_records) {
- int32_t ix;
- /* Check cacheline alignment. */
- GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(!g_log.initialized);
- g_log.discard_old_records = discard_old_records;
- g_log.num_cores = gpr_cpu_num_cores();
- /* Ensure at least as many blocks as there are cores. */
- g_log.num_blocks = GPR_MAX(
- g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
- gpr_mu_init(&g_log.lock);
- g_log.read_iterator_state = 0;
- g_log.block_being_read = NULL;
- gpr_atm_rel_store(&g_log.is_full, 0);
- g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
- g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.core_local_blocks, 0,
- g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block *)gpr_malloc_aligned(
- g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- cl_block_list_initialize(&g_log.free_block_list);
- cl_block_list_initialize(&g_log.dirty_block_list);
- for (ix = 0; ix < g_log.num_blocks; ++ix) {
- cl_block *block = g_log.blocks + ix;
- cl_block_initialize(block,
- g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
- cl_block_try_disable_access(block, 1 /* discard data */);
- cl_block_list_insert_at_tail(&g_log.free_block_list, block);
- }
- gpr_atm_rel_store(&g_log.out_of_space_count, 0);
- g_log.initialized = 1;
-}
-
-void census_log_shutdown(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_destroy(&g_log.lock);
- gpr_free_aligned(g_log.core_local_blocks);
- g_log.core_local_blocks = NULL;
- gpr_free_aligned(g_log.blocks);
- g_log.blocks = NULL;
- gpr_free(g_log.buffer);
- g_log.buffer = NULL;
- g_log.initialized = 0;
-}
-
-void *census_log_start_write(size_t size) {
- /* Used to bound number of times block allocation is attempted. */
- int32_t attempts_remaining = g_log.num_blocks;
- /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
- int32_t core_id = gpr_cpu_current_cpu();
- GPR_ASSERT(g_log.initialized);
- if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
- return NULL;
- }
- do {
- int allocated;
- void *record = NULL;
- cl_block *block =
- cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
- if (block && (record = cl_block_start_write(block, size))) {
- return record;
- }
- /* Need to allocate a new block. We are here if:
- - No block associated with the core OR
- - Write in-progress on the block OR
- - block is out of space */
- if (gpr_atm_acq_load(&g_log.is_full)) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- gpr_mu_lock(&g_log.lock);
- allocated = cl_allocate_core_local_block(core_id, block);
- gpr_mu_unlock(&g_log.lock);
- if (!allocated) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- } while (attempts_remaining--);
- /* Give up. */
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
-}
-
-void census_log_end_write(void *record, size_t bytes_written) {
- GPR_ASSERT(g_log.initialized);
- cl_block_end_write(cl_get_block(record), bytes_written);
-}
-
-void census_log_init_reader(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- /* If a block is locked for reading unlock it. */
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- g_log.block_being_read = NULL;
- }
- g_log.read_iterator_state = g_log.num_cores;
- gpr_mu_unlock(&g_log.lock);
-}
-
-const void *census_log_read_next(size_t *bytes_available) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- }
- do {
- g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
- if (g_log.block_being_read != NULL) {
- void *record =
- cl_block_start_read(g_log.block_being_read, bytes_available);
- if (record != NULL) {
- gpr_mu_unlock(&g_log.lock);
- return record;
- }
- }
- } while (g_log.block_being_read != NULL);
- gpr_mu_unlock(&g_log.lock);
- return NULL;
-}
-
-size_t census_log_remaining_space(void) {
- size_t space;
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.discard_old_records) {
- /* Remaining space is not meaningful; just return the entire log space. */
- space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
- } else {
- space = g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
- }
- gpr_mu_unlock(&g_log.lock);
- return space;
-}
-
-int census_log_out_of_space_count(void) {
- GPR_ASSERT(g_log.initialized);
- return gpr_atm_acq_load(&g_log.out_of_space_count);
-}
diff --git a/src/core/ext/census/census_log.h b/src/core/ext/census/census_log.h
deleted file mode 100644
index ee336ae733..0000000000
--- a/src/core/ext/census/census_log.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
-
-#include <stddef.h>
-
-/* Maximum record size, in bytes. */
-#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
-#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Initialize the statistics logging subsystem with the given log size. A log
- size of 0 will result in the smallest possible log for the platform
- (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
- discard_old_records is non-zero, then new records will displace older ones
- when the log is full. This function must be called before any other
- census_log functions.
-*/
-void census_log_initialize(size_t size_in_mb, int discard_old_records);
-
-/* Shutdown the logging subsystem. Caller must ensure that:
- - no in progress or future call to any census_log functions
- - no incomplete records
-*/
-void census_log_shutdown(void);
-
-/* Allocates and returns a 'size' bytes record and marks it in use. A
- subsequent census_log_end_write() marks the record complete. The
- 'bytes_written' census_log_end_write() argument must be <=
- 'size'. Returns NULL if out-of-space AND:
- - log is configured to keep old records OR
- - all blocks are pinned by incomplete records.
-*/
-void *census_log_start_write(size_t size);
-
-void census_log_end_write(void *record, size_t bytes_written);
-
-/* census_log_read_next() iterates over blocks with data and for each block
- returns a pointer to the first unread byte. The number of bytes that can be
- read are returned in 'bytes_available'. Reader is expected to read all
- available data. Reading the data consumes it i.e. it cannot be read again.
- census_log_read_next() returns NULL if the end is reached i.e last block
- is read. census_log_init_reader() starts the iteration or aborts the
- current iteration.
-*/
-void census_log_init_reader(void);
-const void *census_log_read_next(size_t *bytes_available);
-
-/* Returns estimated remaining space across all blocks, in bytes. If log is
- configured to discard old records, returns total log space. Otherwise,
- returns space available in empty blocks (partially filled blocks are
- treated as full).
-*/
-size_t census_log_remaining_space(void);
-
-/* Returns the number of times grpc_stats_log_start_write() failed due to
- out-of-space. */
-int census_log_out_of_space_count(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H */
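
Note: the header above documents a write/read cycle for the log: census_log_start_write() reserves space, census_log_end_write() commits it, and census_log_init_reader()/census_log_read_next() drain committed data. A minimal sketch of one round trip, assuming the pre-deletion header, follows; it is illustrative only.

/* Illustrative sketch only: one write/read round trip through the log API
   documented in the deleted census_log.h. */
#include <string.h>

#include "src/core/ext/census/census_log.h"

static void census_log_round_trip_sketch(void) {
  const char payload[] = "example record";
  size_t available = 0;
  const void *unread;
  void *slot;

  census_log_initialize(1 /* size_in_mb */, 1 /* discard_old_records */);

  slot = census_log_start_write(sizeof(payload));
  if (slot != NULL) {
    memcpy(slot, payload, sizeof(payload));
    census_log_end_write(slot, sizeof(payload)); /* bytes_written <= size */
  }

  census_log_init_reader();
  while ((unread = census_log_read_next(&available)) != NULL) {
    /* consume 'available' bytes starting at 'unread'; reading consumes them */
  }

  census_log_shutdown(); /* only once no records are in progress */
}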
diff --git a/src/core/ext/census/census_rpc_stats.cc b/src/core/ext/census/census_rpc_stats.cc
deleted file mode 100644
index 0aca1f109e..0000000000
--- a/src/core/ext/census/census_rpc_stats.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include "src/core/ext/census/census_interface.h"
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/ext/census/census_tracing.h"
-#include "src/core/ext/census/hash_table.h"
-#include "src/core/ext/census/window_stats.h"
-#include "src/core/lib/support/murmur_hash.h"
-#include "src/core/lib/support/string.h"
-
-#define NUM_INTERVALS 3
-#define MINUTE_INTERVAL 0
-#define HOUR_INTERVAL 1
-#define TOTAL_INTERVAL 2
-
-/* for easier typing */
-typedef census_per_method_rpc_stats per_method_stats;
-
-/* Ensure mu is only initialized once. */
-static gpr_once g_stats_store_mu_init = GPR_ONCE_INIT;
-/* Guards two stats stores. */
-static gpr_mu g_mu;
-static census_ht *g_client_stats_store = NULL;
-static census_ht *g_server_stats_store = NULL;
-
-static void init_mutex(void) { gpr_mu_init(&g_mu); }
-
-static void init_mutex_once(void) {
- gpr_once_init(&g_stats_store_mu_init, init_mutex);
-}
-
-static int cmp_str_keys(const void *k1, const void *k2) {
- return strcmp((const char *)k1, (const char *)k2);
-}
-
-/* TODO(hongyu): replace it with cityhash64 */
-static uint64_t simple_hash(const void *k) {
- size_t len = strlen(k);
- uint64_t higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
- return higher << 32 |
- gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
-}
-
-static void delete_stats(void *stats) {
- census_window_stats_destroy((struct census_window_stats *)stats);
-}
-
-static void delete_key(void *key) { gpr_free(key); }
-
-static const census_ht_option ht_opt = {
- CENSUS_HT_POINTER /* key type */, 1999 /* n_of_buckets */,
- simple_hash /* hash function */, cmp_str_keys /* key comparator */,
- delete_stats /* data deleter */, delete_key /* key deleter */
-};
-
-static void init_rpc_stats(void *stats) {
- memset(stats, 0, sizeof(census_rpc_stats));
-}
-
-static void stat_add_proportion(double p, void *base, const void *addme) {
- census_rpc_stats *b = (census_rpc_stats *)base;
- census_rpc_stats *a = (census_rpc_stats *)addme;
- b->cnt += p * a->cnt;
- b->rpc_error_cnt += p * a->rpc_error_cnt;
- b->app_error_cnt += p * a->app_error_cnt;
- b->elapsed_time_ms += p * a->elapsed_time_ms;
- b->api_request_bytes += p * a->api_request_bytes;
- b->wire_request_bytes += p * a->wire_request_bytes;
- b->api_response_bytes += p * a->api_response_bytes;
- b->wire_response_bytes += p * a->wire_response_bytes;
-}
-
-static void stat_add(void *base, const void *addme) {
- stat_add_proportion(1.0, base, addme);
-}
-
-static gpr_timespec min_hour_total_intervals[3] = {
- {60, 0}, {3600, 0}, {36000000, 0}};
-
-static const census_window_stats_stat_info window_stats_settings = {
- sizeof(census_rpc_stats), init_rpc_stats, stat_add, stat_add_proportion};
-
-census_rpc_stats *census_rpc_stats_create_empty(void) {
- census_rpc_stats *ret =
- (census_rpc_stats *)gpr_malloc(sizeof(census_rpc_stats));
- memset(ret, 0, sizeof(census_rpc_stats));
- return ret;
-}
-
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data) {
- int i = 0;
- for (i = 0; i < data->num_entries; i++) {
- if (data->stats[i].method != NULL) {
- gpr_free((void *)data->stats[i].method);
- }
- }
- if (data->stats != NULL) {
- gpr_free(data->stats);
- }
- data->num_entries = 0;
- data->stats = NULL;
-}
-
-static void record_stats(census_ht *store, census_op_id op_id,
- const census_rpc_stats *stats) {
- gpr_mu_lock(&g_mu);
- if (store != NULL) {
- census_trace_obj *trace = NULL;
- census_internal_lock_trace_store();
- trace = census_get_trace_obj_locked(op_id);
- if (trace != NULL) {
- const char *method_name = census_get_trace_method_name(trace);
- struct census_window_stats *window_stats = NULL;
- census_ht_key key;
- key.ptr = (void *)method_name;
- window_stats = census_ht_find(store, key);
- census_internal_unlock_trace_store();
- if (window_stats == NULL) {
- window_stats = census_window_stats_create(3, min_hour_total_intervals,
- 30, &window_stats_settings);
- key.ptr = gpr_strdup(key.ptr);
- census_ht_insert(store, key, (void *)window_stats);
- }
- census_window_stats_add(window_stats, gpr_now(GPR_CLOCK_REALTIME), stats);
- } else {
- census_internal_unlock_trace_store();
- }
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats *stats) {
- record_stats(g_client_stats_store, op_id, stats);
-}
-
-void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats *stats) {
- record_stats(g_server_stats_store, op_id, stats);
-}
-
-/* Get stats from input stats store */
-static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
- GPR_ASSERT(data != NULL);
- if (data->num_entries != 0) {
- census_aggregated_rpc_stats_set_empty(data);
- }
- gpr_mu_lock(&g_mu);
- if (store != NULL) {
- size_t n;
- unsigned i, j;
- gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_kv *kv = census_ht_get_all_elements(store, &n);
- if (kv != NULL) {
- data->num_entries = n;
- data->stats =
- (per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
- for (i = 0; i < n; i++) {
- census_window_stats_sums sums[NUM_INTERVALS];
- for (j = 0; j < NUM_INTERVALS; j++) {
- sums[j].statistic = (void *)census_rpc_stats_create_empty();
- }
- data->stats[i].method = gpr_strdup(kv[i].k.ptr);
- census_window_stats_get_sums(kv[i].v, now, sums);
- data->stats[i].minute_stats =
- *(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
- data->stats[i].hour_stats =
- *(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
- data->stats[i].total_stats =
- *(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
- for (j = 0; j < NUM_INTERVALS; j++) {
- gpr_free(sums[j].statistic);
- }
- }
- gpr_free(kv);
- }
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_get_client_stats(census_aggregated_rpc_stats *data) {
- get_stats(g_client_stats_store, data);
-}
-
-void census_get_server_stats(census_aggregated_rpc_stats *data) {
- get_stats(g_server_stats_store, data);
-}
-
-void census_stats_store_init(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_client_stats_store == NULL && g_server_stats_store == NULL) {
- g_client_stats_store = census_ht_create(&ht_opt);
- g_server_stats_store = census_ht_create(&ht_opt);
- } else {
- gpr_log(GPR_ERROR, "Census stats store already initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_stats_store_shutdown(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_client_stats_store != NULL) {
- census_ht_destroy(g_client_stats_store);
- g_client_stats_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census server stats store not initialized.");
- }
- if (g_server_stats_store != NULL) {
- census_ht_destroy(g_server_stats_store);
- g_server_stats_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census client stats store not initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
diff --git a/src/core/ext/census/census_rpc_stats.h b/src/core/ext/census/census_rpc_stats.h
deleted file mode 100644
index 8004ade37d..0000000000
--- a/src/core/ext/census/census_rpc_stats.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
-
-#include <grpc/support/port_platform.h>
-#include "src/core/ext/census/census_interface.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct census_rpc_stats {
- uint64_t cnt;
- uint64_t rpc_error_cnt;
- uint64_t app_error_cnt;
- double elapsed_time_ms;
- double api_request_bytes;
- double wire_request_bytes;
- double api_response_bytes;
- double wire_response_bytes;
-};
-
-/* Creates an empty rpc stats object on heap. */
-census_rpc_stats *census_rpc_stats_create_empty(void);
-
-typedef struct census_per_method_rpc_stats {
- const char *method;
- census_rpc_stats minute_stats; /* cumulative stats in the past minute */
- census_rpc_stats hour_stats; /* cumulative stats in the past hour */
- census_rpc_stats total_stats; /* cumulative stats from last gc */
-} census_per_method_rpc_stats;
-
-typedef struct census_aggregated_rpc_stats {
- int num_entries;
- census_per_method_rpc_stats *stats;
-} census_aggregated_rpc_stats;
-
-/* Initializes an aggregated rpc stats object to an empty state. */
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data);
-
-/* Records client side stats of a rpc. */
-void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats *stats);
-
-/* Records server side stats of a rpc. */
-void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats *stats);
-
-/* The following two functions are intended for inprocess query of
- per-service per-method stats from grpc implementations. */
-
-/* Populates *data_map with server side aggregated per-service per-method
- stats.
- DO NOT CALL from outside of grpc code. */
-void census_get_server_stats(census_aggregated_rpc_stats *data_map);
-
-/* Populates *data_map with client side aggregated per-service per-method
- stats.
- DO NOT CALL from outside of grpc code. */
-void census_get_client_stats(census_aggregated_rpc_stats *data_map);
-
-void census_stats_store_init(void);
-void census_stats_store_shutdown(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H */
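
Note: census_get_client_stats() and census_get_server_stats() are the in-process query path that the header above reserves for gRPC-internal code. A sketch of how that query might be consumed is below; it assumes census_init() has already run and is illustrative only.

/* Illustrative sketch only: querying aggregated per-method client stats via
   the internal API declared in the deleted census_rpc_stats.h. Assumes the
   stats store was initialized via census_init(). */
#include <stdio.h>

#include "src/core/ext/census/census_rpc_stats.h"

static void dump_client_stats_sketch(void) {
  int i;
  census_aggregated_rpc_stats data;
  data.num_entries = 0; /* must start out empty before the first query */
  data.stats = NULL;
  census_get_client_stats(&data);
  for (i = 0; i < data.num_entries; i++) {
    printf("%s: %llu RPCs in the last minute, %.1f ms elapsed\n",
           data.stats[i].method,
           (unsigned long long)data.stats[i].minute_stats.cnt,
           data.stats[i].minute_stats.elapsed_time_ms);
  }
  /* Frees the per-method name strings and the stats array itself. */
  census_aggregated_rpc_stats_set_empty(&data);
}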
diff --git a/src/core/ext/census/census_tracing.cc b/src/core/ext/census/census_tracing.cc
deleted file mode 100644
index 199b260dd1..0000000000
--- a/src/core/ext/census/census_tracing.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/census_tracing.h"
-#include "src/core/ext/census/census_interface.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/sync.h>
-#include "src/core/ext/census/hash_table.h"
-#include "src/core/lib/support/string.h"
-
-void census_trace_obj_destroy(census_trace_obj *obj) {
- census_trace_annotation *p = obj->annotations;
- while (p != NULL) {
- census_trace_annotation *next = p->next;
- gpr_free(p);
- p = next;
- }
- gpr_free(obj->method);
- gpr_free(obj);
-}
-
-static void delete_trace_obj(void *obj) {
- census_trace_obj_destroy((census_trace_obj *)obj);
-}
-
-static const census_ht_option ht_opt = {
- CENSUS_HT_UINT64 /* key type */,
- 571 /* n_of_buckets */,
- NULL /* hash */,
- NULL /* compare_keys */,
- delete_trace_obj /* delete data */,
- NULL /* delete key */
-};
-
-static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
-static gpr_mu g_mu; /* Guards following two static variables. */
-static census_ht *g_trace_store = NULL;
-static uint64_t g_id = 0;
-
-static census_ht_key op_id_as_key(census_op_id *id) {
- return *(census_ht_key *)id;
-}
-
-static uint64_t op_id_2_uint64(census_op_id *id) {
- uint64_t ret;
- memcpy(&ret, id, sizeof(census_op_id));
- return ret;
-}
-
-static void init_mutex(void) { gpr_mu_init(&g_mu); }
-
-static void init_mutex_once(void) {
- gpr_once_init(&g_init_mutex_once, init_mutex);
-}
-
-census_op_id census_tracing_start_op(void) {
- gpr_mu_lock(&g_mu);
- {
- census_trace_obj *ret = gpr_malloc(sizeof(census_trace_obj));
- memset(ret, 0, sizeof(census_trace_obj));
- g_id++;
- memcpy(&ret->id, &g_id, sizeof(census_op_id));
- ret->rpc_stats.cnt = 1;
- ret->ts = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void *)ret);
- gpr_log(GPR_DEBUG, "Start tracing for id %lu", g_id);
- gpr_mu_unlock(&g_mu);
- return ret->id;
- }
-}
-
-int census_add_method_tag(census_op_id op_id, const char *method) {
- int ret = 0;
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace == NULL) {
- ret = 1;
- } else {
- trace->method = gpr_strdup(method);
- }
- gpr_mu_unlock(&g_mu);
- return ret;
-}
-
-void census_tracing_print(census_op_id op_id, const char *anno_txt) {
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace != NULL) {
- census_trace_annotation *anno = gpr_malloc(sizeof(census_trace_annotation));
- anno->ts = gpr_now(GPR_CLOCK_REALTIME);
- {
- char *d = anno->txt;
- const char *s = anno_txt;
- int n = 0;
- for (; n < CENSUS_MAX_ANNOTATION_LENGTH && *s != '\0'; ++n) {
- *d++ = *s++;
- }
- *d = '\0';
- }
- anno->next = trace->annotations;
- trace->annotations = anno;
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_end_op(census_op_id op_id) {
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace != NULL) {
- trace->rpc_stats.elapsed_time_ms = gpr_timespec_to_micros(
- gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), trace->ts));
- gpr_log(GPR_DEBUG, "End tracing for id %lu, method %s, latency %f us",
- op_id_2_uint64(&op_id), trace->method,
- trace->rpc_stats.elapsed_time_ms);
- census_ht_erase(g_trace_store, op_id_as_key(&op_id));
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_init(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_trace_store == NULL) {
- g_id = 1;
- g_trace_store = census_ht_create(&ht_opt);
- } else {
- gpr_log(GPR_ERROR, "Census trace store already initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_shutdown(void) {
- gpr_mu_lock(&g_mu);
- if (g_trace_store != NULL) {
- census_ht_destroy(g_trace_store);
- g_trace_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census trace store is not initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_internal_lock_trace_store(void) { gpr_mu_lock(&g_mu); }
-
-void census_internal_unlock_trace_store(void) { gpr_mu_unlock(&g_mu); }
-
-census_trace_obj *census_get_trace_obj_locked(census_op_id op_id) {
- if (g_trace_store == NULL) {
- gpr_log(GPR_ERROR, "Census trace store is not initialized.");
- return NULL;
- }
- return (census_trace_obj *)census_ht_find(g_trace_store,
- op_id_as_key(&op_id));
-}
-
-const char *census_get_trace_method_name(const census_trace_obj *trace) {
- return trace->method;
-}
-
-static census_trace_annotation *dup_annotation_chain(
- census_trace_annotation *from) {
- census_trace_annotation *ret = NULL;
- census_trace_annotation **to = &ret;
- for (; from != NULL; from = from->next) {
- *to = gpr_malloc(sizeof(census_trace_annotation));
- memcpy(*to, from, sizeof(census_trace_annotation));
- to = &(*to)->next;
- }
- return ret;
-}
-
-static census_trace_obj *trace_obj_dup(census_trace_obj *from) {
- census_trace_obj *to = NULL;
- GPR_ASSERT(from != NULL);
- to = gpr_malloc(sizeof(census_trace_obj));
- to->id = from->id;
- to->ts = from->ts;
- to->rpc_stats = from->rpc_stats;
- to->method = gpr_strdup(from->method);
- to->annotations = dup_annotation_chain(from->annotations);
- return to;
-}
-
-census_trace_obj **census_get_active_ops(int *num_active_ops) {
- census_trace_obj **ret = NULL;
- gpr_mu_lock(&g_mu);
- if (g_trace_store != NULL) {
- size_t n = 0;
- census_ht_kv *all_kvs = census_ht_get_all_elements(g_trace_store, &n);
- *num_active_ops = (int)n;
- if (n != 0) {
- size_t i = 0;
- ret = gpr_malloc(sizeof(census_trace_obj *) * n);
- for (i = 0; i < n; i++) {
- ret[i] = trace_obj_dup((census_trace_obj *)all_kvs[i].v);
- }
- }
- gpr_free(all_kvs);
- }
- gpr_mu_unlock(&g_mu);
- return ret;
-}
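
As the removed implementation above shows, census_get_active_ops hands back caller-owned duplicates: the array is gpr_malloc'd and each element is a deep copy produced by trace_obj_dup. A minimal consumer sketch (illustrative only, not code from the tree; it assumes the declarations in the census_tracing.h header removed below):

#include <grpc/support/alloc.h>
#include "src/core/ext/census/census_tracing.h"

static void dump_active_ops(void) {
  int n = 0;
  census_trace_obj **ops = census_get_active_ops(&n);
  if (ops == NULL) return; /* store not initialized, or no active operations */
  for (int i = 0; i < n; i++) {
    /* inspect ops[i]->method, ops[i]->annotations, ... */
    census_trace_obj_destroy(ops[i]); /* caller owns each duplicated object */
  }
  gpr_free(ops); /* caller also owns the array itself */
}
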
diff --git a/src/core/ext/census/census_tracing.h b/src/core/ext/census/census_tracing.h
deleted file mode 100644
index ccb767fcd0..0000000000
--- a/src/core/ext/census/census_tracing.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
-
-#include <grpc/support/time.h>
-#include "src/core/ext/census/census_rpc_stats.h"
-
-/* WARNING: The data structures and APIs provided by this file are for GRPC
- library's internal use ONLY. They might be changed in backward-incompatible
- ways and are not subject to any deprecation policy.
- They are not recommended for external use.
- */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Struct for a trace annotation. */
-typedef struct census_trace_annotation {
- gpr_timespec ts; /* timestamp of the annotation */
- char txt[CENSUS_MAX_ANNOTATION_LENGTH + 1]; /* actual txt annotation */
- struct census_trace_annotation *next;
-} census_trace_annotation;
-
-typedef struct census_trace_obj {
- census_op_id id;
- gpr_timespec ts;
- census_rpc_stats rpc_stats;
- char *method;
- census_trace_annotation *annotations;
-} census_trace_obj;
-
-/* Deletes trace object. */
-void census_trace_obj_destroy(census_trace_obj *obj);
-
-/* Initializes trace store. This function is thread safe. */
-void census_tracing_init(void);
-
-/* Shuts down the trace store. This function is thread safe. */
-void census_tracing_shutdown(void);
-
-/* Gets the trace object corresponding to the given op_id. Returns NULL if the
-   trace store is not initialized or the trace object is not found. The trace
-   store must be locked before calling this function. */
-census_trace_obj *census_get_trace_obj_locked(census_op_id op_id);
-
-/* The following two functions acquire and release the trace store global lock.
- They are for census internal use only. */
-void census_internal_lock_trace_store(void);
-void census_internal_unlock_trace_store(void);
-
-/* Gets method name associated with the input trace object. */
-const char *census_get_trace_method_name(const census_trace_obj *trace);
-
-/* Returns an array of pointers to trace objects for currently active
-   operations and fills in the number of active operations. Returns NULL if
-   there are no active operations.
-   Caller owns the returned array and the objects it contains. */
-census_trace_obj **census_get_active_ops(int *num_active_ops);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H */
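
The lock/lookup functions declared above were the intended way to read fields of a live trace object. A hedged sketch of that access pattern (illustrative only; log_method_for_op is a hypothetical helper name):

#include <grpc/support/log.h>
#include "src/core/ext/census/census_tracing.h"

/* Log the method name of an in-flight operation. The store lock must be
   held around census_get_trace_obj_locked(), as documented above. */
static void log_method_for_op(census_op_id op_id) {
  census_internal_lock_trace_store();
  census_trace_obj *trace = census_get_trace_obj_locked(op_id);
  if (trace != NULL) {
    gpr_log(GPR_DEBUG, "method: %s", census_get_trace_method_name(trace));
  }
  census_internal_unlock_trace_store();
}
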
diff --git a/src/core/ext/census/context.cc b/src/core/ext/census/context.cc
deleted file mode 100644
index 9b25a32e36..0000000000
--- a/src/core/ext/census/context.cc
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-#include <string.h>
-#include "src/core/lib/support/string.h"
-
-// Functions in this file support the public context API, including
-// encoding/decoding as part of context propagation across RPCs. The overall
-// requirements for the context representation, in approximate priority
-// order, are:
-// 1. Efficient conversion to/from wire format
-// 2. Minimal bytes used on-wire
-// 3. Efficient context creation
-// 4. Efficient lookup of tag value for a key
-// 5. Efficient iteration over tags
-// 6. Minimal memory footprint
-//
-// Notes on tradeoffs/decisions:
-// * Each tag includes a 1-byte key length as well as a nul-terminating byte.
-//   These aid efficient parsing and allow key strings to be returned directly,
-//   which is more important than saving a single byte per tag on the wire.
-// * The wire encoding uses only single-byte values. This eliminates the need
-//   to handle endianness conversions. It also means there is a hard upper
-//   limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
-// * Keep all tag information (keys/values/flags) in a single memory buffer,
-// that can be directly copied to the wire.
-
-// min and max valid chars in tag keys and values. All printable ASCII is OK.
-#define MIN_VALID_TAG_CHAR 32 // ' '
-#define MAX_VALID_TAG_CHAR 126 // '~'
-
-// Structure representing a set of tags. Essentially a count of number of tags
-// present, and pointer to a chunk of memory that contains the per-tag details.
-struct tag_set {
- int ntags; // number of tags.
- int ntags_alloc; // ntags + number of deleted tags (total number of tags
- // in all of kvm). This will always be == ntags, except during the process
- // of building a new tag set.
- size_t kvm_size; // number of bytes allocated for key/value storage.
- size_t kvm_used; // number of bytes of used key/value memory
- char *kvm; // key/value memory. Consists of repeated entries of:
- // Offset Size Description
- // 0 1 Key length, including trailing 0. (K)
- // 1 1 Value length, including trailing 0 (V)
- // 2 1 Flags
- // 3 K Key bytes
- // 3 + K V Value bytes
- //
- // We refer to the first 3 entries as the 'tag header'. If extra values are
- // introduced in the header, you will need to modify the TAG_HEADER_SIZE
- // constant, the raw_tag structure (and everything that uses it) and the
- // encode/decode functions appropriately.
-};
-
-// Number of bytes in tag header.
-#define TAG_HEADER_SIZE 3 // key length (1) + value length (1) + flags (1)
-// Offsets to tag header entries.
-#define KEY_LEN_OFFSET 0
-#define VALUE_LEN_OFFSET 1
-#define FLAG_OFFSET 2
-
-// raw_tag represents the raw-storage form of a tag in the kvm of a tag_set.
-struct raw_tag {
- uint8_t key_len;
- uint8_t value_len;
- uint8_t flags;
- char *key;
- char *value;
-};
-
-// Use a reserved flag bit for indication of deleted tag.
-#define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
-#define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
-
-// Primary representation of a context. Composed of 2 underlying tag_set
-// structs, one each for propagated and local (non-propagated) tags. This is
-// to efficiently support tag encoding/decoding.
-// TODO(aveitch): need to add tracing id's/structure.
-struct census_context {
- struct tag_set tags[2];
- census_context_status status;
-};
-
-// Indices into the tags member of census_context
-#define PROPAGATED_TAGS 0
-#define LOCAL_TAGS 1
-
-// Validate (check all characters are in range and size is less than limit) a
-// key or value string. Returns 0 if the string is invalid, or the length
-// (including terminator) if valid.
-static size_t validate_tag(const char *kv) {
- size_t len = 1;
- char ch;
- while ((ch = *kv++) != 0) {
- if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
- return 0;
- }
- len++;
- }
- if (len > CENSUS_MAX_TAG_KV_LEN) {
- return 0;
- }
- return len;
-}
-
-// Extract a raw tag given a pointer (raw) to the tag header. Allow for some
-// extra bytes in the tag header (see encode/decode functions for usage: this
-// allows for future expansion of the tag header).
-static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
- tag->key_len = (uint8_t)(*header++);
- tag->value_len = (uint8_t)(*header++);
- tag->flags = (uint8_t)(*header++);
- header += offset;
- tag->key = header;
- header += tag->key_len;
- tag->value = header;
- return header + tag->value_len;
-}
-
-// Make a copy (in 'to') of an existing tag_set.
-static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
- memcpy(to, from, sizeof(struct tag_set));
- to->kvm = (char *)gpr_malloc(to->kvm_size);
- memcpy(to->kvm, from->kvm, from->kvm_used);
-}
-
-// Delete a tag from a tag_set, if it exists (returns true if it did).
-static bool tag_set_delete_tag(struct tag_set *tags, const char *key,
- size_t key_len) {
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags_alloc; i++) {
- uint8_t *flags = (uint8_t *)(kvp + FLAG_OFFSET);
- struct raw_tag tag;
- kvp = decode_tag(&tag, kvp, 0);
- if (CENSUS_TAG_IS_DELETED(tag.flags)) continue;
- if ((key_len == tag.key_len) && (memcmp(key, tag.key, key_len) == 0)) {
- *flags |= CENSUS_TAG_DELETED;
- tags->ntags--;
- return true;
- }
- }
- return false;
-}
-
-// Delete a tag from a context, return true if it existed.
-static bool context_delete_tag(census_context *context, const census_tag *tag,
- size_t key_len) {
- return (
- tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
- tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
-}
-
-// Add a tag to a tag_set. Return true on success, false if the tag could
-// not be added because of constraints on tag set size. This function should
-// not be called if the tag may already exist (in a non-deleted state) in
-// the tag_set, as that would result in two tags with the same key.
-static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
- size_t key_len, size_t value_len) {
- if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
- return false;
- }
- const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
- if (tags->kvm_used + tag_size > tags->kvm_size) {
- // allocate new memory if needed
- tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
- char *new_kvm = (char *)gpr_malloc(tags->kvm_size);
- if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used);
- gpr_free(tags->kvm);
- tags->kvm = new_kvm;
- }
- char *kvp = tags->kvm + tags->kvm_used;
- *kvp++ = (char)key_len;
- *kvp++ = (char)value_len;
- // ensure reserved flags are not used.
- *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
- memcpy(kvp, tag->key, key_len);
- kvp += key_len;
- memcpy(kvp, tag->value, value_len);
- tags->kvm_used += tag_size;
- tags->ntags++;
- tags->ntags_alloc++;
- return true;
-}
-
-// Add/modify/delete a tag to/in a context. Caller must validate that tag key
-// etc. are valid.
-static void context_modify_tag(census_context *context, const census_tag *tag,
- size_t key_len, size_t value_len) {
- // First delete the tag if it is already present.
- bool deleted = context_delete_tag(context, tag, key_len);
- bool added = false;
- if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
- added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
- value_len);
- } else {
- added =
- tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
- }
-
- if (deleted) {
- context->status.n_modified_tags++;
- } else {
- if (added) {
- context->status.n_added_tags++;
- } else {
- context->status.n_ignored_tags++;
- }
- }
-}
-
-// Remove memory used for deleted tags from a tag set. Basic algorithm:
-// 1) Walk through tag set to find first deleted tag. Record where it is.
-// 2) Find the next non-deleted tag. Copy all of kvm from there to the end
-//    "over" the deleted tags.
-// 3) Repeat #1 and #2 until all tags have been seen.
-// 4) If we are still looking for a non-deleted tag at the end, the tail of
-//    the kvm consists entirely of deleted tags; just reduce the used amount
-//    of memory by the appropriate amount.
-static void tag_set_flatten(struct tag_set *tags) {
- if (tags->ntags == tags->ntags_alloc) return;
- bool found_deleted = false; // found a deleted tag.
- char *kvp = tags->kvm;
- char *dbase = NULL; // record location of deleted tag
- for (int i = 0; i < tags->ntags_alloc; i++) {
- struct raw_tag tag;
- char *next_kvp = decode_tag(&tag, kvp, 0);
- if (found_deleted) {
- if (!CENSUS_TAG_IS_DELETED(tag.flags)) {
- ptrdiff_t reduce = kvp - dbase; // #bytes in deleted tags
- GPR_ASSERT(reduce > 0);
- ptrdiff_t copy_size = tags->kvm + tags->kvm_used - kvp;
- GPR_ASSERT(copy_size > 0);
- memmove(dbase, kvp, (size_t)copy_size);
- tags->kvm_used -= (size_t)reduce;
- next_kvp -= reduce;
- found_deleted = false;
- }
- } else {
- if (CENSUS_TAG_IS_DELETED(tag.flags)) {
- dbase = kvp;
- found_deleted = true;
- }
- }
- kvp = next_kvp;
- }
- if (found_deleted) {
- GPR_ASSERT(dbase > tags->kvm);
- tags->kvm_used = (size_t)(dbase - tags->kvm);
- }
- tags->ntags_alloc = tags->ntags;
-}
-
-census_context *census_context_create(const census_context *base,
- const census_tag *tags, int ntags,
- census_context_status const **status) {
- census_context *context =
- (census_context *)gpr_malloc(sizeof(census_context));
-  // If we are given a base, copy it into our new tag set. Otherwise
-  // zero-initialize everything.
- if (base == NULL) {
- memset(context, 0, sizeof(census_context));
- } else {
- tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
- tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
- memset(&context->status, 0, sizeof(context->status));
- }
- // Walk over the additional tags and, for those that aren't invalid, modify
- // the context to add/replace/delete as required.
- for (int i = 0; i < ntags; i++) {
- const census_tag *tag = &tags[i];
- size_t key_len = validate_tag(tag->key);
- // ignore the tag if it is invalid or too short.
- if (key_len <= 1) {
- context->status.n_invalid_tags++;
- } else {
- if (tag->value != NULL) {
- size_t value_len = validate_tag(tag->value);
- if (value_len != 0) {
- context_modify_tag(context, tag, key_len, value_len);
- } else {
- context->status.n_invalid_tags++;
- }
- } else {
- if (context_delete_tag(context, tag, key_len)) {
- context->status.n_deleted_tags++;
- }
- }
- }
- }
- // Remove any deleted tags, update status if needed, and return.
- tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
- tag_set_flatten(&context->tags[LOCAL_TAGS]);
- context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
- context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
- if (status) {
- *status = &context->status;
- }
- return context;
-}
-
-const census_context_status *census_context_get_status(
- const census_context *context) {
- return &context->status;
-}
-
-void census_context_destroy(census_context *context) {
- gpr_free(context->tags[PROPAGATED_TAGS].kvm);
- gpr_free(context->tags[LOCAL_TAGS].kvm);
- gpr_free(context);
-}
-
-void census_context_initialize_iterator(const census_context *context,
- census_context_iterator *iterator) {
- iterator->context = context;
- iterator->index = 0;
- if (context->tags[PROPAGATED_TAGS].ntags != 0) {
- iterator->base = PROPAGATED_TAGS;
- iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
- } else if (context->tags[LOCAL_TAGS].ntags != 0) {
- iterator->base = LOCAL_TAGS;
- iterator->kvm = context->tags[LOCAL_TAGS].kvm;
- } else {
- iterator->base = -1;
- }
-}
-
-int census_context_next_tag(census_context_iterator *iterator,
- census_tag *tag) {
- if (iterator->base < 0) {
- return 0;
- }
- struct raw_tag raw;
- iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
- tag->key = raw.key;
- tag->value = raw.value;
- tag->flags = raw.flags;
- if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
- do {
- if (iterator->base == LOCAL_TAGS) {
- iterator->base = -1;
- return 1;
- }
- } while (iterator->context->tags[++iterator->base].ntags == 0);
- iterator->index = 0;
- iterator->kvm = iterator->context->tags[iterator->base].kvm;
- }
- return 1;
-}
-
-// Find a tag in a tag_set by key. Return true if found, false otherwise.
-static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
- size_t key_len, census_tag *tag) {
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags; i++) {
- struct raw_tag raw;
- kvp = decode_tag(&raw, kvp, 0);
- if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
- tag->key = raw.key;
- tag->value = raw.value;
- tag->flags = raw.flags;
- return true;
- }
- }
- return false;
-}
-
-int census_context_get_tag(const census_context *context, const char *key,
- census_tag *tag) {
- size_t key_len = strlen(key) + 1;
- if (key_len == 1) {
- return 0;
- }
- if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
- tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
- return 1;
- }
- return 0;
-}
-
-// Context encoding and decoding functions.
-//
-// Wire format for a tag_set:
-//
-// First, a tag set header:
-//
-// offset bytes description
-// 0 1 version number
-// 1 1 number of bytes in this header. This allows for future
-// expansion.
-// 2 1 number of bytes in each tag header.
-// 3 1 ntags value from tag set.
-//
-// This is followed by the key/value memory from struct tag_set.
-
-#define ENCODED_VERSION 0 // Version number
-#define ENCODED_HEADER_SIZE 4 // size of tag set header
-
-// Encode a tag set. Returns 0 if buffer is too small.
-static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
- size_t buf_size) {
- if (buf_size < ENCODED_HEADER_SIZE + tags->kvm_used) {
- return 0;
- }
- buf_size -= ENCODED_HEADER_SIZE;
- *buffer++ = (char)ENCODED_VERSION;
- *buffer++ = (char)ENCODED_HEADER_SIZE;
- *buffer++ = (char)TAG_HEADER_SIZE;
- *buffer++ = (char)tags->ntags;
- if (tags->ntags == 0) {
- return ENCODED_HEADER_SIZE;
- }
- memcpy(buffer, tags->kvm, tags->kvm_used);
- return ENCODED_HEADER_SIZE + tags->kvm_used;
-}
-
-size_t census_context_encode(const census_context *context, char *buffer,
- size_t buf_size) {
- return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
-}
-
-// Decode a tag set.
-static void tag_set_decode(struct tag_set *tags, const char *buffer,
- size_t size) {
- uint8_t version = (uint8_t)(*buffer++);
- uint8_t header_size = (uint8_t)(*buffer++);
- uint8_t tag_header_size = (uint8_t)(*buffer++);
- tags->ntags = tags->ntags_alloc = (int)(*buffer++);
- if (tags->ntags == 0) {
- tags->ntags_alloc = 0;
- tags->kvm_size = 0;
- tags->kvm_used = 0;
- tags->kvm = NULL;
- return;
- }
- if (header_size != ENCODED_HEADER_SIZE) {
- GPR_ASSERT(version != ENCODED_VERSION);
- GPR_ASSERT(ENCODED_HEADER_SIZE < header_size);
- buffer += (header_size - ENCODED_HEADER_SIZE);
- }
- tags->kvm_used = size - header_size;
- tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
- tags->kvm = (char *)gpr_malloc(tags->kvm_size);
- if (tag_header_size != TAG_HEADER_SIZE) {
- // something new in the tag information. I don't understand it, so
- // don't copy it over.
- GPR_ASSERT(version != ENCODED_VERSION);
- GPR_ASSERT(tag_header_size > TAG_HEADER_SIZE);
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags; i++) {
- memcpy(kvp, buffer, TAG_HEADER_SIZE);
- kvp += header_size;
- struct raw_tag raw;
- buffer =
- decode_tag(&raw, (char *)buffer, tag_header_size - TAG_HEADER_SIZE);
- memcpy(kvp, raw.key, (size_t)raw.key_len + raw.value_len);
- kvp += raw.key_len + raw.value_len;
- }
- } else {
- memcpy(tags->kvm, buffer, tags->kvm_used);
- }
-}
-
-census_context *census_context_decode(const char *buffer, size_t size) {
- census_context *context =
- (census_context *)gpr_malloc(sizeof(census_context));
- memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
- if (buffer == NULL) {
- memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
- } else {
- tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
- }
- memset(&context->status, 0, sizeof(context->status));
- context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
- return context;
-}
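
To make the layout and wire format described in the comments above concrete: a propagated tag with key "method" and value "foo" occupies 3 (tag header) + 7 (key, with trailing 0) + 4 (value, with trailing 0) = 14 bytes of kvm, and census_context_encode prepends the 4-byte tag-set header, so 18 bytes go on the wire. A sketch of the create/encode/decode round trip using only the public calls defined in this file (illustrative; census_tag field names follow their use above):

#include <grpc/census.h>

static void context_round_trip(void) {
  census_tag tag;
  tag.key = "method";
  tag.value = "foo";
  tag.flags = CENSUS_TAG_PROPAGATE; /* only propagated tags are encoded */
  const census_context_status *status = NULL;
  census_context *ctx = census_context_create(NULL, &tag, 1, &status);

  char buf[64];
  /* 4-byte set header + 14 bytes of kvm == 18 bytes for this tag. */
  size_t encoded = census_context_encode(ctx, buf, sizeof(buf));

  census_context *peer = census_context_decode(buf, encoded);
  census_tag out;
  if (census_context_get_tag(peer, "method", &out)) {
    /* out.value points at "foo" inside peer's key/value memory. */
  }
  census_context_destroy(peer);
  census_context_destroy(ctx);
}
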
diff --git a/src/core/ext/census/gen/README.md b/src/core/ext/census/gen/README.md
deleted file mode 100644
index d4612bc7c8..0000000000
--- a/src/core/ext/census/gen/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Files generated for use by the Census stats and trace recording subsystem.
-
-# Files
-* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
- script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
- $PWD/src/core/ext/census/gen src/core/ext/census/gen`
-* trace_context.pb.{h,c} - Generated from
- src/core/ext/census/trace_context.proto, using the script
- `tools/codegen/core/gen_nano_proto.sh src/proto/census/trace_context.proto
- $PWD/src/core/ext/census/gen src/core/ext/census/gen`
diff --git a/src/core/ext/census/gen/census.pb.c b/src/core/ext/census/gen/census.pb.c
deleted file mode 100644
index 88efa73661..0000000000
--- a/src/core/ext/census/gen/census.pb.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.3.5-dev */
-
-#include "src/core/ext/census/gen/census.pb.h"
-
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-
-
-const pb_field_t google_census_Duration_fields[3] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Duration, seconds, seconds, 0),
- PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Duration, nanos, seconds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Timestamp_fields[3] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Timestamp, seconds, seconds, 0),
- PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Timestamp, nanos, seconds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Resource_fields[4] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Resource, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Resource, description, name, 0),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Resource, unit, description, &google_census_Resource_MeasurementUnit_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Resource_MeasurementUnit_fields[4] = {
- PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, google_census_Resource_MeasurementUnit, prefix, prefix, 0),
- PB_FIELD( 2, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, numerator, prefix, 0),
- PB_FIELD( 3, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, denominator, numerator, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_fields[4] = {
- PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, google_census_AggregationDescriptor, type, type, 0),
- PB_ONEOF_FIELD(options, 2, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, bucket_boundaries, type, &google_census_AggregationDescriptor_BucketBoundaries_fields),
- PB_ONEOF_FIELD(options, 3, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, interval_boundaries, type, &google_census_AggregationDescriptor_IntervalBoundaries_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2] = {
- PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_BucketBoundaries, bounds, bounds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2] = {
- PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_IntervalBoundaries, window_size, window_size, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Distribution_fields[5] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Distribution, count, count, 0),
- PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution, mean, count, 0),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Distribution, range, mean, &google_census_Distribution_Range_fields),
- PB_FIELD( 4, INT64 , REPEATED, CALLBACK, OTHER, google_census_Distribution, bucket_count, range, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Distribution_Range_fields[3] = {
- PB_FIELD( 1, DOUBLE , OPTIONAL, STATIC , FIRST, google_census_Distribution_Range, min, min, 0),
- PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution_Range, max, min, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_IntervalStats_fields[2] = {
- PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_IntervalStats, window, window, &google_census_IntervalStats_Window_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_IntervalStats_Window_fields[4] = {
- PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_census_IntervalStats_Window, window_size, window_size, &google_census_Duration_fields),
- PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, count, window_size, 0),
- PB_FIELD( 3, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, mean, count, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Tag_fields[3] = {
- PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, google_census_Tag, key, key, 0),
- PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, google_census_Tag, value, key, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_View_fields[6] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_View, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, description, name, 0),
- PB_FIELD( 3, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, resource_name, description, 0),
- PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_View, aggregation, resource_name, &google_census_AggregationDescriptor_fields),
- PB_FIELD( 5, STRING , REPEATED, CALLBACK, OTHER, google_census_View, tag_key, aggregation, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Aggregation_fields[7] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Aggregation, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Aggregation, description, name, 0),
- PB_ONEOF_FIELD(data, 3, UINT64 , ONEOF, STATIC , OTHER, google_census_Aggregation, count, description, 0),
- PB_ONEOF_FIELD(data, 4, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, distribution, description, &google_census_Distribution_fields),
- PB_ONEOF_FIELD(data, 5, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, interval_stats, description, &google_census_IntervalStats_fields),
- PB_FIELD( 6, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Aggregation, tag, data.interval_stats, &google_census_Tag_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Metric_fields[5] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Metric, view_name, view_name, 0),
- PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric, aggregation, view_name, &google_census_Aggregation_fields),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, start, aggregation, &google_census_Timestamp_fields),
- PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, end, start, &google_census_Timestamp_fields),
- PB_LAST_FIELD
-};
-
-
-/* Check that field information fits in pb_field_t */
-#if !defined(PB_FIELD_32BIT)
-/* If you get an error here, it means that you need to define PB_FIELD_32BIT
- * compile-time option. You can do that in pb.h or on compiler command line.
- *
- * The reason you need to do this is that some of your messages contain tag
- * numbers or field sizes that are larger than what can fit in 8 or 16 bit
- * field descriptors.
- */
-PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Aggregation, tag) < 65536 && pb_membersize(google_census_Metric, aggregation) < 65536 && pb_membersize(google_census_Metric, start) < 65536 && pb_membersize(google_census_Metric, end) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
-#endif
-
-#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
-/* If you get an error here, it means that you need to define PB_FIELD_16BIT
- * compile-time option. You can do that in pb.h or on compiler command line.
- *
- * The reason you need to do this is that some of your messages contain tag
- * numbers or field sizes that are larger than what can fit in the default
- * 8 bit descriptors.
- */
-PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Aggregation, tag) < 256 && pb_membersize(google_census_Metric, aggregation) < 256 && pb_membersize(google_census_Metric, start) < 256 && pb_membersize(google_census_Metric, end) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
-#endif
-
-
-/* On some platforms (such as AVR), double is really float.
- * These are not directly supported by nanopb, but see example_avr_double.
- * To get rid of this error, remove any double fields from your .proto.
- */
-PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
-
diff --git a/src/core/ext/census/gen/census.pb.h b/src/core/ext/census/gen/census.pb.h
deleted file mode 100644
index 5f28335664..0000000000
--- a/src/core/ext/census/gen/census.pb.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb header */
-/* Generated by nanopb-0.3.5-dev */
-
-#ifndef GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
-#define GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
-#include "third_party/nanopb/pb.h"
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Enum definitions */
-typedef enum _google_census_Resource_BasicUnit {
- google_census_Resource_BasicUnit_UNKNOWN = 0,
- google_census_Resource_BasicUnit_BITS = 1,
- google_census_Resource_BasicUnit_BYTES = 2,
- google_census_Resource_BasicUnit_SECS = 3,
- google_census_Resource_BasicUnit_CORES = 4,
- google_census_Resource_BasicUnit_MAX_UNITS = 5
-} google_census_Resource_BasicUnit;
-
-typedef enum _google_census_AggregationDescriptor_AggregationType {
- google_census_AggregationDescriptor_AggregationType_UNKNOWN = 0,
- google_census_AggregationDescriptor_AggregationType_COUNT = 1,
- google_census_AggregationDescriptor_AggregationType_DISTRIBUTION = 2,
- google_census_AggregationDescriptor_AggregationType_INTERVAL = 3
-} google_census_AggregationDescriptor_AggregationType;
-
-/* Struct definitions */
-typedef struct _google_census_AggregationDescriptor_BucketBoundaries {
- pb_callback_t bounds;
-} google_census_AggregationDescriptor_BucketBoundaries;
-
-typedef struct _google_census_AggregationDescriptor_IntervalBoundaries {
- pb_callback_t window_size;
-} google_census_AggregationDescriptor_IntervalBoundaries;
-
-typedef struct _google_census_IntervalStats {
- pb_callback_t window;
-} google_census_IntervalStats;
-
-typedef struct _google_census_AggregationDescriptor {
- bool has_type;
- google_census_AggregationDescriptor_AggregationType type;
- pb_size_t which_options;
- union {
- google_census_AggregationDescriptor_BucketBoundaries bucket_boundaries;
- google_census_AggregationDescriptor_IntervalBoundaries interval_boundaries;
- } options;
-} google_census_AggregationDescriptor;
-
-typedef struct _google_census_Distribution_Range {
- bool has_min;
- double min;
- bool has_max;
- double max;
-} google_census_Distribution_Range;
-
-typedef struct _google_census_Duration {
- bool has_seconds;
- int64_t seconds;
- bool has_nanos;
- int32_t nanos;
-} google_census_Duration;
-
-typedef struct _google_census_Resource_MeasurementUnit {
- bool has_prefix;
- int32_t prefix;
- pb_callback_t numerator;
- pb_callback_t denominator;
-} google_census_Resource_MeasurementUnit;
-
-typedef struct _google_census_Tag {
- bool has_key;
- char key[255];
- bool has_value;
- char value[255];
-} google_census_Tag;
-
-typedef struct _google_census_Timestamp {
- bool has_seconds;
- int64_t seconds;
- bool has_nanos;
- int32_t nanos;
-} google_census_Timestamp;
-
-typedef struct _google_census_Distribution {
- bool has_count;
- int64_t count;
- bool has_mean;
- double mean;
- bool has_range;
- google_census_Distribution_Range range;
- pb_callback_t bucket_count;
-} google_census_Distribution;
-
-typedef struct _google_census_IntervalStats_Window {
- bool has_window_size;
- google_census_Duration window_size;
- bool has_count;
- int64_t count;
- bool has_mean;
- double mean;
-} google_census_IntervalStats_Window;
-
-typedef struct _google_census_Metric {
- pb_callback_t view_name;
- pb_callback_t aggregation;
- bool has_start;
- google_census_Timestamp start;
- bool has_end;
- google_census_Timestamp end;
-} google_census_Metric;
-
-typedef struct _google_census_Resource {
- pb_callback_t name;
- pb_callback_t description;
- bool has_unit;
- google_census_Resource_MeasurementUnit unit;
-} google_census_Resource;
-
-typedef struct _google_census_View {
- pb_callback_t name;
- pb_callback_t description;
- pb_callback_t resource_name;
- bool has_aggregation;
- google_census_AggregationDescriptor aggregation;
- pb_callback_t tag_key;
-} google_census_View;
-
-typedef struct _google_census_Aggregation {
- pb_callback_t name;
- pb_callback_t description;
- pb_size_t which_data;
- union {
- uint64_t count;
- google_census_Distribution distribution;
- google_census_IntervalStats interval_stats;
- } data;
- pb_callback_t tag;
-} google_census_Aggregation;
-
-/* Default values for struct fields */
-
-/* Initializer values for message structs */
-#define google_census_Duration_init_default {false, 0, false, 0}
-#define google_census_Timestamp_init_default {false, 0, false, 0}
-#define google_census_Resource_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_default}
-#define google_census_Resource_MeasurementUnit_init_default {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
-#define google_census_AggregationDescriptor_init_default {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_default}}
-#define google_census_AggregationDescriptor_BucketBoundaries_init_default {{{NULL}, NULL}}
-#define google_census_AggregationDescriptor_IntervalBoundaries_init_default {{{NULL}, NULL}}
-#define google_census_Distribution_init_default {false, 0, false, 0, false, google_census_Distribution_Range_init_default, {{NULL}, NULL}}
-#define google_census_Distribution_Range_init_default {false, 0, false, 0}
-#define google_census_IntervalStats_init_default {{{NULL}, NULL}}
-#define google_census_IntervalStats_Window_init_default {false, google_census_Duration_init_default, false, 0, false, 0}
-#define google_census_Tag_init_default {false, "", false, ""}
-#define google_census_View_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_default, {{NULL}, NULL}}
-#define google_census_Aggregation_init_default {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
-#define google_census_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_default, false, google_census_Timestamp_init_default}
-#define google_census_Duration_init_zero {false, 0, false, 0}
-#define google_census_Timestamp_init_zero {false, 0, false, 0}
-#define google_census_Resource_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_zero}
-#define google_census_Resource_MeasurementUnit_init_zero {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
-#define google_census_AggregationDescriptor_init_zero {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_zero}}
-#define google_census_AggregationDescriptor_BucketBoundaries_init_zero {{{NULL}, NULL}}
-#define google_census_AggregationDescriptor_IntervalBoundaries_init_zero {{{NULL}, NULL}}
-#define google_census_Distribution_init_zero {false, 0, false, 0, false, google_census_Distribution_Range_init_zero, {{NULL}, NULL}}
-#define google_census_Distribution_Range_init_zero {false, 0, false, 0}
-#define google_census_IntervalStats_init_zero {{{NULL}, NULL}}
-#define google_census_IntervalStats_Window_init_zero {false, google_census_Duration_init_zero, false, 0, false, 0}
-#define google_census_Tag_init_zero {false, "", false, ""}
-#define google_census_View_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_zero, {{NULL}, NULL}}
-#define google_census_Aggregation_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
-#define google_census_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_zero, false, google_census_Timestamp_init_zero}
-
-/* Field tags (for use in manual encoding/decoding) */
-#define google_census_AggregationDescriptor_BucketBoundaries_bounds_tag 1
-#define google_census_AggregationDescriptor_IntervalBoundaries_window_size_tag 1
-#define google_census_IntervalStats_window_tag 1
-#define google_census_AggregationDescriptor_bucket_boundaries_tag 2
-
-#define google_census_AggregationDescriptor_interval_boundaries_tag 3
-#define google_census_AggregationDescriptor_type_tag 1
-#define google_census_Distribution_Range_min_tag 1
-#define google_census_Distribution_Range_max_tag 2
-#define google_census_Duration_seconds_tag 1
-#define google_census_Duration_nanos_tag 2
-#define google_census_Resource_MeasurementUnit_prefix_tag 1
-#define google_census_Resource_MeasurementUnit_numerator_tag 2
-#define google_census_Resource_MeasurementUnit_denominator_tag 3
-#define google_census_Tag_key_tag 1
-#define google_census_Tag_value_tag 2
-#define google_census_Timestamp_seconds_tag 1
-#define google_census_Timestamp_nanos_tag 2
-#define google_census_Distribution_count_tag 1
-#define google_census_Distribution_mean_tag 2
-#define google_census_Distribution_range_tag 3
-#define google_census_Distribution_bucket_count_tag 4
-#define google_census_IntervalStats_Window_window_size_tag 1
-#define google_census_IntervalStats_Window_count_tag 2
-#define google_census_IntervalStats_Window_mean_tag 3
-#define google_census_Metric_view_name_tag 1
-#define google_census_Metric_aggregation_tag 2
-#define google_census_Metric_start_tag 3
-#define google_census_Metric_end_tag 4
-#define google_census_Resource_name_tag 1
-#define google_census_Resource_description_tag 2
-#define google_census_Resource_unit_tag 3
-#define google_census_View_name_tag 1
-#define google_census_View_description_tag 2
-#define google_census_View_resource_name_tag 3
-#define google_census_View_aggregation_tag 4
-#define google_census_View_tag_key_tag 5
-#define google_census_Aggregation_count_tag 3
-
-#define google_census_Aggregation_distribution_tag 4
-
-#define google_census_Aggregation_interval_stats_tag 5
-#define google_census_Aggregation_name_tag 1
-#define google_census_Aggregation_description_tag 2
-#define google_census_Aggregation_tag_tag 6
-
-/* Struct field encoding specification for nanopb */
-extern const pb_field_t google_census_Duration_fields[3];
-extern const pb_field_t google_census_Timestamp_fields[3];
-extern const pb_field_t google_census_Resource_fields[4];
-extern const pb_field_t google_census_Resource_MeasurementUnit_fields[4];
-extern const pb_field_t google_census_AggregationDescriptor_fields[4];
-extern const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2];
-extern const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2];
-extern const pb_field_t google_census_Distribution_fields[5];
-extern const pb_field_t google_census_Distribution_Range_fields[3];
-extern const pb_field_t google_census_IntervalStats_fields[2];
-extern const pb_field_t google_census_IntervalStats_Window_fields[4];
-extern const pb_field_t google_census_Tag_fields[3];
-extern const pb_field_t google_census_View_fields[6];
-extern const pb_field_t google_census_Aggregation_fields[7];
-extern const pb_field_t google_census_Metric_fields[5];
-
-/* Maximum encoded size of messages (where known) */
-#define google_census_Duration_size 22
-#define google_census_Timestamp_size 22
-#define google_census_Distribution_Range_size 18
-#define google_census_IntervalStats_Window_size 44
-#define google_census_Tag_size 516
-
-/* Message IDs (where set with "msgid" option) */
-#ifdef PB_MSGID
-
-#define CENSUS_MESSAGES \
-
-
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H */
diff --git a/src/core/ext/census/gen/trace_context.pb.c b/src/core/ext/census/gen/trace_context.pb.c
deleted file mode 100644
index b5c3d52a71..0000000000
--- a/src/core/ext/census/gen/trace_context.pb.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
-
-#include "src/core/ext/census/gen/trace_context.pb.h"
-
-/* @@protoc_insertion_point(includes) */
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-
-
-const pb_field_t google_trace_TraceContext_fields[5] = {
- PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id_hi, trace_id_hi, 0),
- PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, trace_id_lo, trace_id_hi, 0),
- PB_FIELD( 3, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id_lo, 0),
- PB_FIELD( 4, FIXED32 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_options, span_id, 0),
- PB_LAST_FIELD
-};
-
-
-/* @@protoc_insertion_point(eof) */
diff --git a/src/core/ext/census/gen/trace_context.pb.h b/src/core/ext/census/gen/trace_context.pb.h
deleted file mode 100644
index 181925dc9a..0000000000
--- a/src/core/ext/census/gen/trace_context.pb.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb header */
-/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
-
-#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
-#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
-#include "third_party/nanopb/pb.h"
-
-/* @@protoc_insertion_point(includes) */
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Struct definitions */
-typedef struct _google_trace_TraceContext {
- bool has_trace_id_hi;
- uint64_t trace_id_hi;
- bool has_trace_id_lo;
- uint64_t trace_id_lo;
- bool has_span_id;
- uint64_t span_id;
- bool has_span_options;
- uint32_t span_options;
-/* @@protoc_insertion_point(struct:google_trace_TraceContext) */
-} google_trace_TraceContext;
-
-/* Default values for struct fields */
-
-/* Initializer values for message structs */
-#define google_trace_TraceContext_init_default {false, 0, false, 0, false, 0, false, 0}
-#define google_trace_TraceContext_init_zero {false, 0, false, 0, false, 0, false, 0}
-
-/* Field tags (for use in manual encoding/decoding) */
-#define google_trace_TraceContext_trace_id_hi_tag 1
-#define google_trace_TraceContext_trace_id_lo_tag 2
-#define google_trace_TraceContext_span_id_tag 3
-#define google_trace_TraceContext_span_options_tag 4
-
-/* Struct field encoding specification for nanopb */
-extern const pb_field_t google_trace_TraceContext_fields[5];
-
-/* Maximum encoded size of messages (where known) */
-#define google_trace_TraceContext_size 32
-
-/* Message IDs (where set with "msgid" option) */
-#ifdef PB_MSGID
-
-#define TRACE_CONTEXT_MESSAGES \
-
-
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-/* @@protoc_insertion_point(eof) */
-
-#endif /* GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H */
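
The generated TraceContext message above is consumed through nanopb's standard encode/decode entry points. A hedged sketch of encoding one into a stack buffer sized by the generated maximum (encode_trace_context is a hypothetical helper; the include path follows the in-tree nanopb convention used by these generated files):

#include "third_party/nanopb/pb_encode.h"
#include "src/core/ext/census/gen/trace_context.pb.h"

static size_t encode_trace_context(uint64_t trace_id_hi, uint64_t trace_id_lo,
                                   uint64_t span_id) {
  google_trace_TraceContext ctxt = google_trace_TraceContext_init_zero;
  ctxt.has_trace_id_hi = true;
  ctxt.trace_id_hi = trace_id_hi;
  ctxt.has_trace_id_lo = true;
  ctxt.trace_id_lo = trace_id_lo;
  ctxt.has_span_id = true;
  ctxt.span_id = span_id;

  uint8_t buf[google_trace_TraceContext_size]; /* 32 bytes max, per above */
  pb_ostream_t stream = pb_ostream_from_buffer(buf, sizeof(buf));
  if (!pb_encode(&stream, google_trace_TraceContext_fields, &ctxt)) {
    return 0; /* encoding failed */
  }
  return stream.bytes_written;
}
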
diff --git a/src/core/ext/census/grpc_context.cc b/src/core/ext/census/grpc_context.cc
index 0bfba63a5e..34eafcab8e 100644
--- a/src/core/ext/census/grpc_context.cc
+++ b/src/core/ext/census/grpc_context.cc
@@ -24,9 +24,6 @@
void grpc_census_call_set_context(grpc_call *call, census_context *context) {
GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
(call, context));
- if (census_enabled() == CENSUS_FEATURE_NONE) {
- return;
- }
if (context != NULL) {
grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, NULL);
}
diff --git a/src/core/ext/census/grpc_filter.cc b/src/core/ext/census/grpc_filter.cc
deleted file mode 100644
index b37ab90389..0000000000
--- a/src/core/ext/census/grpc_filter.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/grpc_filter.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/census.h>
-#include <grpc/slice.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-
-#include "src/core/ext/census/census_interface.h"
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/lib/channel/channel_stack.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/transport/static_metadata.h"
-
-typedef struct call_data {
- census_op_id op_id;
- census_context *ctxt;
- gpr_timespec start_ts;
- int error;
-
- /* recv callback */
- grpc_metadata_batch *recv_initial_metadata;
- grpc_closure *on_done_recv;
- grpc_closure finish_recv;
-} call_data;
-
-typedef struct channel_data { uint8_t unused; } channel_data;
-
-static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
- call_data *calld,
- channel_data *chand) {
- grpc_linked_mdelem *m;
- for (m = md->list.head; m != NULL; m = m->next) {
- if (grpc_slice_eq(GRPC_MDKEY(m->md), GRPC_MDSTR_PATH)) {
- /* Add method tag here */
- }
- }
-}
-
-static void client_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
- if (op->send_initial_metadata) {
- extract_and_annotate_method_tag(
- op->payload->send_initial_metadata.send_initial_metadata, calld, chand);
- }
-}
-
-static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- client_mutate_op(elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
-}
-
-static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
- grpc_error *error) {
- GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
- grpc_call_element *elem = (grpc_call_element *)ptr;
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
- if (error == GRPC_ERROR_NONE) {
- extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
- }
- calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
- GPR_TIMER_END("census-server:server_on_done_recv", 0);
-}
-
-static void server_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = (call_data *)elem->call_data;
- if (op->recv_initial_metadata) {
- /* substitute our callback for the op callback */
- calld->recv_initial_metadata =
- op->payload->recv_initial_metadata.recv_initial_metadata;
- calld->on_done_recv =
- op->payload->recv_initial_metadata.recv_initial_metadata_ready;
- op->payload->recv_initial_metadata.recv_initial_metadata_ready =
- &calld->finish_recv;
- }
-}
-
-static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- /* TODO(ctiller): this code fails. I don't know why. I expect it's
- incomplete, and someone should look at it soon.
-
- call_data *calld = elem->call_data;
- GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
- server_mutate_op(elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
-}
-
-static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *d = (call_data *)elem->call_data;
- GPR_ASSERT(d != NULL);
- memset(d, 0, sizeof(*d));
- d->start_ts = args->start_time;
- return GRPC_ERROR_NONE;
-}
-
-static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *d = (call_data *)elem->call_data;
- GPR_ASSERT(d != NULL);
- /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
-}
-
-static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *d = (call_data *)elem->call_data;
- GPR_ASSERT(d != NULL);
- memset(d, 0, sizeof(*d));
- d->start_ts = args->start_time;
- /* TODO(hongyu): call census_tracing_start_op here. */
- GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
- grpc_schedule_on_exec_ctx);
- return GRPC_ERROR_NONE;
-}
-
-static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *d = (call_data *)elem->call_data;
- GPR_ASSERT(d != NULL);
- /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
-}
-
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
- GPR_ASSERT(chand != NULL);
- return GRPC_ERROR_NONE;
-}
-
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- GPR_ASSERT(chand != NULL);
-}
-
-const grpc_channel_filter grpc_client_census_filter = {
- client_start_transport_op,
- grpc_channel_next_op,
- sizeof(call_data),
- client_init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- client_destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_channel_next_get_info,
- "census-client"};
-
-const grpc_channel_filter grpc_server_census_filter = {
- server_start_transport_op,
- grpc_channel_next_op,
- sizeof(call_data),
- server_init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- server_destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_channel_next_get_info,
- "census-server"};
diff --git a/src/core/ext/census/grpc_plugin.cc b/src/core/ext/census/grpc_plugin.cc
deleted file mode 100644
index 22b16c6c63..0000000000
--- a/src/core/ext/census/grpc_plugin.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <limits.h>
-#include <string.h>
-
-#include <grpc/census.h>
-
-#include "src/core/ext/census/grpc_filter.h"
-#include "src/core/lib/channel/channel_stack_builder.h"
-#include "src/core/lib/surface/channel_init.h"
-
-static bool is_census_enabled(const grpc_channel_args *a) {
- size_t i;
- if (a == NULL) return 0;
- for (i = 0; i < a->num_args; i++) {
- if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) {
- return a->args[i].value.integer != 0 && census_enabled();
- }
- }
- return census_enabled() && !grpc_channel_args_want_minimal_stack(a);
-}
-
-static bool maybe_add_census_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
- const grpc_channel_args *args =
- grpc_channel_stack_builder_get_channel_arguments(builder);
- if (is_census_enabled(args)) {
- return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
- }
- return true;
-}
-
-extern "C" void census_grpc_plugin_init(void) {
- /* Only initialize census if no one else has and some features are
- * available. */
- if (census_enabled() == CENSUS_FEATURE_NONE &&
- census_supported() != CENSUS_FEATURE_NONE) {
- if (census_initialize(census_supported())) { /* enable all features. */
- gpr_log(GPR_ERROR, "Could not initialize census.");
- }
- }
- grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
- maybe_add_census_filter,
- (void *)&grpc_client_census_filter);
- grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
- maybe_add_census_filter,
- (void *)&grpc_server_census_filter);
-}
-
-extern "C" void census_grpc_plugin_shutdown(void) { census_shutdown(); }
diff --git a/src/core/ext/census/hash_table.cc b/src/core/ext/census/hash_table.cc
deleted file mode 100644
index 545b0857c7..0000000000
--- a/src/core/ext/census/hash_table.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/hash_table.h"
-
-#include <stddef.h>
-#include <stdio.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-
-#define CENSUS_HT_NUM_BUCKETS 1999
-
-/* A single hash table data entry */
-typedef struct ht_entry {
- census_ht_key key;
- void *data;
- struct ht_entry *next;
-} ht_entry;
-
-/* hash table bucket */
-typedef struct bucket {
- /* NULL if bucket is empty */
- ht_entry *next;
- /* -1 if all buckets are empty. */
- int32_t prev_non_empty_bucket;
- /* -1 if all buckets are empty. */
- int32_t next_non_empty_bucket;
-} bucket;
-
-struct unresizable_hash_table {
- /* Number of entries in the table */
- size_t size;
- /* Number of buckets */
- uint32_t num_buckets;
- /* Array of buckets initialized at creation time. Memory consumption is
- 16 bytes per bucket on a 64-bit platform. */
- bucket *buckets;
- /* Index of the first non-empty bucket. -1 iff size == 0. */
- int32_t first_non_empty_bucket;
- /* Index of the last non-empty bucket. -1 iff size == 0. */
- int32_t last_non_empty_bucket;
- /* Immutable options of this hash table, initialized at creation time. */
- census_ht_option options;
-};
-
-typedef struct entry_locator {
- int32_t bucket_idx;
- int is_first_in_chain;
- int found;
- ht_entry *prev_entry;
-} entry_locator;
-
-/* Asserts if option is not valid. */
-void check_options(const census_ht_option *option) {
- GPR_ASSERT(option != NULL);
- GPR_ASSERT(option->num_buckets > 0);
- GPR_ASSERT(option->key_type == CENSUS_HT_UINT64 ||
- option->key_type == CENSUS_HT_POINTER);
- if (option->key_type == CENSUS_HT_UINT64) {
- GPR_ASSERT(option->hash == NULL);
- } else if (option->key_type == CENSUS_HT_POINTER) {
- GPR_ASSERT(option->hash != NULL);
- GPR_ASSERT(option->compare_keys != NULL);
- }
-}
-
-#define REMOVE_NEXT(options, ptr) \
- do { \
- ht_entry *tmp = (ptr)->next; \
- (ptr)->next = tmp->next; \
- delete_entry(options, tmp); \
- } while (0)
-
-static void delete_entry(const census_ht_option *opt, ht_entry *p) {
- if (opt->delete_data != NULL) {
- opt->delete_data(p->data);
- }
- if (opt->delete_key != NULL) {
- opt->delete_key(p->key.ptr);
- }
- gpr_free(p);
-}
-
-static uint64_t hash(const census_ht_option *opt, census_ht_key key) {
- return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
-}
-
-census_ht *census_ht_create(const census_ht_option *option) {
- int i;
- census_ht *ret = NULL;
- check_options(option);
- ret = (census_ht *)gpr_malloc(sizeof(census_ht));
- ret->size = 0;
- ret->num_buckets = option->num_buckets;
- ret->buckets = (bucket *)gpr_malloc(sizeof(bucket) * ret->num_buckets);
- ret->options = *option;
- /* initialize each bucket */
- for (i = 0; i < ret->options.num_buckets; i++) {
- ret->buckets[i].prev_non_empty_bucket = -1;
- ret->buckets[i].next_non_empty_bucket = -1;
- ret->buckets[i].next = NULL;
- }
- return ret;
-}
-
-static int32_t find_bucket_idx(const census_ht *ht, census_ht_key key) {
- return hash(&ht->options, key) % ht->num_buckets;
-}
-
-static int keys_match(const census_ht_option *opt, const ht_entry *p,
- const census_ht_key key) {
- GPR_ASSERT(opt->key_type == CENSUS_HT_UINT64 ||
- opt->key_type == CENSUS_HT_POINTER);
- if (opt->key_type == CENSUS_HT_UINT64) return p->key.val == key.val;
- return !opt->compare_keys((p->key).ptr, key.ptr);
-}
-
-static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
- entry_locator loc = {0, 0, 0, NULL};
- int32_t idx = 0;
- ht_entry *ptr = NULL;
- GPR_ASSERT(ht != NULL);
- idx = find_bucket_idx(ht, key);
- ptr = ht->buckets[idx].next;
- if (ptr == NULL) {
- /* bucket is empty */
- return loc;
- }
- if (keys_match(&ht->options, ptr, key)) {
- loc.bucket_idx = idx;
- loc.is_first_in_chain = 1;
- loc.found = 1;
- return loc;
- } else {
- for (; ptr->next != NULL; ptr = ptr->next) {
- if (keys_match(&ht->options, ptr->next, key)) {
- loc.bucket_idx = idx;
- loc.is_first_in_chain = 0;
- loc.found = 1;
- loc.prev_entry = ptr;
- return loc;
- }
- }
- }
- /* Could not find the key */
- return loc;
-}
-
-void *census_ht_find(const census_ht *ht, census_ht_key key) {
- entry_locator loc = ht_find(ht, key);
- if (loc.found == 0) {
- return NULL;
- }
- return loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next->data
- : loc.prev_entry->next->data;
-}
-
-void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
- int32_t idx = find_bucket_idx(ht, key);
- ht_entry *ptr = NULL;
- entry_locator loc = ht_find(ht, key);
- if (loc.found) {
- /* Replace old value with new value. */
- ptr = loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next
- : loc.prev_entry->next;
- if (ht->options.delete_data != NULL) {
- ht->options.delete_data(ptr->data);
- }
- ptr->data = data;
- return;
- }
-
- /* first entry in the table. */
- if (ht->size == 0) {
- ht->buckets[idx].next_non_empty_bucket = -1;
- ht->buckets[idx].prev_non_empty_bucket = -1;
- ht->first_non_empty_bucket = idx;
- ht->last_non_empty_bucket = idx;
- } else if (ht->buckets[idx].next == NULL) {
- /* first entry in the bucket. */
- ht->buckets[ht->last_non_empty_bucket].next_non_empty_bucket = idx;
- ht->buckets[idx].prev_non_empty_bucket = ht->last_non_empty_bucket;
- ht->buckets[idx].next_non_empty_bucket = -1;
- ht->last_non_empty_bucket = idx;
- }
- ptr = (ht_entry *)gpr_malloc(sizeof(ht_entry));
- ptr->key = key;
- ptr->data = data;
- ptr->next = ht->buckets[idx].next;
- ht->buckets[idx].next = ptr;
- ht->size++;
-}
-
-void census_ht_erase(census_ht *ht, census_ht_key key) {
- entry_locator loc = ht_find(ht, key);
- if (loc.found == 0) {
- /* noop if not found */
- return;
- }
- ht->size--;
- if (loc.is_first_in_chain) {
- bucket *b = &ht->buckets[loc.bucket_idx];
- GPR_ASSERT(b->next != NULL);
- /* The only entry in the bucket */
- if (b->next->next == NULL) {
- int prev = b->prev_non_empty_bucket;
- int next = b->next_non_empty_bucket;
- if (prev != -1) {
- ht->buckets[prev].next_non_empty_bucket = next;
- } else {
- ht->first_non_empty_bucket = next;
- }
- if (next != -1) {
- ht->buckets[next].prev_non_empty_bucket = prev;
- } else {
- ht->last_non_empty_bucket = prev;
- }
- }
- REMOVE_NEXT(&ht->options, b);
- } else {
- GPR_ASSERT(loc.prev_entry->next != NULL);
- REMOVE_NEXT(&ht->options, loc.prev_entry);
- }
-}
-
-/* Returns NULL if input table is empty. */
-census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
- census_ht_kv *ret = NULL;
- int i = 0;
- int32_t idx = -1;
- GPR_ASSERT(ht != NULL && num != NULL);
- *num = ht->size;
- if (*num == 0) {
- return NULL;
- }
-
- ret = (census_ht_kv *)gpr_malloc(sizeof(census_ht_kv) * ht->size);
- idx = ht->first_non_empty_bucket;
- while (idx >= 0) {
- ht_entry *ptr = ht->buckets[idx].next;
- for (; ptr != NULL; ptr = ptr->next) {
- ret[i].k = ptr->key;
- ret[i].v = ptr->data;
- i++;
- }
- idx = ht->buckets[idx].next_non_empty_bucket;
- }
- return ret;
-}
-
-static void ht_delete_entry_chain(const census_ht_option *options,
- ht_entry *first) {
- if (first == NULL) {
- return;
- }
- if (first->next != NULL) {
- ht_delete_entry_chain(options, first->next);
- }
- delete_entry(options, first);
-}
-
-void census_ht_destroy(census_ht *ht) {
- unsigned i;
- for (i = 0; i < ht->num_buckets; ++i) {
- ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
- }
- gpr_free(ht->buckets);
- gpr_free(ht);
-}
-
-size_t census_ht_get_size(const census_ht *ht) { return ht->size; }
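A small sketch of a census_ht_option for CENSUS_HT_POINTER keys that satisfies the requirements asserted by check_options() above; the FNV-style hash and string comparator are illustrative choices, not part of the census sources.

#include <string.h>

#include "src/core/ext/census/hash_table.h"

/* Illustrative 64-bit FNV-1a hash over a NUL-terminated string key. */
static uint64_t str_key_hash(const void *k) {
  uint64_t h = 14695981039346656037ULL;
  for (const char *p = (const char *)k; *p != '\0'; ++p) {
    h = (h ^ (uint64_t)(unsigned char)*p) * 1099511628211ULL;
  }
  return h;
}

/* Returns 0 iff the two keys are equal, as census_ht_option requires. */
static int str_key_compare(const void *k1, const void *k2) {
  return strcmp((const char *)k1, (const char *)k2);
}

static const census_ht_option string_key_option = {
    CENSUS_HT_POINTER, 1999 /* num_buckets, preferably prime */, str_key_hash,
    str_key_compare, NULL /* delete_data */, NULL /* delete_key */};
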
diff --git a/src/core/ext/census/hash_table.h b/src/core/ext/census/hash_table.h
deleted file mode 100644
index c3ed94ea14..0000000000
--- a/src/core/ext/census/hash_table.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
-#define GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
-
-#include <stddef.h>
-
-#include <grpc/support/port_platform.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* A chain-based hash table with a fixed number of buckets.
- You probably shouldn't use this code directly. It is implemented for the
- use case in the census trace store and stats store, where the number of
- entries in the table is on the scale of up to several thousand, entries are
- added and removed from the table very frequently (~100k/s), and the
- frequency of find() operations is roughly several times the frequency of
- insert() and erase(). Compared to find(), the insert(), erase() and
- get_all_entries() operations are much less frequent (<1/s).
-
- Per-bucket memory overhead is about (8 + sizeof(intptr_t)) bytes.
- Per-entry memory overhead is about (8 + 2 * sizeof(intptr_t)) bytes.
-
- These functions are not thread-safe. Synchronization will be provided in the
- upper layer (in the trace store and stats store).
-*/
-
-/* Opaque hash table struct */
-typedef struct unresizable_hash_table census_ht;
-
-/* Currently, the hash table can take two types of keys: uint64 for the trace
- store and const char* for the stats store. */
-typedef union {
- uint64_t val;
- void *ptr;
-} census_ht_key;
-
-typedef enum census_ht_key_type {
- CENSUS_HT_UINT64 = 0,
- CENSUS_HT_POINTER = 1
-} census_ht_key_type;
-
-typedef struct census_ht_option {
- /* Type of hash key */
- census_ht_key_type key_type;
- /* Desired number of buckets, preferably a prime number */
- int32_t num_buckets;
- /* Function to calculate a uint64 hash value of the key. Only takes effect if
- key_type is POINTER. */
- uint64_t (*hash)(const void *);
- /* Function to compare two keys, returns 0 iff equal. Only takes effect if
- key_type is POINTER */
- int (*compare_keys)(const void *k1, const void *k2);
- /* Value deleter. NULL if no specialized delete function is needed. */
- void (*delete_data)(void *);
- /* Key deleter. NULL if the table does not own the key (e.g. the key is part
- of the value or is otherwise not owned by the table). */
- void (*delete_key)(void *);
-} census_ht_option;
-
-/* Creates a hash table with a fixed number of buckets according to the
- settings specified in the 'options' arg. Function pointers "hash" and
- "compare_keys" must be provided if key_type is POINTER. Asserts on failure
- to create. */
-census_ht *census_ht_create(const census_ht_option *options);
-
-/* Deletes hash table instance. Frees all dynamic memory owned by ht.*/
-void census_ht_destroy(census_ht *ht);
-
-/* Inserts the input key-val pair into hash_table. If an entry with the same key
- exists in the table, the corresponding value will be overwritten by the input
- val. */
-void census_ht_insert(census_ht *ht, census_ht_key key, void *val);
-
-/* Returns pointer to data, returns NULL if not found. */
-void *census_ht_find(const census_ht *ht, census_ht_key key);
-
-/* Erase hash table entry with input key. Noop if key is not found. */
-void census_ht_erase(census_ht *ht, census_ht_key key);
-
-typedef struct census_ht_kv {
- census_ht_key k;
- void *v;
-} census_ht_kv;
-
-/* Returns an array of key-value pairs for all entries in the hash table. Order
- of the elements can be arbitrary. Sets 'num' to the size of the returned
- array. Caller owns the returned array. */
-census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num);
-
-/* Returns number of elements kept. */
-size_t census_ht_get_size(const census_ht *ht);
-
-/* Functor applied on each key-value pair while iterating through entries in the
- table. The functor should not mutate data. */
-typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
- void *state);
-
-/* Iterates through all key-value pairs in the hash_table. The callback function
- should not invalidate data entries. */
-uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_HASH_TABLE_H */
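A minimal usage sketch of the API declared above, assuming uint64 keys (so no hash or comparator is required); the helper name and values are illustrative.

#include <grpc/support/log.h>

#include "src/core/ext/census/hash_table.h"

static void census_ht_usage_example(void) {
  census_ht_option opt = {CENSUS_HT_UINT64, 1999 /* buckets */, NULL, NULL,
                          NULL /* delete_data */, NULL /* delete_key */};
  census_ht *ht = census_ht_create(&opt);
  census_ht_key key;
  key.val = 42;
  int value = 7; /* value storage stays owned by the caller (no deleter). */
  census_ht_insert(ht, key, &value);
  int *found = (int *)census_ht_find(ht, key);
  GPR_ASSERT(found != NULL && *found == 7);
  census_ht_erase(ht, key); /* no-op if the key is absent */
  census_ht_destroy(ht);
}
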
diff --git a/src/core/ext/census/initialize.cc b/src/core/ext/census/initialize.cc
deleted file mode 100644
index 165a1221d4..0000000000
--- a/src/core/ext/census/initialize.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-#include "src/core/ext/census/base_resources.h"
-#include "src/core/ext/census/resource.h"
-
-static int features_enabled = CENSUS_FEATURE_NONE;
-
-int census_initialize(int features) {
- if (features_enabled != CENSUS_FEATURE_NONE) {
- // Must have been a previous call to census_initialize; return error
- return -1;
- }
- features_enabled = features & CENSUS_FEATURE_ALL;
- if (features & CENSUS_FEATURE_STATS) {
- initialize_resources();
- define_base_resources();
- }
-
- return features_enabled;
-}
-
-void census_shutdown(void) {
- if (features_enabled & CENSUS_FEATURE_STATS) {
- shutdown_resources();
- }
- features_enabled = CENSUS_FEATURE_NONE;
-}
-
-int census_supported(void) {
- /* TODO(aveitch): improve this as we implement features... */
- return CENSUS_FEATURE_NONE;
-}
-
-int census_enabled(void) { return features_enabled; }
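A brief sketch of the lifecycle implemented above: query the supported feature mask, initialize once, and shut down when done. Note that census_supported() in this tree still returns CENSUS_FEATURE_NONE, so the initialization branch is effectively dead; the helper name is illustrative.

#include <grpc/census.h>
#include <grpc/support/log.h>

static void census_lifecycle_example(void) {
  int supported = census_supported();
  if (supported != CENSUS_FEATURE_NONE) {
    /* Returns the enabled feature mask, or -1 if already initialized. */
    int enabled = census_initialize(supported);
    GPR_ASSERT(enabled != -1);
  }
  /* ... record stats / traces ... */
  census_shutdown();
}
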
diff --git a/src/core/ext/census/intrusive_hash_map.cc b/src/core/ext/census/intrusive_hash_map.cc
deleted file mode 100644
index 7930486963..0000000000
--- a/src/core/ext/census/intrusive_hash_map.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/intrusive_hash_map.h"
-#include <string.h>
-
-extern bool hm_index_compare(const hm_index *A, const hm_index *B);
-
-/* Simple hashing function that takes lower 32 bits. */
-static __inline uint32_t chunked_vector_hasher(uint64_t key) {
- return (uint32_t)key;
-}
-
-/* Vector chunks are 1MiB divided by pointer size. */
-static const size_t VECTOR_CHUNK_SIZE = (1 << 20) / sizeof(void *);
-
-/* Helper functions which return buckets from the chunked vector. */
-static __inline void **get_mutable_bucket(const chunked_vector *buckets,
- uint32_t index) {
- if (index < VECTOR_CHUNK_SIZE) {
- return &buckets->first_[index];
- }
- size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
- return &buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
-}
-
-static __inline void *get_bucket(const chunked_vector *buckets,
- uint32_t index) {
- if (index < VECTOR_CHUNK_SIZE) {
- return buckets->first_[index];
- }
- size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
- return buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
-}
-
-/* Helper function. */
-static __inline size_t RestSize(const chunked_vector *vec) {
- return (vec->size_ <= VECTOR_CHUNK_SIZE)
- ? 0
- : (vec->size_ - VECTOR_CHUNK_SIZE - 1) / VECTOR_CHUNK_SIZE + 1;
-}
-
-/* Initialize chunked vector to size of 0. */
-static void chunked_vector_init(chunked_vector *vec) {
- vec->size_ = 0;
- vec->first_ = NULL;
- vec->rest_ = NULL;
-}
-
-/* Clear the chunked vector, free all memory that has been allocated, and then
- re-initialize the chunked vector. */
-static void chunked_vector_clear(chunked_vector *vec) {
- if (vec->first_ != NULL) {
- gpr_free(vec->first_);
- }
- if (vec->rest_ != NULL) {
- size_t rest_size = RestSize(vec);
- for (size_t i = 0; i < rest_size; ++i) {
- if (vec->rest_[i] != NULL) {
- gpr_free(vec->rest_[i]);
- }
- }
- gpr_free(vec->rest_);
- }
- chunked_vector_init(vec);
-}
-
-/* Clear chunked vector and then resize it to n entries. Allow the first 1MB to
- be read w/o an extra cache miss. The rest of the elements are stored in an
- array of arrays to avoid large mallocs. */
-static void chunked_vector_reset(chunked_vector *vec, size_t n) {
- chunked_vector_clear(vec);
- vec->size_ = n;
- if (n <= VECTOR_CHUNK_SIZE) {
- vec->first_ = (void **)gpr_malloc(sizeof(void *) * n);
- memset(vec->first_, 0, sizeof(void *) * n);
- } else {
- vec->first_ = (void **)gpr_malloc(sizeof(void *) * VECTOR_CHUNK_SIZE);
- memset(vec->first_, 0, sizeof(void *) * VECTOR_CHUNK_SIZE);
- size_t rest_size = RestSize(vec);
- vec->rest_ = (void ***)gpr_malloc(sizeof(void **) * rest_size);
- memset(vec->rest_, 0, sizeof(void **) * rest_size);
- int i = 0;
- n -= VECTOR_CHUNK_SIZE;
- while (n > 0) {
- size_t this_size = GPR_MIN(n, VECTOR_CHUNK_SIZE);
- vec->rest_[i] = (void **)gpr_malloc(sizeof(void *) * this_size);
- memset(vec->rest_[i], 0, sizeof(void *) * this_size);
- n -= this_size;
- ++i;
- }
- }
-}
-
-void intrusive_hash_map_init(intrusive_hash_map *hash_map,
- uint32_t initial_log2_table_size) {
- hash_map->log2_num_buckets = initial_log2_table_size;
- hash_map->num_items = 0;
- uint32_t num_buckets = (uint32_t)1 << hash_map->log2_num_buckets;
- hash_map->extend_threshold = num_buckets >> 1;
- chunked_vector_init(&hash_map->buckets);
- chunked_vector_reset(&hash_map->buckets, num_buckets);
- hash_map->hash_mask = num_buckets - 1;
-}
-
-bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map) {
- return hash_map->num_items == 0;
-}
-
-size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map) {
- return hash_map->num_items;
-}
-
-void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx) {
- idx->bucket_index = (uint32_t)hash_map->buckets.size_;
- GPR_ASSERT(idx->bucket_index <= UINT32_MAX);
- idx->item = NULL;
-}
-
-void intrusive_hash_map_next(const intrusive_hash_map *hash_map,
- hm_index *idx) {
- idx->item = idx->item->hash_link;
- while (idx->item == NULL) {
- idx->bucket_index++;
- if (idx->bucket_index >= hash_map->buckets.size_) {
- /* Reached end of table. */
- idx->item = NULL;
- return;
- }
- idx->item = (hm_item *)get_bucket(&hash_map->buckets, idx->bucket_index);
- }
-}
-
-void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
- hm_index *idx) {
- for (uint32_t i = 0; i < hash_map->buckets.size_; ++i) {
- if (get_bucket(&hash_map->buckets, i) != NULL) {
- idx->bucket_index = i;
- idx->item = (hm_item *)get_bucket(&hash_map->buckets, i);
- return;
- }
- }
- intrusive_hash_map_end(hash_map, idx);
-}
-
-hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
- uint64_t key) {
- uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
-
- hm_item *p = (hm_item *)get_bucket(&hash_map->buckets, index);
- while (p != NULL) {
- if (key == p->key) {
- return p;
- }
- p = p->hash_link;
- }
- return NULL;
-}
-
-hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key) {
- uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
-
- hm_item **slot = (hm_item **)get_mutable_bucket(&hash_map->buckets, index);
- hm_item *p = *slot;
- if (p == NULL) {
- return NULL;
- }
-
- if (key == p->key) {
- *slot = p->hash_link;
- p->hash_link = NULL;
- hash_map->num_items--;
- return p;
- }
-
- hm_item *prev = p;
- p = p->hash_link;
-
- while (p) {
- if (key == p->key) {
- prev->hash_link = p->hash_link;
- p->hash_link = NULL;
- hash_map->num_items--;
- return p;
- }
- prev = p;
- p = p->hash_link;
- }
- return NULL;
-}
-
-/* Insert an hm_item* into the underlying chunked vector. hash_mask is
- * array_size-1. Returns true if it is a new hm_item and false if the hm_item
- * already existed.
- */
-static __inline bool intrusive_hash_map_internal_insert(chunked_vector *buckets,
- uint32_t hash_mask,
- hm_item *item) {
- const uint64_t key = item->key;
- uint32_t index = chunked_vector_hasher(key) & hash_mask;
- hm_item **slot = (hm_item **)get_mutable_bucket(buckets, index);
- hm_item *p = *slot;
- item->hash_link = p;
-
- /* Check to see if key already exists. */
- while (p) {
- if (p->key == key) {
- return false;
- }
- p = p->hash_link;
- }
-
- /* Otherwise add new entry. */
- *slot = item;
- return true;
-}
-
-/* Extend the number of buckets allocated for the hash map by a factor of 2. */
-void intrusive_hash_map_extend(intrusive_hash_map *hash_map) {
- uint32_t new_log2_num_buckets = 1 + hash_map->log2_num_buckets;
- uint32_t new_num_buckets = (uint32_t)1 << new_log2_num_buckets;
- GPR_ASSERT(new_num_buckets <= UINT32_MAX && new_num_buckets > 0);
- chunked_vector new_buckets;
- chunked_vector_init(&new_buckets);
- chunked_vector_reset(&new_buckets, new_num_buckets);
- uint32_t new_hash_mask = new_num_buckets - 1;
-
- hm_index cur_idx;
- hm_index end_idx;
- intrusive_hash_map_end(hash_map, &end_idx);
- intrusive_hash_map_begin(hash_map, &cur_idx);
- while (!hm_index_compare(&cur_idx, &end_idx)) {
- hm_item *new_item = cur_idx.item;
- intrusive_hash_map_next(hash_map, &cur_idx);
- intrusive_hash_map_internal_insert(&new_buckets, new_hash_mask, new_item);
- }
-
- /* Set values for new chunked_vector. extend_threshold is set to half of
- * new_num_buckets. */
- hash_map->log2_num_buckets = new_log2_num_buckets;
- chunked_vector_clear(&hash_map->buckets);
- hash_map->buckets = new_buckets;
- hash_map->hash_mask = new_hash_mask;
- hash_map->extend_threshold = new_num_buckets >> 1;
-}
-
-/* Insert an hm_item. The hm_item must remain live until it is removed from the
- table. The table does not take ownership of the hm_item. The caller must
- remove this hm_item from the table and delete it before the table is
- deleted. If the hm_item already exists, num_items is not changed. */
-bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item) {
- if (hash_map->num_items >= hash_map->extend_threshold) {
- intrusive_hash_map_extend(hash_map);
- }
- if (intrusive_hash_map_internal_insert(&hash_map->buckets,
- hash_map->hash_mask, item)) {
- hash_map->num_items++;
- return true;
- }
- return false;
-}
-
-void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
- void (*free_object)(void *)) {
- hm_index cur;
- hm_index end;
- intrusive_hash_map_end(hash_map, &end);
- intrusive_hash_map_begin(hash_map, &cur);
-
- while (!hm_index_compare(&cur, &end)) {
- hm_index next = cur;
- intrusive_hash_map_next(hash_map, &next);
- if (cur.item != NULL) {
- hm_item *item = intrusive_hash_map_erase(hash_map, cur.item->key);
- (*free_object)((void *)item);
- gpr_free(item);
- }
- cur = next;
- }
-}
-
-void intrusive_hash_map_free(intrusive_hash_map *hash_map,
- void (*free_object)(void *)) {
- intrusive_hash_map_clear(hash_map, (*free_object));
- hash_map->num_items = 0;
- hash_map->extend_threshold = 0;
- hash_map->log2_num_buckets = 0;
- hash_map->hash_mask = 0;
- chunked_vector_clear(&hash_map->buckets);
-}
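A short sketch of the free_object contract implemented by intrusive_hash_map_clear() above: the callback releases only the user-owned payload, because the map itself calls gpr_free() on each hm_item afterwards. The payload_item type and helpers are illustrative.

#include <grpc/support/alloc.h>

#include "src/core/ext/census/intrusive_hash_map.h"

typedef struct payload_item {
  INTRUSIVE_HASH_MAP_HEADER;
  char *payload; /* heap buffer owned by the item */
} payload_item;

static void free_payload(void *obj) {
  payload_item *item = (payload_item *)obj;
  gpr_free(item->payload);
  /* Do not free 'item' here; intrusive_hash_map_clear() gpr_free()s it. */
}

static void teardown_example(intrusive_hash_map *map) {
  /* Drops every entry and releases the map's internal storage. */
  intrusive_hash_map_free(map, free_payload);
}
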
diff --git a/src/core/ext/census/intrusive_hash_map.h b/src/core/ext/census/intrusive_hash_map.h
deleted file mode 100644
index 2c7baa31fb..0000000000
--- a/src/core/ext/census/intrusive_hash_map.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
-#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
-
-#include "src/core/ext/census/intrusive_hash_map_internal.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* intrusive_hash_map is a fast chained hash table. This hash map is faster than
- * a dense hash map when the application calls insert and erase more often than
- * find. When the workload is dominated by find() a dense hash map may be
- * faster.
- *
- * intrusive_hash_map uses an intrusive header placed within a user defined
- * struct. The header field IHM_key MUST be set to a valid value before
- * insertion into the hash map or undefined behavior may occur. The header field
- * IHM_hash_link MUST be set to NULL initially.
- *
- * EXAMPLE USAGE:
- *
- * typedef struct string_item {
- * INTRUSIVE_HASH_MAP_HEADER;
- * // User data.
- * char *str_buf;
- * uint16_t len;
- * } string_item;
- *
- * static string_item *make_string_item(uint64_t key, const char *buf,
- * uint16_t len) {
- * string_item *item = (string_item *)gpr_malloc(sizeof(string_item));
- * item->IHM_key = key;
- * item->IHM_hash_link = NULL;
- * item->len = len;
- * item->str_buf = (char *)malloc(len);
- * memcpy(item->str_buf, buf, len);
- * return item;
- * }
- *
- * intrusive_hash_map hash_map;
- * intrusive_hash_map_init(&hash_map, 4);
- * string_item *new_item1 = make_string_item(10, "test1", 5);
- * bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1);
- *
- * string_item *item1 =
- * (string_item *)intrusive_hash_map_find(&hash_map, 10);
- */
-
-/* Hash map item. Stores the key and the hash chain link. A user defined
- * version of this can be passed in provided the first 2 entries (key and
- * hash_link) are the same. These entries must be first in the user defined
- * struct. Pointer to struct will need to be cast as (hm_item *) when passed to
- * hash map. This allows it to be intrusive. */
-typedef struct hm_item {
- uint64_t key;
- struct hm_item *hash_link;
- /* Optional user defined data after this. */
-} hm_item;
-
-/* Macro provided for ease of use. This must be first in the user defined
- * struct (i.e. uint64_t key and hm_item * must be the first two elements in
- * that order). */
-#define INTRUSIVE_HASH_MAP_HEADER \
- uint64_t IHM_key; \
- struct hm_item *IHM_hash_link
-
-/* Index struct which acts as a pseudo-iterator within the hash map. */
-typedef struct hm_index {
- uint32_t bucket_index; // hash map bucket index.
- hm_item *item; // Pointer to hm_item within the hash map.
-} hm_index;
-
-/* Returns true if two hm_indices point to the same object within the hash map
- * and false otherwise. */
-__inline bool hm_index_compare(const hm_index *A, const hm_index *B) {
- return (A->item == B->item && A->bucket_index == B->bucket_index);
-}
-
-/*
- * Helper functions for iterating over the hash map.
- */
-
-/* On return, idx will contain an invalid index which is always equal to
- * hash_map->buckets.size_. */
-void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx);
-
-/* Iterates index to the next valid entry in the hash map and stores the
- * index within idx. If end of table is reached, idx will contain the same
- * values as if intrusive_hash_map_end() was called. */
-void intrusive_hash_map_next(const intrusive_hash_map *hash_map, hm_index *idx);
-
-/* On return, idx will contain the index of the first non-null entry in the hash
- * map. If the hash map is empty, idx will contain the same values as if
- * intrusive_hash_map_end() was called. */
-void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
- hm_index *idx);
-
-/* Initialize intrusive hash map data structure. This must be called before
- * the hash map can be used. The initial size of an intrusive hash map will be
- * 2^initial_log2_map_size (valid range is [0, 31]). */
-void intrusive_hash_map_init(intrusive_hash_map *hash_map,
- uint32_t initial_log2_map_size);
-
-/* Returns true if the hash map is empty and false otherwise. */
-bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map);
-
-/* Returns the number of elements currently in the hash map. */
-size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map);
-
-/* Find a hm_item within the hash map by key. Returns NULL if item was not
- * found. */
-hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
- uint64_t key);
-
-/* Erase the hm_item that corresponds with key. If the hm_item is found, return
- * the pointer to the hm_item. Else returns NULL. */
-hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key);
-
-/* Attempts to insert a new hm_item into the hash map. If an element with the
- * same key already exists, it will not insert the new item and return false.
- * Otherwise, it will insert the new item and return true. */
-bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item);
-
-/* Clears the entire contents of the hash map, but leaves the internal data
- * structure untouched. The second argument is a function pointer that will be
- * called to free each user-defined object stored in the hash map. */
-void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
- void (*free_object)(void *));
-
-/* Erase all contents of the hash map and free its memory. The hash map is
- * invalid after calling this function and cannot be used until it has been
- * reinitialized (intrusive_hash_map_init()). The second argument is a function
- * pointer that will be called to free each user-defined object stored in the
- * hash map. */
-void intrusive_hash_map_free(intrusive_hash_map *hash_map,
- void (*free_object)(void *));
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H */
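A short sketch of iterating with the hm_index helpers declared above; it mirrors the begin/next/end pattern used internally by intrusive_hash_map_extend() and intrusive_hash_map_clear(). The counting helper is illustrative.

#include "src/core/ext/census/intrusive_hash_map.h"

static size_t count_items(const intrusive_hash_map *map) {
  size_t count = 0;
  hm_index cur;
  hm_index end;
  intrusive_hash_map_end(map, &end);
  for (intrusive_hash_map_begin(map, &cur); !hm_index_compare(&cur, &end);
       intrusive_hash_map_next(map, &cur)) {
    ++count; /* cur.item points at the current entry */
  }
  return count;
}
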
diff --git a/src/core/ext/census/intrusive_hash_map_internal.h b/src/core/ext/census/intrusive_hash_map_internal.h
deleted file mode 100644
index e9c81fc85c..0000000000
--- a/src/core/ext/census/intrusive_hash_map_internal.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
-#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-
-/* The chunked vector is a data structure that allocates buckets for use in the
- * hash map. ChunkedVector is logically equivalent to T*[N] (cast void* as
- * T*). It's internally implemented as an array of 1MB arrays to avoid
- * allocating large consecutive memory chunks. This is an internal data
- * structure that should never be accessed directly. */
-typedef struct chunked_vector {
- size_t size_;
- void **first_;
- void ***rest_;
-} chunked_vector;
-
-/* Core intrusive hash map data structure. All internal elements are managed by
- * functions and should not be altered manually. */
-typedef struct intrusive_hash_map {
- uint32_t num_items;
- uint32_t extend_threshold;
- uint32_t log2_num_buckets;
- uint32_t hash_mask;
- chunked_vector buckets;
-} intrusive_hash_map;
-
-#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H */
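A worked sketch of how a logical bucket index maps onto this two-level layout, matching get_bucket()/get_mutable_bucket() in intrusive_hash_map.cc; the numbers assume 64-bit pointers, so VECTOR_CHUNK_SIZE is (1 << 20) / 8 = 131072.

/*   index = 131071  ->  first_[131071]
     index = 131072  ->  rest_[0][0]      rest_index = (131072 - 131072) / 131072 = 0,
                                          131072 % 131072 = 0
     index = 300000  ->  rest_[1][37856]  rest_index = (300000 - 131072) / 131072 = 1,
                                          300000 % 131072 = 37856
   Taking index % VECTOR_CHUNK_SIZE as the within-chunk offset only works
   because first_ holds exactly one full chunk. */
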
diff --git a/src/core/ext/census/mlog.cc b/src/core/ext/census/mlog.cc
deleted file mode 100644
index 4b8c8466b3..0000000000
--- a/src/core/ext/census/mlog.cc
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Implements an efficient in-memory log, optimized for multiple writers and
-// a single reader. Available log space is divided up in blocks of
-// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following
-// three data structures:
-// - Free blocks (free_block_list)
-// - Blocks with unread data (dirty_block_list)
-// - Blocks currently attached to cores (core_local_blocks[])
-//
-// census_log_start_write() moves a block from core_local_blocks[] to the end of
-// dirty_block_list when block:
-// - is out-of-space OR
-// - has an incomplete record (an incomplete record occurs when a thread calls
-// census_log_start_write() and is context-switched before calling
-// census_log_end_write()).
-// So, blocks in dirty_block_list are ordered, from oldest to newest, by the
-// time when block is detached from the core.
-//
-// census_log_read_next() first iterates over dirty_block_list and then
-// core_local_blocks[]. It moves completely read blocks from dirty_block_list
-// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
-// completely read.
-//
-// If the log is configured to discard old records and free_block_list is empty,
-// census_log_start_write() iterates over dirty_block_list to allocate a
-// new block. It moves the oldest available block (no pending read/write) to
-// core_local_blocks[].
-//
-// core_local_block_struct is used to implement a map from core id to the block
-// associated with that core. This mapping is advisory. It is possible that the
-// block returned by this mapping is no longer associated with that core. This
-// mapping is updated, lazily, by census_log_start_write().
-//
-// Locking in block struct:
-//
-// Exclusive g_log.lock must be held before calling any functions operating on
-// block structs except census_log_start_write() and census_log_end_write().
-//
-// Writes to a block are serialized via writer_lock. census_log_start_write()
-// acquires this lock and census_log_end_write() releases it. On failure to
-// acquire the lock, writer allocates a new block for the current core and
-// updates core_local_block accordingly.
-//
-// Simultaneous read and write access is allowed. Readers can safely read up to
-// committed bytes (bytes_committed).
-//
-// reader_lock protects the block currently being read from getting recycled.
-// start_read() acquires reader_lock and end_read() releases the lock.
-//
-// Read/write access to a block is disabled via try_disable_access(). It returns
-// with both writer_lock and reader_lock held. These locks are subsequently
-// released by enable_access() to enable access to the block.
-//
-// A note on naming: Most function/struct names are prepended by cl_
-// (shorthand for census_log). Further, functions that manipulate structures
-// include the name of the structure, which will be passed as the first
-// argument. E.g. cl_block_initialize() will initialize a cl_block.
-
-#include "src/core/ext/census/mlog.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-#include <string.h>
-
-// End of platform specific code
-
-typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct* next;
- struct census_log_block_list_struct* prev;
- struct census_log_block* block;
-} cl_block_list_struct;
-
-typedef struct census_log_block {
- // Pointer to underlying buffer.
- char* buffer;
- gpr_atm writer_lock;
- gpr_atm reader_lock;
- // Keeps completely written bytes. Declared atomic because accessed
- // simultaneously by reader and writer.
- gpr_atm bytes_committed;
- // Bytes already read.
- size_t bytes_read;
- // Links for list.
- cl_block_list_struct link;
-// We want this structure to be cacheline aligned. We assume the following
-// sizes for the various parts on 32/64bit systems:
-// type 32b size 64b size
-// char* 4 8
-// 3x gpr_atm 12 24
-// size_t 4 8
-// cl_block_list_struct 12 24
-// TOTAL 32 64
-//
-// Depending on the size of our cacheline and the architecture, we
-// selectively add char buffering to this structure. The size is checked
-// via assert in census_log_initialize().
-#if defined(GPR_ARCH_64)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_BLOCK_PAD_SIZE > 0
- char padding[CL_BLOCK_PAD_SIZE];
-#endif
-} cl_block;
-
-// A list of cl_blocks, doubly-linked through cl_block::link.
-typedef struct census_log_block_list {
- int32_t count; // Number of items in list.
- cl_block_list_struct ht; // head/tail of linked list.
-} cl_block_list;
-
-// Cacheline aligned block pointers to avoid false sharing. Block pointer must
-// be initialized via cl_core_local_block_set_block() before calling other
-// functions.
-typedef struct census_log_core_local_block {
- gpr_atm block;
-// Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8.
-#if defined(GPR_ARCH_64)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
- char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
-#endif
-} cl_core_local_block;
-
-struct census_log {
- int discard_old_records;
- // Number of cores (aka hardware-contexts)
- unsigned num_cores;
- // Number of blocks of CENSUS_LOG_MAX_RECORD_SIZE bytes each in the log.
- uint32_t num_blocks;
- cl_block* blocks; // Block metadata.
- cl_core_local_block* core_local_blocks; // Keeps core to block mappings.
- gpr_mu lock;
- int initialized; // has log been initialized?
- // Keeps the state of the reader iterator. A value of 0 indicates that
- // the iterator has reached the end. census_log_init_reader() resets the
- // value to num_cores to restart iteration.
- uint32_t read_iterator_state;
- // Points to the block being read. If non-NULL, the block is locked for
- // reading (block_being_read->reader_lock is held).
- cl_block* block_being_read;
- char* buffer;
- cl_block_list free_block_list;
- cl_block_list dirty_block_list;
- gpr_atm out_of_space_count;
-};
-
-// Single internal log.
-static struct census_log g_log;
-
-// Functions that operate on an atomic memory location used as a lock.
-
-// Returns non-zero if lock is acquired.
-static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-
-static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
-
-// Functions that operate on cl_core_local_block's.
-
-static void cl_core_local_block_set_block(cl_core_local_block* clb,
- cl_block* block) {
- gpr_atm_rel_store(&clb->block, (gpr_atm)block);
-}
-
-static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
- return (cl_block*)gpr_atm_acq_load(&clb->block);
-}
-
-// Functions that operate on cl_block_list_struct's.
-
-static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
- cl_block* block) {
- bls->next = bls->prev = bls;
- bls->block = block;
-}
-
-// Functions that operate on cl_block_list's.
-
-static void cl_block_list_initialize(cl_block_list* list) {
- list->count = 0;
- cl_block_list_struct_initialize(&list->ht, NULL);
-}
-
-// Returns head of *this, or NULL if empty.
-static cl_block* cl_block_list_head(cl_block_list* list) {
- return list->ht.next->block;
-}
-
-// Insert element *e after *pos.
-static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
- cl_block_list_struct* e) {
- list->count++;
- e->next = pos->next;
- e->prev = pos;
- e->next->prev = e;
- e->prev->next = e;
-}
-
-// Insert block at the head of the list
-static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
- cl_block_list_insert(list, &list->ht, &block->link);
-}
-
-// Insert block at the tail of the list.
-static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
- cl_block_list_insert(list, list->ht.prev, &block->link);
-}
-
-// Removes block *b. Requires *b be in the list.
-static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
- list->count--;
- b->link.next->prev = b->link.prev;
- b->link.prev->next = b->link.next;
-}
-
-// Functions that operate on cl_block's
-
-static void cl_block_initialize(cl_block* block, char* buffer) {
- block->buffer = buffer;
- gpr_atm_rel_store(&block->writer_lock, 0);
- gpr_atm_rel_store(&block->reader_lock, 0);
- gpr_atm_rel_store(&block->bytes_committed, 0);
- block->bytes_read = 0;
- cl_block_list_struct_initialize(&block->link, block);
-}
-
-// Guards against exposing partially written buffer to the reader.
-static void cl_block_set_bytes_committed(cl_block* block,
- size_t bytes_committed) {
- gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
-}
-
-static size_t cl_block_get_bytes_committed(cl_block* block) {
- return (size_t)gpr_atm_acq_load(&block->bytes_committed);
-}
-
-// Tries to disable future read/write access to this block. Succeeds if:
-// - no in-progress write AND
-// - no in-progress read AND
-// - 'discard_data' set to true OR no unread data
-// On success, clears the block state and returns with writer_lock and
-// reader_lock held. These locks are released by a subsequent
-// cl_block_enable_access() call.
-static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
- if (!cl_try_lock(&block->writer_lock)) {
- return false;
- }
- if (!cl_try_lock(&block->reader_lock)) {
- cl_unlock(&block->writer_lock);
- return false;
- }
- if (!discard_data &&
- (block->bytes_read != cl_block_get_bytes_committed(block))) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
- return false;
- }
- cl_block_set_bytes_committed(block, 0);
- block->bytes_read = 0;
- return true;
-}
-
-static void cl_block_enable_access(cl_block* block) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
-}
-
-// Returns with writer_lock held.
-static void* cl_block_start_write(cl_block* block, size_t size) {
- if (!cl_try_lock(&block->writer_lock)) {
- return NULL;
- }
- size_t bytes_committed = cl_block_get_bytes_committed(block);
- if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
- cl_unlock(&block->writer_lock);
- return NULL;
- }
- return block->buffer + bytes_committed;
-}
-
-// Releases writer_lock and increments committed bytes by 'bytes_written'.
-// 'bytes_written' must be <= 'size' specified in the corresponding
-// cl_block_start_write() call. This function is thread-safe.
-static void cl_block_end_write(cl_block* block, size_t bytes_written) {
- cl_block_set_bytes_committed(
- block, cl_block_get_bytes_committed(block) + bytes_written);
- cl_unlock(&block->writer_lock);
-}
-
-// Returns a pointer to the first unread byte in buffer. The number of bytes
-// available is returned in 'bytes_available'. Acquires a reader lock that is
-// released by a subsequent cl_block_end_read() call. Returns NULL if:
-// - read in progress
-// - no data available
-static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
- if (!cl_try_lock(&block->reader_lock)) {
- return NULL;
- }
- // bytes_committed may change from under us. Use bytes_available to update
- // bytes_read below.
- size_t bytes_committed = cl_block_get_bytes_committed(block);
- GPR_ASSERT(bytes_committed >= block->bytes_read);
- *bytes_available = bytes_committed - block->bytes_read;
- if (*bytes_available == 0) {
- cl_unlock(&block->reader_lock);
- return NULL;
- }
- void* record = block->buffer + block->bytes_read;
- block->bytes_read += *bytes_available;
- return record;
-}
-
-static void cl_block_end_read(cl_block* block) {
- cl_unlock(&block->reader_lock);
-}
-
-// Internal functions operating on g_log
-
-// Allocates a new free block (or recycles an available dirty block if log is
-// configured to discard old records). Returns NULL if out-of-space.
-static cl_block* cl_allocate_block(void) {
- cl_block* block = cl_block_list_head(&g_log.free_block_list);
- if (block != NULL) {
- cl_block_list_remove(&g_log.free_block_list, block);
- return block;
- }
- if (!g_log.discard_old_records) {
- // No free block and log is configured to keep old records.
- return NULL;
- }
- // Recycle dirty block. Start from the oldest.
- for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
- block = block->link.next->block) {
- if (cl_block_try_disable_access(block, 1 /* discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, block);
- return block;
- }
- }
- return NULL;
-}
-
-// Allocates a new block and updates core id => block mapping. 'old_block'
-// points to the block that the caller thinks is attached to
-// 'core_id'. 'old_block' may be NULL. Returns true if:
-// - allocated a new block OR
-// - 'core_id' => 'old_block' mapping changed (another thread allocated a
-// block before lock was acquired).
-static bool cl_allocate_core_local_block(uint32_t core_id,
- cl_block* old_block) {
- // Now that we have the lock, check if core-local mapping has changed.
- cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
- cl_block* block = cl_core_local_block_get_block(core_local_block);
- if ((block != NULL) && (block != old_block)) {
- return true;
- }
- if (block != NULL) {
- cl_core_local_block_set_block(core_local_block, NULL);
- cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
- }
- block = cl_allocate_block();
- if (block == NULL) {
- return false;
- }
- cl_core_local_block_set_block(core_local_block, block);
- cl_block_enable_access(block);
- return true;
-}
-
-static cl_block* cl_get_block(void* record) {
- uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
- uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
- return &g_log.blocks[index];
-}
-
-// Gets the next block to read and tries to free 'prev' block (if not NULL).
-// Returns NULL if reached the end.
-static cl_block* cl_next_block_to_read(cl_block* prev) {
- cl_block* block = NULL;
- if (g_log.read_iterator_state == g_log.num_cores) {
- // We are traversing dirty list; find the next dirty block.
- if (prev != NULL) {
- // Try to free the previous block if there is no unread data. This block
- // may have unread data if a previously incomplete record completed
- // between read_next() calls.
- block = prev->link.next->block;
- if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, prev);
- cl_block_list_insert_at_head(&g_log.free_block_list, prev);
- }
- } else {
- block = cl_block_list_head(&g_log.dirty_block_list);
- }
- if (block != NULL) {
- return block;
- }
- // We are done with the dirty list; moving on to core-local blocks.
- }
- while (g_log.read_iterator_state > 0) {
- g_log.read_iterator_state--;
- block = cl_core_local_block_get_block(
- &g_log.core_local_blocks[g_log.read_iterator_state]);
- if (block != NULL) {
- return block;
- }
- }
- return NULL;
-}
-
-#define CL_LOG_2_MB 20 // 2^20 = 1MB
-
-// External functions: primary stats_log interface
-void census_log_initialize(size_t size_in_mb, int discard_old_records) {
- // Check cacheline alignment.
- GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(!g_log.initialized);
- g_log.discard_old_records = discard_old_records;
- g_log.num_cores = gpr_cpu_num_cores();
- // Ensure that we will not get any overflow in calculating num_blocks.
- GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
- GPR_ASSERT(size_in_mb < 1000);
- // Ensure at least 2x as many blocks as there are cores.
- g_log.num_blocks =
- (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
- CENSUS_LOG_2_MAX_RECORD_SIZE);
- gpr_mu_init(&g_log.lock);
- g_log.read_iterator_state = 0;
- g_log.block_being_read = NULL;
- g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
- g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.core_local_blocks, 0,
- g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block*)gpr_malloc_aligned(
- g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer =
- (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- cl_block_list_initialize(&g_log.free_block_list);
- cl_block_list_initialize(&g_log.dirty_block_list);
- for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
- cl_block* block = g_log.blocks + i;
- cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
- cl_block_try_disable_access(block, 1 /* discard data */);
- cl_block_list_insert_at_tail(&g_log.free_block_list, block);
- }
- gpr_atm_rel_store(&g_log.out_of_space_count, 0);
- g_log.initialized = 1;
-}
-
-void census_log_shutdown(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_destroy(&g_log.lock);
- gpr_free_aligned(g_log.core_local_blocks);
- g_log.core_local_blocks = NULL;
- gpr_free_aligned(g_log.blocks);
- g_log.blocks = NULL;
- gpr_free(g_log.buffer);
- g_log.buffer = NULL;
- g_log.initialized = 0;
-}
-
-void* census_log_start_write(size_t size) {
- // Used to bound number of times block allocation is attempted.
- GPR_ASSERT(size > 0);
- GPR_ASSERT(g_log.initialized);
- if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
- return NULL;
- }
- uint32_t attempts_remaining = g_log.num_blocks;
- uint32_t core_id = gpr_cpu_current_cpu();
- do {
- void* record = NULL;
- cl_block* block =
- cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
- if (block && (record = cl_block_start_write(block, size))) {
- return record;
- }
- // Need to allocate a new block. We are here if:
- // - No block associated with the core OR
- // - Write in-progress on the block OR
- // - block is out of space
- gpr_mu_lock(&g_log.lock);
- bool allocated = cl_allocate_core_local_block(core_id, block);
- gpr_mu_unlock(&g_log.lock);
- if (!allocated) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- } while (attempts_remaining--);
- // Give up.
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
-}
-
-void census_log_end_write(void* record, size_t bytes_written) {
- GPR_ASSERT(g_log.initialized);
- cl_block_end_write(cl_get_block(record), bytes_written);
-}
-
-void census_log_init_reader(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- // If a block is locked for reading unlock it.
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- g_log.block_being_read = NULL;
- }
- g_log.read_iterator_state = g_log.num_cores;
- gpr_mu_unlock(&g_log.lock);
-}
-
-const void* census_log_read_next(size_t* bytes_available) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- }
- do {
- g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
- if (g_log.block_being_read != NULL) {
- void* record =
- cl_block_start_read(g_log.block_being_read, bytes_available);
- if (record != NULL) {
- gpr_mu_unlock(&g_log.lock);
- return record;
- }
- }
- } while (g_log.block_being_read != NULL);
- gpr_mu_unlock(&g_log.lock);
- return NULL;
-}
-
-size_t census_log_remaining_space(void) {
- GPR_ASSERT(g_log.initialized);
- size_t space = 0;
- gpr_mu_lock(&g_log.lock);
- if (g_log.discard_old_records) {
- // Remaining space is not meaningful; just return the entire log space.
- space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
- } else {
- GPR_ASSERT(g_log.free_block_list.count >= 0);
- space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
- }
- gpr_mu_unlock(&g_log.lock);
- return space;
-}
-
-int64_t census_log_out_of_space_count(void) {
- GPR_ASSERT(g_log.initialized);
- return gpr_atm_acq_load(&g_log.out_of_space_count);
-}
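
For reference, a minimal sketch of the writer side of this API as a caller might use it (the payload and its length are hypothetical; census_log_initialize() is assumed to have been called already):

    #include <string.h>
    #include "src/core/ext/census/mlog.h"

    // Sketch only: copy an already-serialized record into the log.
    void log_record(const char* payload, size_t len) {
      void* record = census_log_start_write(len);
      if (record == NULL) {
        // Out of space; census_log_out_of_space_count() reflects this.
        return;
      }
      memcpy(record, payload, len);
      // bytes_written must be <= the size requested in start_write.
      census_log_end_write(record, len);
    }
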
diff --git a/src/core/ext/census/mlog.h b/src/core/ext/census/mlog.h
deleted file mode 100644
index 8f74ba231d..0000000000
--- a/src/core/ext/census/mlog.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* A very fast in-memory log, optimized for multiple writers. */
-
-#ifndef GRPC_CORE_EXT_CENSUS_MLOG_H
-#define GRPC_CORE_EXT_CENSUS_MLOG_H
-
-#include <grpc/support/port_platform.h>
-#include <stddef.h>
-
-/* Maximum record size, in bytes. */
-#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
-#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Initialize the statistics logging subsystem with the given log size. A log
- size of 0 will result in the smallest possible log for the platform
- (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
- discard_old_records is non-zero, then new records will displace older ones
- when the log is full. This function must be called before any other
- census_log functions.
-*/
-void census_log_initialize(size_t size_in_mb, int discard_old_records);
-
-/* Shut down the logging subsystem. The caller must ensure that:
-   - there are no in-progress or future calls to any census_log functions
-   - there are no incomplete records
-*/
-void census_log_shutdown(void);
-
-/* Allocates a record of 'size' bytes, marks it in use, and returns it. A
-   subsequent census_log_end_write() marks the record complete. The
-   'bytes_written' argument to census_log_end_write() must be <= 'size'.
-   Returns NULL when out of space AND either:
-   - the log is configured to keep old records, OR
-   - all blocks are pinned by incomplete records.
-*/
-void* census_log_start_write(size_t size);
-
-void census_log_end_write(void* record, size_t bytes_written);
-
-void census_log_init_reader(void);
-
-/* census_log_read_next() iterates over blocks with data and for each block
-   returns a pointer to the first unread byte. The number of bytes that can be
-   read is returned in 'bytes_available'. The reader is expected to read all
-   available data. Reading the data consumes it, i.e. it cannot be read again.
-   census_log_read_next() returns NULL once the end is reached, i.e. the last
-   block has been read. census_log_init_reader() starts the iteration or
-   aborts the current iteration.
-*/
-const void* census_log_read_next(size_t* bytes_available);
-
-/* Returns estimated remaining space across all blocks, in bytes. If log is
- configured to discard old records, returns total log space. Otherwise,
- returns space available in empty blocks (partially filled blocks are
- treated as full).
-*/
-size_t census_log_remaining_space(void);
-
-/* Returns the number of times census_log_start_write() failed due to
-   out-of-space. */
-int64_t census_log_out_of_space_count(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */
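
A short sketch of the reader side described above, with a hypothetical consume callback; reading consumes the data, so each record is seen only once:

    #include "src/core/ext/census/mlog.h"

    // Sketch only: drain all completed records from the log.
    void drain_log(void (*consume)(const void* data, size_t len)) {
      census_log_init_reader();  // starts (or restarts) the iteration
      size_t bytes_available = 0;
      const void* data;
      while ((data = census_log_read_next(&bytes_available)) != NULL) {
        consume(data, bytes_available);
      }
    }
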
diff --git a/src/core/ext/census/operation.cc b/src/core/ext/census/operation.cc
deleted file mode 100644
index be88ac74e6..0000000000
--- a/src/core/ext/census/operation.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-
-/* TODO(aveitch): These are all placeholder implementations. */
-
-census_timestamp census_start_rpc_op_timestamp(void) {
- census_timestamp ct;
- /* TODO(aveitch): assumes gpr_timespec implementation of census_timestamp. */
- ct.ts = gpr_now(GPR_CLOCK_MONOTONIC);
- return ct;
-}
-
-census_context *census_start_client_rpc_op(
- const census_context *context, int64_t rpc_name_id,
- const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
- const census_timestamp *start_time) {
- return NULL;
-}
-
-census_context *census_start_server_rpc_op(
- const char *buffer, int64_t rpc_name_id,
- const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
- census_timestamp *start_time) {
- return NULL;
-}
-
-census_context *census_start_op(census_context *context, const char *family,
- const char *name, int trace_mask) {
- return NULL;
-}
-
-void census_end_op(census_context *context, int status) {}
diff --git a/src/core/ext/census/placeholders.cc b/src/core/ext/census/placeholders.cc
deleted file mode 100644
index bed9837ee3..0000000000
--- a/src/core/ext/census/placeholders.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-
-#include <grpc/support/log.h>
-
-/* Placeholders for the pending APIs */
-
-int census_get_trace_record(census_trace_record *trace_record) {
- (void)trace_record;
- abort();
-}
-
-void census_record_values(census_context *context, census_value *values,
- size_t nvalues) {
- (void)context;
- (void)values;
- (void)nvalues;
- abort();
-}
-
-void census_set_rpc_client_peer(census_context *context, const char *peer) {
- (void)context;
- (void)peer;
- abort();
-}
-
-void census_trace_scan_end() { abort(); }
-
-int census_trace_scan_start(int consume) {
- (void)consume;
- abort();
-}
diff --git a/src/core/ext/census/resource.cc b/src/core/ext/census/resource.cc
deleted file mode 100644
index 44a887231c..0000000000
--- a/src/core/ext/census/resource.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/resource.h"
-#include "third_party/nanopb/pb_decode.h"
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-
-#include <stdbool.h>
-#include <string.h>
-
-// Protect local resource data structures.
-static gpr_mu resource_lock;
-
-// Deleting and creating resources are relatively rare events, and should not
-// be done in the critical path of performance-sensitive code. We record the
-// resource ids currently in use in a simple array, and just search it each
-// time we need to assign a new id or look up a resource.
-static resource **resources = NULL;
-
-// Number of entries in *resources
-static size_t n_resources = 0;
-
-// Number of defined resources
-static size_t n_defined_resources = 0;
-
-void initialize_resources(void) {
- gpr_mu_init(&resource_lock);
- gpr_mu_lock(&resource_lock);
- GPR_ASSERT(resources == NULL && n_resources == 0 && n_defined_resources == 0);
- gpr_mu_unlock(&resource_lock);
-}
-
-// Delete a resource given its ID. The ID must be a valid resource ID. Must be
-// called with resource_lock held.
-static void delete_resource_locked(size_t rid) {
- GPR_ASSERT(resources[rid] != NULL);
- gpr_free(resources[rid]->name);
- gpr_free(resources[rid]->description);
- gpr_free(resources[rid]->numerators);
- gpr_free(resources[rid]->denominators);
- gpr_free(resources[rid]);
- resources[rid] = NULL;
- n_defined_resources--;
-}
-
-void shutdown_resources(void) {
- gpr_mu_lock(&resource_lock);
- for (size_t i = 0; i < n_resources; i++) {
- if (resources[i] != NULL) {
- delete_resource_locked(i);
- }
- }
- GPR_ASSERT(n_defined_resources == 0);
- gpr_free(resources);
- resources = NULL;
- n_resources = 0;
- gpr_mu_unlock(&resource_lock);
-}
-
-// Check the contents of string fields in a resource proto.
-static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- resource *vresource = (resource *)*arg;
- switch (field->tag) {
- case google_census_Resource_name_tag:
- // Name must have at least one character
- if (stream->bytes_left == 0) {
- gpr_log(GPR_INFO, "Zero-length Resource name.");
- return false;
- }
- vresource->name = (char *)gpr_malloc(stream->bytes_left + 1);
- vresource->name[stream->bytes_left] = '\0';
- if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
- return false;
- }
- // Can't have same name as an existing resource.
- for (size_t i = 0; i < n_resources; i++) {
- resource *compare = resources[i];
- if (compare == vresource || compare == NULL) continue;
- if (strcmp(compare->name, vresource->name) == 0) {
- gpr_log(GPR_INFO, "Duplicate Resource name %s.", vresource->name);
- return false;
- }
- }
- break;
- case google_census_Resource_description_tag:
- if (stream->bytes_left == 0) {
- return true;
- }
- vresource->description = (char *)gpr_malloc(stream->bytes_left + 1);
- vresource->description[stream->bytes_left] = '\0';
- if (!pb_read(stream, (uint8_t *)vresource->description,
- stream->bytes_left)) {
- return false;
- }
- break;
- default:
- // No other string fields in Resource. Print warning and skip.
- gpr_log(GPR_INFO, "Unknown string field type in Resource protobuf.");
- if (!pb_read(stream, NULL, stream->bytes_left)) {
- return false;
- }
- break;
- }
- return true;
-}
-
-// Decode numerators/denominators in a stream. The `count` and `bup`
-// (BasicUnit pointer) arguments are pointers to the appropriate fields in a
-// resource struct.
-static bool validate_units_helper(pb_istream_t *stream, int *count,
- google_census_Resource_BasicUnit **bup) {
- while (stream->bytes_left) {
- (*count)++;
- // Have to allocate a new array of values. Normal case is 0 or 1, so
- // this should normally not be an issue.
- google_census_Resource_BasicUnit *new_bup =
- (google_census_Resource_BasicUnit *)gpr_malloc(
- (size_t)*count * sizeof(google_census_Resource_BasicUnit));
- if (*count != 1) {
- memcpy(new_bup, *bup,
- (size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
- gpr_free(*bup);
- }
- *bup = new_bup;
- uint64_t value;
- if (!pb_decode_varint(stream, &value)) {
- return false;
- }
- *(*bup + *count - 1) = (google_census_Resource_BasicUnit)value;
- }
- return true;
-}
-
-// Validate units field of a Resource proto.
-static bool validate_units(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- resource *vresource = (resource *)(*arg);
- switch (field->tag) {
- case google_census_Resource_MeasurementUnit_numerator_tag:
- return validate_units_helper(stream, &vresource->n_numerators,
- &vresource->numerators);
- break;
- case google_census_Resource_MeasurementUnit_denominator_tag:
- return validate_units_helper(stream, &vresource->n_denominators,
- &vresource->denominators);
- break;
- default:
- gpr_log(GPR_ERROR, "Unknown field type.");
- return false;
- break;
- }
- return true;
-}
-
-// Validate the contents of a Resource proto. `id` is the intended resource id.
-static bool validate_resource_pb(const uint8_t *resource_pb,
- size_t resource_pb_size, size_t id) {
- GPR_ASSERT(id < n_resources);
- if (resource_pb == NULL) {
- return false;
- }
- google_census_Resource vresource;
- vresource.name.funcs.decode = &validate_string;
- vresource.name.arg = resources[id];
- vresource.description.funcs.decode = &validate_string;
- vresource.description.arg = resources[id];
- vresource.unit.numerator.funcs.decode = &validate_units;
- vresource.unit.numerator.arg = resources[id];
- vresource.unit.denominator.funcs.decode = &validate_units;
- vresource.unit.denominator.arg = resources[id];
-
- pb_istream_t stream =
- pb_istream_from_buffer((uint8_t *)resource_pb, resource_pb_size);
- if (!pb_decode(&stream, google_census_Resource_fields, &vresource)) {
- return false;
- }
- // A Resource must have a name, a unit, with at least one numerator.
- return (resources[id]->name != NULL && vresource.has_unit &&
- resources[id]->n_numerators > 0);
-}
-
-// Allocate a blank resource, and return associated ID. Must be called with
-// resource_lock held.
-size_t allocate_resource(void) {
- // use next_id to optimize expected placement of next new resource.
- static size_t next_id = 0;
- size_t id = n_resources; // resource ID - initialize to invalid value.
- // Expand resources if needed.
- if (n_resources == n_defined_resources) {
- size_t new_n_resources = n_resources ? n_resources * 2 : 2;
- resource **new_resources =
- (resource **)gpr_malloc(new_n_resources * sizeof(resource *));
- if (n_resources != 0) {
- memcpy(new_resources, resources, n_resources * sizeof(resource *));
- }
- memset(new_resources + n_resources, 0,
- (new_n_resources - n_resources) * sizeof(resource *));
- gpr_free(resources);
- resources = new_resources;
- n_resources = new_n_resources;
- id = n_defined_resources;
- } else {
- GPR_ASSERT(n_defined_resources < n_resources);
- // Find a free id.
- for (size_t base = 0; base < n_resources; base++) {
- id = (next_id + base) % n_resources;
- if (resources[id] == NULL) break;
- }
- }
- GPR_ASSERT(id < n_resources && resources[id] == NULL);
- resources[id] = (resource *)gpr_malloc(sizeof(resource));
- memset(resources[id], 0, sizeof(resource));
- n_defined_resources++;
- next_id = (id + 1) % n_resources;
- return id;
-}
-
-int32_t census_define_resource(const uint8_t *resource_pb,
- size_t resource_pb_size) {
- if (resource_pb == NULL) {
- return -1;
- }
- gpr_mu_lock(&resource_lock);
- size_t id = allocate_resource();
- // Validate pb, extract name.
- if (!validate_resource_pb(resource_pb, resource_pb_size, id)) {
- delete_resource_locked(id);
- gpr_mu_unlock(&resource_lock);
- return -1;
- }
- gpr_mu_unlock(&resource_lock);
- return (int32_t)id;
-}
-
-void census_delete_resource(int32_t rid) {
- gpr_mu_lock(&resource_lock);
- if (rid >= 0 && (size_t)rid < n_resources && resources[rid] != NULL) {
- delete_resource_locked((size_t)rid);
- }
- gpr_mu_unlock(&resource_lock);
-}
-
-int32_t census_resource_id(const char *name) {
- gpr_mu_lock(&resource_lock);
- for (int32_t id = 0; (size_t)id < n_resources; id++) {
- if (resources[id] != NULL && strcmp(resources[id]->name, name) == 0) {
- gpr_mu_unlock(&resource_lock);
- return id;
- }
- }
- gpr_mu_unlock(&resource_lock);
- return -1;
-}
-
-int32_t define_resource(const resource *base) {
- GPR_ASSERT(base != NULL && base->name != NULL && base->n_numerators > 0 &&
- base->numerators != NULL);
- gpr_mu_lock(&resource_lock);
- size_t id = allocate_resource();
- size_t len = strlen(base->name) + 1;
- resources[id]->name = (char *)gpr_malloc(len);
- memcpy(resources[id]->name, base->name, len);
- if (base->description) {
- len = strlen(base->description) + 1;
- resources[id]->description = (char *)gpr_malloc(len);
- memcpy(resources[id]->description, base->description, len);
- }
- resources[id]->prefix = base->prefix;
- resources[id]->n_numerators = base->n_numerators;
- len = (size_t)base->n_numerators * sizeof(*base->numerators);
- resources[id]->numerators =
- (google_census_Resource_BasicUnit *)gpr_malloc(len);
- memcpy(resources[id]->numerators, base->numerators, len);
- resources[id]->n_denominators = base->n_denominators;
- if (base->n_denominators != 0) {
- len = (size_t)base->n_denominators * sizeof(*base->denominators);
- resources[id]->denominators =
- (google_census_Resource_BasicUnit *)gpr_malloc(len);
- memcpy(resources[id]->denominators, base->denominators, len);
- }
- gpr_mu_unlock(&resource_lock);
- return (int32_t)id;
-}
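
A sketch of the public lookup/teardown path shown above, assuming a resource with the hypothetical name "example_resource" was defined earlier (e.g. via census_define_resource):

    #include <grpc/census.h>

    // Sketch only: look up a previously defined resource by name and delete it.
    void drop_example_resource(void) {
      int32_t rid = census_resource_id("example_resource");
      if (rid < 0) return;  // no resource with that name
      census_delete_resource(rid);
    }
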
diff --git a/src/core/ext/census/resource.h b/src/core/ext/census/resource.h
deleted file mode 100644
index 56aaaaf750..0000000000
--- a/src/core/ext/census/resource.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Census-internal resource definition and manipulation functions. */
-#ifndef GRPC_CORE_EXT_CENSUS_RESOURCE_H
-#define GRPC_CORE_EXT_CENSUS_RESOURCE_H
-
-#include <grpc/grpc.h>
-#include "src/core/ext/census/gen/census.pb.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Internal representation of a resource. */
-typedef struct {
- char *name;
- char *description;
- int32_t prefix;
- int n_numerators;
- google_census_Resource_BasicUnit *numerators;
- int n_denominators;
- google_census_Resource_BasicUnit *denominators;
-} resource;
-
-/* Initialize and shutdown the resources subsystem. */
-void initialize_resources(void);
-void shutdown_resources(void);
-
-/* Add a new resource, given a proposed resource structure. Returns the
-   resource ID, or a negative value on failure.
-   TODO(aveitch): this function exists to support addition of the base
-   resources. It should be removed when we have the ability to add resources
-   from configuration files. */
-int32_t define_resource(const resource *base);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */
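
A sketch of define_resource() with a hand-built resource struct, in the spirit of the base-resource setup. The name, description, and unit are illustrative only, and google_census_Resource_BasicUnit_SECS is assumed to be the generated enum value for seconds:

    #include <string.h>
    #include "src/core/ext/census/resource.h"

    // Sketch only: register a latency-like resource measured in seconds.
    int32_t define_example_latency_resource(void) {
      google_census_Resource_BasicUnit numerator =
          google_census_Resource_BasicUnit_SECS;  // assumed enum value
      resource r;
      memset(&r, 0, sizeof(r));
      r.name = (char*)"example_rpc_latency";             // hypothetical name
      r.description = (char*)"Latency of example RPCs";  // hypothetical text
      r.prefix = 0;
      r.n_numerators = 1;
      r.numerators = &numerator;  // define_resource() copies this array
      r.n_denominators = 0;
      return define_resource(&r);  // resource ID, or negative on failure
    }
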
diff --git a/src/core/ext/census/rpc_metric_id.h b/src/core/ext/census/rpc_metric_id.h
deleted file mode 100644
index ea493d7288..0000000000
--- a/src/core/ext/census/rpc_metric_id.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
-#define GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
-
-/* Metric ID's used for RPC measurements. */
-/* Count of client requests sent. */
-#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0)
-/* Count of server requests sent. */
-#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1)
-/* Client error counts. */
-#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2)
-/* Server error counts. */
-#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3)
-/* Client side request latency. */
-#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4)
-/* Server side request latency. */
-#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5)
-
-#endif /* GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H */
diff --git a/src/core/ext/census/trace_context.cc b/src/core/ext/census/trace_context.cc
deleted file mode 100644
index af92ae6d9e..0000000000
--- a/src/core/ext/census/trace_context.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/trace_context.h"
-
-#include <grpc/census.h>
-#include <grpc/support/log.h>
-#include <stdbool.h>
-
-#include "third_party/nanopb/pb_decode.h"
-#include "third_party/nanopb/pb_encode.h"
-
-// This function assumes the TraceContext is valid.
-size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t buf_size) {
- // Create a stream that will write to our buffer.
- pb_ostream_t stream = pb_ostream_from_buffer(buffer, buf_size);
-
- // encode message
- bool status = pb_encode(&stream, google_trace_TraceContext_fields, ctxt);
-
- if (!status) {
- gpr_log(GPR_DEBUG, "TraceContext encoding failed: %s",
- PB_GET_ERROR(&stream));
- return 0;
- }
-
- return stream.bytes_written;
-}
-
-bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t nbytes) {
- // Create a stream that reads nbytes from the buffer.
- pb_istream_t stream = pb_istream_from_buffer(buffer, nbytes);
-
- // decode message
- bool status = pb_decode(&stream, google_trace_TraceContext_fields, ctxt);
-
- if (!status) {
- gpr_log(GPR_DEBUG, "TraceContext decoding failed: %s",
- PB_GET_ERROR(&stream));
- return false;
- }
-
- // check fields
- if (!ctxt->has_trace_id_hi || !ctxt->has_trace_id_lo) {
- gpr_log(GPR_DEBUG, "Invalid TraceContext: missing trace_id");
- return false;
- }
- if (!ctxt->has_span_id) {
- gpr_log(GPR_DEBUG, "Invalid TraceContext: missing span_id");
- return false;
- }
-
- return true;
-}
diff --git a/src/core/ext/census/trace_context.h b/src/core/ext/census/trace_context.h
deleted file mode 100644
index 2b828ba4da..0000000000
--- a/src/core/ext/census/trace_context.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Functions for manipulating trace contexts as defined in
- src/proto/census/trace.proto */
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
-
-#include "src/core/ext/census/gen/trace_context.pb.h"
-
-/* Span option flags. */
-#define SPAN_OPTIONS_IS_SAMPLED 0x01
-
-/* Maximum number of bytes required to encode a TraceContext (31)
-1 byte for trace_id field
-1 byte for trace_id length
-1 byte for trace_id.hi field
-8 bytes for trace_id.hi (uint64_t)
-1 byte for trace_id.lo field
-8 bytes for trace_id.lo (uint64_t)
-1 byte for span_id field
-8 bytes for span_id (uint64_t)
-1 byte for is_sampled field
-1 byte for is_sampled (bool) */
-#define TRACE_MAX_CONTEXT_SIZE 31
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Encode a trace context (ctxt) in proto format into the provided buffer. The
-size of the buffer must be at least TRACE_MAX_CONTEXT_SIZE. On success, returns
-the number of bytes encoded into the buffer. On failure, returns 0. */
-size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t buf_size);
-
-/* Decode a proto-encoded TraceContext from the provided buffer into the
-TraceContext structure (ctxt). The function expects to be supplied the number
-of bytes to be read from the buffer (nbytes). It also validates that the
-TraceContext has a span_id and a trace_id, and returns false if either of
-these is missing. Returns true on success and false otherwise. */
-bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t nbytes);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */
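
A round-trip sketch of the two functions above. The flat field names (trace_id_hi, trace_id_lo, span_id and their has_ flags) follow the checks in trace_context.cc and are otherwise assumed; zero-initialization stands in for the nanopb default initializer:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include "src/core/ext/census/trace_context.h"

    // Sketch only: encode a TraceContext and decode it back.
    bool trace_context_round_trip(void) {
      google_trace_TraceContext ctxt;
      memset(&ctxt, 0, sizeof(ctxt));
      ctxt.has_trace_id_hi = true;
      ctxt.trace_id_hi = 0x0123456789abcdefULL;
      ctxt.has_trace_id_lo = true;
      ctxt.trace_id_lo = 0xfedcba9876543210ULL;
      ctxt.has_span_id = true;
      ctxt.span_id = 42;
      uint8_t buf[TRACE_MAX_CONTEXT_SIZE];
      size_t nbytes = encode_trace_context(&ctxt, buf, sizeof(buf));
      if (nbytes == 0) return false;  // encoding failed
      google_trace_TraceContext decoded;
      memset(&decoded, 0, sizeof(decoded));
      return decode_trace_context(&decoded, buf, nbytes);
    }
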
diff --git a/src/core/ext/census/trace_label.h b/src/core/ext/census/trace_label.h
deleted file mode 100644
index 97ce399eb5..0000000000
--- a/src/core/ext/census/trace_label.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
-
-#include "src/core/ext/census/trace_string.h"
-
-/* Trace label (key/value pair) stores a label name and the label value. The
- value can be one of trace_string/int64_t/bool. */
-typedef struct trace_label {
- trace_string key;
- enum label_type {
- /* Unknown value for debugging/error purposes */
- LABEL_UNKNOWN = 0,
- /* A string value */
- LABEL_STRING = 1,
- /* An integer value. */
- LABEL_INT = 2,
- /* A boolean value. */
- LABEL_BOOL = 3,
- } value_type;
-
- union value {
- trace_string label_str;
- int64_t label_int;
- bool label_bool;
- } value;
-} trace_label;
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H */
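
A C-style sketch of building labels with this struct; the keys and values are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include "src/core/ext/census/trace_label.h"

    // Sketch only: one string-valued and one integer-valued label.
    void make_example_labels(trace_label labels[2]) {
      labels[0].key.string = (char*)"method";
      labels[0].key.length = strlen("method");
      labels[0].value_type = LABEL_STRING;
      labels[0].value.label_str.string = (char*)"/example.Service/Method";
      labels[0].value.label_str.length = strlen("/example.Service/Method");

      labels[1].key.string = (char*)"response_bytes";
      labels[1].key.length = strlen("response_bytes");
      labels[1].value_type = LABEL_INT;
      labels[1].value.label_int = 1784;
    }
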
diff --git a/src/core/ext/census/trace_propagation.h b/src/core/ext/census/trace_propagation.h
deleted file mode 100644
index e05fd23a1f..0000000000
--- a/src/core/ext/census/trace_propagation.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
-
-#include "src/core/ext/census/tracing.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Encoding and decoding functions for receiving and sending trace contexts
- over the wire. Only RPC libraries should be calling these
- functions. These functions return the number of bytes encoded/decoded
- (0 if a failure has occurred). buf_size indicates the size of the
- input/output buffer. trace_span_context is a struct that includes the
- trace ID, span ID, and a set of option flags (is_sampled, etc.). */
-
-/* Converts a span context to a binary byte buffer. */
-size_t trace_span_context_to_binary(const trace_span_context *ctxt,
- uint8_t *buf, size_t buf_size);
-
-/* Reads a binary byte buffer and populates a span context structure. */
-size_t binary_to_trace_span_context(const uint8_t *buf, size_t buf_size,
- trace_span_context *ctxt);
-
-/* Converts a span context to an http metadata compatible string. */
-size_t trace_span_context_to_http_format(const trace_span_context *ctxt,
- char *buf, size_t buf_size);
-
-/* Reads an http metadata compatible string and populates a span context
- structure. */
-size_t http_format_to_trace_span_context(const char *buf, size_t buf_size,
- trace_span_context *ctxt);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */
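
Only declarations appear in this header, so the following is purely an assumed usage sketch: serialize a span context for the wire and parse it back, with a hypothetical buffer size:

    #include <stdbool.h>
    #include <stdint.h>
    #include "src/core/ext/census/trace_propagation.h"

    // Sketch only: binary round trip of a span context.
    bool propagate_binary_round_trip(const trace_span_context* parent,
                                     trace_span_context* out) {
      uint8_t wire[64];  // hypothetical buffer size
      size_t n = trace_span_context_to_binary(parent, wire, sizeof(wire));
      if (n == 0) return false;  // 0 indicates failure
      return binary_to_trace_span_context(wire, n, out) != 0;
    }
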
diff --git a/src/core/ext/census/trace_status.h b/src/core/ext/census/trace_status.h
deleted file mode 100644
index dd83d3f729..0000000000
--- a/src/core/ext/census/trace_status.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
-
-#include "src/core/ext/census/trace_string.h"
-
-/* Stores a status code and status message for a trace. */
-typedef struct trace_status {
- int64_t errorCode;
- trace_string errorMessage;
-} trace_status;
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H */
diff --git a/src/core/ext/census/trace_string.h b/src/core/ext/census/trace_string.h
deleted file mode 100644
index e4da3f590d..0000000000
--- a/src/core/ext/census/trace_string.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
-
-#include <grpc/slice.h>
-
-/* String struct for tracing messages. Since this is a C API, we do not have
- access to a string class. This is intended for use by higher level
- languages which wrap around the C API, as most of them have a string class.
- This will also be more efficient when copying, as we have an explicitly
- specified length. Also, grpc_slice has reference counting which allows for
- interning. */
-typedef struct trace_string {
- char *string;
- size_t length;
-} trace_string;
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STRING_H */
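
A tiny helper sketch for wrapping a NUL-terminated C string as a trace_string (hypothetical; not part of this header):

    #include <string.h>
    #include "src/core/ext/census/trace_string.h"

    // Sketch only: wrap a C string without copying it.
    static trace_string trace_string_from_cstr(const char* s) {
      trace_string ts;
      ts.string = (char*)s;
      ts.length = strlen(s);
      return ts;
    }
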
diff --git a/src/core/ext/census/tracing.cc b/src/core/ext/census/tracing.cc
deleted file mode 100644
index 823c681abf..0000000000
--- a/src/core/ext/census/tracing.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/tracing.h"
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include "src/core/ext/census/mlog.h"
-
-void trace_start_span(const trace_span_context *span_ctxt,
- const trace_string name, const start_span_options *opts,
- trace_span_context *new_span_ctxt,
- bool has_remote_parent) {
- // Noop implementation.
-}
-
-void trace_add_span_annotation(const trace_string description,
- const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_add_span_network_event_annotation(const trace_string description,
- const trace_label *labels,
- const size_t n_labels,
- const gpr_timespec timestamp,
- bool sent, uint64_t id,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_end_span(const trace_status *status, trace_span_context *span_ctxt) {
- // Noop implementation.
-}
diff --git a/src/core/ext/census/tracing.h b/src/core/ext/census/tracing.h
deleted file mode 100644
index 0690de8655..0000000000
--- a/src/core/ext/census/tracing.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACING_H
-#define GRPC_CORE_EXT_CENSUS_TRACING_H
-
-#include <grpc/support/time.h>
-#include <stdbool.h>
-#include "src/core/ext/census/trace_context.h"
-#include "src/core/ext/census/trace_label.h"
-#include "src/core/ext/census/trace_status.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* This is the low level tracing API that other languages will interface with.
- This is not intended to be accessed by the end-user, therefore it has been
- designed with performance in mind rather than ease of use. */
-
-/* The tracing level. */
-enum TraceLevel {
- /* Annotations on this context will be silently discarded. */
- NO_TRACING = 0,
- /* Annotations will not be saved to a persistent store. They will be
- available via local APIs only. This setting is not propagated to child
- spans. */
- TRANSIENT_TRACING = 1,
- /* Annotations are recorded for the entire distributed trace and they are
- saved to a persistent store. This setting is propagated to child spans. */
- PERSISTENT_TRACING = 2,
-};
-
-typedef struct trace_span_context {
- /* Trace span context stores Span ID, Trace ID, and option flags. */
- /* Trace ID is 128 bits split into 2 64-bit chunks (hi and lo). */
- uint64_t trace_id_hi;
- uint64_t trace_id_lo;
- /* Span ID is 64 bits. */
- uint64_t span_id;
- /* Span-options is 32-bit value which contains flag options. */
- uint32_t span_options;
-} trace_span_context;
-
-typedef struct start_span_options {
- /* If set, this will override the Span.local_start_time for the Span. */
- gpr_timespec local_start_timestamp;
-
-  /* Linked spans can be used to identify spans that are linked to this span in
-     a different trace. This can be used (for example) in batching operations,
-     where a single batch handler processes multiple requests from different
-     traces. If set, this points to a list of Spans that are linked to the
-     created Span. */
- trace_span_context *linked_spans;
- /* The number of linked spans. */
- size_t n_linked_spans;
-} start_span_options;
-
-/* Create a new child Span (or root if parent is NULL), with parent being the
- designated Span. The child span will have the provided name and starting
- span options (optional). The bool has_remote_parent marks whether the
- context refers to a remote parent span or not. */
-void trace_start_span(const trace_span_context *span_ctxt,
- const trace_string name, const start_span_options *opts,
- trace_span_context *new_span_ctxt,
- bool has_remote_parent);
-
-/* Add a new Annotation to the Span. Annotations consist of a description
- (trace_string) and a set of n labels (trace_label). This can be populated
- with arbitrary user data. */
-void trace_add_span_annotation(const trace_string description,
- const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt);
-
-/* Add a new NetworkEvent annotation to a Span. This function is only intended
-   to be used by RPC systems (either client or server), not by higher level
-   applications. The timestamp type will be system-defined, and the sent
-   argument designates whether this is a network send event (client request,
-   server reply) or a receive (server request, client reply). The id argument
-   corresponds to Span.Annotation.NetworkEvent.id from the data model, and
-   serves to uniquely identify each network message. */
-void trace_add_span_network_event(const trace_string description,
- const trace_label *labels,
- const size_t n_labels,
- const gpr_timespec timestamp, bool sent,
- uint64_t id, trace_span_context *span_ctxt);
-
-/* Add a set of labels to the Span. These will correspond to the field
-Span.labels in the data model. */
-void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt);
-
-/* Mark the end of Span execution with the given status. Only the timing of the
-first EndSpan call for a given Span will be recorded, and implementations are
-free to ignore all further calls using the Span. The 'status' argument may
-optionally be NULL. */
-void trace_end_span(const trace_status *status, trace_span_context *span_ctxt);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */
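
A sketch of the span lifecycle using the declarations above: start a root span (NULL parent), attach a label, and end it. All names and values are illustrative, and passing NULL for the start options is assumed to select defaults:

    #include <string.h>
    #include "src/core/ext/census/tracing.h"

    // Sketch only: root span with one label and an OK status.
    void traced_operation(void) {
      trace_span_context root;
      trace_string name = {(char*)"example.Operation",
                           strlen("example.Operation")};
      trace_start_span(NULL /* no parent: root span */, name,
                       NULL /* assumed default options */, &root,
                       false /* no remote parent */);

      trace_label label;
      label.key.string = (char*)"attempt";
      label.key.length = strlen("attempt");
      label.value_type = LABEL_INT;
      label.value.label_int = 1;
      trace_add_span_labels(&label, 1, &root);

      trace_status ok = {0 /* errorCode */, {(char*)"OK", 2}};
      trace_end_span(&ok, &root);
    }
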
diff --git a/src/core/ext/census/window_stats.cc b/src/core/ext/census/window_stats.cc
deleted file mode 100644
index 0058e4bf9c..0000000000
--- a/src/core/ext/census/window_stats.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/window_stats.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpc/support/useful.h>
-#include <math.h>
-#include <stddef.h>
-#include <string.h>
-
-/* typedefs make typing long names easier. Use cws (for census_window_stats) */
-typedef census_window_stats_stat_info cws_stat_info;
-typedef struct census_window_stats_sum cws_sum;
-
-/* Each interval is composed of a number of buckets, which hold a count of
- entries and a single statistic */
-typedef struct census_window_stats_bucket {
- int64_t count;
- void *statistic;
-} cws_bucket;
-
-/* Each interval has a set of buckets, and the variables needed to keep
- track of their current state */
-typedef struct census_window_stats_interval_stats {
- /* The buckets. There will be 'granularity' + 1 of these. */
- cws_bucket *buckets;
- /* Index of the bucket containing the smallest time interval. */
- int bottom_bucket;
- /* The smallest time storable in the current window. */
- int64_t bottom;
- /* The largest time storable in the current window + 1ns */
- int64_t top;
- /* The width of each bucket in ns. */
- int64_t width;
-} cws_interval_stats;
-
-typedef struct census_window_stats {
- /* Number of intervals. */
- int nintervals;
- /* Number of buckets in each interval. 'granularity' + 1. */
- int nbuckets;
- /* Record of stat_info. */
- cws_stat_info stat_info;
- /* Stats for each interval. */
- cws_interval_stats *interval_stats;
-  /* The time the newest stat was recorded. */
- int64_t newest_time;
-} window_stats;
-
-/* Calculate an actual bucket index from a logical index 'IDX'. Other
- parameters supply information on the interval struct and overall stats. */
-#define BUCKET_IDX(IS, IDX, WSTATS) \
- ((IS->bottom_bucket + (IDX)) % WSTATS->nbuckets)
-
-/* The maximum seconds value we can have in a valid timespec. More than this
- will result in overflow in timespec_to_ns(). This works out to ~292 years.
- TODO: consider using doubles instead of int64. */
-static int64_t max_seconds = (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
-
-static int64_t timespec_to_ns(const gpr_timespec ts) {
- if (ts.tv_sec > max_seconds) {
- return GPR_INT64_MAX - 1;
- }
- return ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
-}
-
-static void cws_initialize_statistic(void *statistic,
- const cws_stat_info *stat_info) {
- if (stat_info->stat_initialize == NULL) {
- memset(statistic, 0, stat_info->stat_size);
- } else {
- stat_info->stat_initialize(statistic);
- }
-}
-
-/* Create and initialize a statistic */
-static void *cws_create_statistic(const cws_stat_info *stat_info) {
- void *stat = gpr_malloc(stat_info->stat_size);
- cws_initialize_statistic(stat, stat_info);
- return stat;
-}
-
-window_stats *census_window_stats_create(int nintervals,
- const gpr_timespec intervals[],
- int granularity,
- const cws_stat_info *stat_info) {
- window_stats *ret;
- int i;
- /* validate inputs */
- GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
- stat_info != NULL);
- for (i = 0; i < nintervals; i++) {
- int64_t ns = timespec_to_ns(intervals[i]);
- GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
- intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
- granularity * 10 <= ns);
- }
- /* Allocate and initialize relevant data structures */
- ret = (window_stats *)gpr_malloc(sizeof(window_stats));
- ret->nintervals = nintervals;
- ret->nbuckets = granularity + 1;
- ret->stat_info = *stat_info;
- ret->interval_stats =
- (cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
- for (i = 0; i < nintervals; i++) {
- int64_t size_ns = timespec_to_ns(intervals[i]);
- cws_interval_stats *is = ret->interval_stats + i;
- cws_bucket *buckets = is->buckets =
- (cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
- int b;
- for (b = 0; b < ret->nbuckets; b++) {
- buckets[b].statistic = cws_create_statistic(stat_info);
- buckets[b].count = 0;
- }
- is->bottom_bucket = 0;
- is->bottom = 0;
- is->width = size_ns / granularity;
- /* Check for possible overflow issues, and maximize interval size if the
- user requested something large enough. */
- if ((GPR_INT64_MAX - is->width) > size_ns) {
- is->top = size_ns + is->width;
- } else {
- is->top = GPR_INT64_MAX;
- is->width = GPR_INT64_MAX / (granularity + 1);
- }
- /* If size doesn't divide evenly, we can have a width slightly too small;
- better to have it slightly large. */
- if ((size_ns - (granularity + 1) * is->width) > 0) {
- is->width += 1;
- }
- }
- ret->newest_time = 0;
- return ret;
-}
-
-/* When we try adding a measurement above the current interval range, we
- need to "shift" the buckets sufficiently to cover the new range. */
-static void cws_shift_buckets(const window_stats *wstats,
- cws_interval_stats *is, int64_t when_ns) {
- int i;
- /* number of bucket time widths to "shift" */
- int shift;
- /* number of buckets to clear */
- int nclear;
- GPR_ASSERT(when_ns >= is->top);
- /* number of bucket time widths to "shift" */
- shift = ((when_ns - is->top) / is->width) + 1;
- /* number of buckets to clear - limited by actual number of buckets */
- nclear = GPR_MIN(shift, wstats->nbuckets);
- for (i = 0; i < nclear; i++) {
- int b = BUCKET_IDX(is, i, wstats);
- is->buckets[b].count = 0;
- cws_initialize_statistic(is->buckets[b].statistic, &wstats->stat_info);
- }
- /* adjust top/bottom times and current bottom bucket */
- is->bottom_bucket = BUCKET_IDX(is, shift, wstats);
- is->top += shift * is->width;
- is->bottom += shift * is->width;
-}
-
-void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
- const void *stat_value) {
- int i;
- int64_t when_ns = timespec_to_ns(when);
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- cws_interval_stats *is = wstats->interval_stats + i;
- cws_bucket *bucket;
- if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
- continue;
- }
- if (when_ns >= is->top) { /* above limit: shift buckets */
- cws_shift_buckets(wstats, is, when_ns);
- }
- /* Add the stat. */
- GPR_ASSERT(is->bottom <= when_ns && when_ns < is->top);
- bucket = is->buckets +
- BUCKET_IDX(is, (when_ns - is->bottom) / is->width, wstats);
- bucket->count++;
- wstats->stat_info.stat_add(bucket->statistic, stat_value);
- }
- if (when_ns > wstats->newest_time) {
- wstats->newest_time = when_ns;
- }
-}
-
-/* Add a specific bucket's contents to an accumulating total. */
-static void cws_add_bucket_to_sum(cws_sum *sum, const cws_bucket *bucket,
- const cws_stat_info *stat_info) {
- sum->count += bucket->count;
- stat_info->stat_add(sum->statistic, bucket->statistic);
-}
-
-/* Add a proportion to an accumulating sum. */
-static void cws_add_proportion_to_sum(double p, cws_sum *sum,
- const cws_bucket *bucket,
- const cws_stat_info *stat_info) {
- sum->count += p * bucket->count;
- stat_info->stat_add_proportion(p, sum->statistic, bucket->statistic);
-}
-
-void census_window_stats_get_sums(const window_stats *wstats,
- const gpr_timespec when, cws_sum sums[]) {
- int i;
- int64_t when_ns = timespec_to_ns(when);
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- int when_bucket;
- int new_bucket;
- double last_proportion = 1.0;
- double bottom_proportion;
- cws_interval_stats *is = wstats->interval_stats + i;
- cws_sum *sum = sums + i;
- sum->count = 0;
- cws_initialize_statistic(sum->statistic, &wstats->stat_info);
- if (when_ns < is->bottom) {
- continue;
- }
- if (when_ns >= is->top) {
- cws_shift_buckets(wstats, is, when_ns);
- }
-    /* Calculating the appropriate proportion of each bucket to use can get
-       complicated. Essentially there are two cases:
-       1) If the "top" bucket (new_bucket, where the newest additions to the
-          recorded stats are entered) corresponds to 'when', then we need
-          to take a proportion of it (if when < newest_time) or the full
-          thing. We also (possibly) need to take a corresponding
-          proportion of the bottom bucket.
-       2) In other cases, we just take a straight proportion.
-    */
- when_bucket = (when_ns - is->bottom) / is->width;
- new_bucket = (wstats->newest_time - is->bottom) / is->width;
- if (new_bucket == when_bucket) {
- int64_t bottom_bucket_time = is->bottom + when_bucket * is->width;
- if (when_ns < wstats->newest_time) {
- last_proportion = (double)(when_ns - bottom_bucket_time) /
- (double)(wstats->newest_time - bottom_bucket_time);
- bottom_proportion =
- (double)(is->width - (when_ns - bottom_bucket_time)) / is->width;
- } else {
- bottom_proportion =
- (double)(is->width - (wstats->newest_time - bottom_bucket_time)) /
- is->width;
- }
- } else {
- last_proportion =
- (double)(when_ns + 1 - is->bottom - when_bucket * is->width) /
- is->width;
- bottom_proportion = 1.0 - last_proportion;
- }
- cws_add_proportion_to_sum(last_proportion, sum,
- is->buckets + BUCKET_IDX(is, when_bucket, wstats),
- &wstats->stat_info);
- if (when_bucket != 0) { /* last bucket isn't also bottom bucket */
- int b;
- /* Add all of "bottom" bucket if we are looking at a subset of the
- full interval, or a proportion if we are adding full interval. */
- cws_add_proportion_to_sum(
- (when_bucket == wstats->nbuckets - 1 ? bottom_proportion : 1.0), sum,
- is->buckets + is->bottom_bucket, &wstats->stat_info);
- /* Add all the remaining buckets (everything but top and bottom). */
- for (b = 1; b < when_bucket; b++) {
- cws_add_bucket_to_sum(sum, is->buckets + BUCKET_IDX(is, b, wstats),
- &wstats->stat_info);
- }
- }
- }
-}
-
-void census_window_stats_destroy(window_stats *wstats) {
- int i;
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- int b;
- for (b = 0; b < wstats->nbuckets; b++) {
- gpr_free(wstats->interval_stats[i].buckets[b].statistic);
- }
- gpr_free(wstats->interval_stats[i].buckets);
- }
- gpr_free(wstats->interval_stats);
- /* Ensure any use-after free triggers assert. */
- wstats->interval_stats = NULL;
- gpr_free(wstats);
-}
diff --git a/src/core/ext/census/window_stats.h b/src/core/ext/census/window_stats.h
deleted file mode 100644
index 2a1d6d0d16..0000000000
--- a/src/core/ext/census/window_stats.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
-#define GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
-
-#include <grpc/support/time.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Keep rolling sums of a user-defined statistic (containing a number of
-   measurements) over a number of time intervals ("windows"). For example,
- you can use a window_stats object to answer questions such as
- "Approximately how many RPCs/s did I receive over the past minute, and
- approximately how many bytes did I send out over that period?".
-
- The type of data to record, and the time intervals to keep are specified
- when creating the object via a call to census_window_stats_create().
-
- A window's interval is divided into one or more "buckets"; the interval
- must be divisible by the number of buckets. Internally, these buckets
- control the granularity of window_stats' measurements. Increasing the
- number of buckets lets the object respond more quickly to changes in the
- overall rate of data added into the object, at the cost of additional
- memory usage.
-
- Here's some code which keeps one minute/hour measurements for two values
- (latency in seconds and bytes transferred), with each interval divided into
- 4 buckets.
-
- typedef struct my_stat {
- double latency;
- int bytes;
- } my_stat;
-
- void add_my_stat(void* base, const void* addme) {
- my_stat* b = (my_stat*)base;
- const my_stat* a = (const my_stat*)addme;
- b->latency += a->latency;
- b->bytes += a->bytes;
- }
-
-   void add_proportion_my_stat(double p, void* base, const void* addme) {
-     my_stat* b = (my_stat*)base;
-     const my_stat* a = (const my_stat*)addme;
-     b->latency += p * a->latency;
-     b->bytes += (int)(p * a->bytes);
-   }
-
- #define kNumIntervals 2
- #define kMinInterval 0
- #define kHourInterval 1
- #define kNumBuckets 4
-
- const struct census_window_stats_stat_info kMyStatInfo
- = { sizeof(my_stat), NULL, add_my_stat, add_proportion_my_stat };
- gpr_timespec intervals[kNumIntervals] = {{60, 0}, {3600, 0}};
- my_stat stat;
- my_stat sums[kNumIntervals];
- census_window_stats_sums result[kNumIntervals];
- struct census_window_stats* stats
- = census_window_stats_create(kNumIntervals, intervals, kNumBuckets,
- &kMyStatInfo);
- // Record a new event, taking 15.3ms, transferring 1784 bytes.
- stat.latency = 0.153;
- stat.bytes = 1784;
- census_window_stats_add(stats, gpr_now(GPR_CLOCK_REALTIME), &stat);
- // Get sums and print them out
- result[kMinInterval].statistic = &sums[kMinInterval];
- result[kHourInterval].statistic = &sums[kHourInterval];
- census_window_stats_get_sums(stats, gpr_now(GPR_CLOCK_REALTIME), result);
-   printf("%g events/min, average time %gs, average bytes %g\n",
-          result[kMinInterval].count,
-          ((my_stat*)result[kMinInterval].statistic)->latency /
-            result[kMinInterval].count,
-          ((my_stat*)result[kMinInterval].statistic)->bytes /
-            result[kMinInterval].count
-          );
-   printf("%g events/hr, average time %gs, average bytes %g\n",
-          result[kHourInterval].count,
-          ((my_stat*)result[kHourInterval].statistic)->latency /
-            result[kHourInterval].count,
-          ((my_stat*)result[kHourInterval].statistic)->bytes /
-            result[kHourInterval].count
-          );
-*/
-
-/* Opaque structure for representing window_stats object */
-struct census_window_stats;
-
-/* Information provided by API user on the information they want to record */
-typedef struct census_window_stats_stat_info {
- /* Number of bytes in user-defined object. */
- size_t stat_size;
- /* Function to initialize a user-defined statistics object. If this is set
- * to NULL, then the object will be zero-initialized. */
- void (*stat_initialize)(void *stat);
- /* Function to add one user-defined statistics object ('addme') to 'base' */
- void (*stat_add)(void *base, const void *addme);
- /* As for previous function, but only add a proportion 'p'. This API will
- currently only use 'p' values in the range [0,1], but other values are
- possible in the future, and should be supported. */
- void (*stat_add_proportion)(double p, void *base, const void *addme);
-} census_window_stats_stat_info;
-
-/* Create a new window_stats object. 'nintervals' is the number of
- 'intervals', and must be >=1. 'granularity' is the number of buckets, with
- a larger number using more memory, but providing greater accuracy of
-   results. 'granularity' should be > 2. We also require that each interval be
- at least 10 * 'granularity' nanoseconds in size. 'stat_info' contains
- information about the statistic to be gathered. Intervals greater than ~192
- years will be treated as essentially infinite in size. This function will
- GPR_ASSERT() if the object cannot be created or any of the parameters have
- invalid values. This function is thread-safe. */
-struct census_window_stats *census_window_stats_create(
- int nintervals, const gpr_timespec intervals[], int granularity,
- const census_window_stats_stat_info *stat_info);
-
-/* Add a new measurement (in 'stat_value'), as of a given time ('when').
- This function is thread-compatible. */
-void census_window_stats_add(struct census_window_stats *wstats,
- const gpr_timespec when, const void *stat_value);
-
-/* Structure used to record a single interval's sum for a given statistic. */
-typedef struct census_window_stats_sum {
- /* Total count of samples. Note that because some internal interpolation
- is performed, the count of samples returned for each interval may not be an
- integral value. */
- double count;
- /* Sum for statistic */
- void *statistic;
-} census_window_stats_sums;
-
-/* Retrieve a set of all values stored in a window_stats object 'wstats'. The
- number of 'sums' MUST be the same as the number 'nintervals' used in
- census_window_stats_create(). This function is thread-compatible. */
-void census_window_stats_get_sums(const struct census_window_stats *wstats,
- const gpr_timespec when,
- struct census_window_stats_sum sums[]);
-
-/* Destroy a window_stats object. Once this function has been called, the
- object will no longer be usable from any of the above functions (and
- calling them will most likely result in a NULL-pointer dereference or
- assertion failure). This function is thread-compatible. */
-void census_window_stats_destroy(struct census_window_stats *wstats);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H */
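The window_stats API deleted above follows a create/add/get_sums/destroy lifecycle built around a user-supplied aggregation vtable. A minimal sketch of that lifecycle, assuming the hypothetical two-field my_stat aggregate from the removed header comment and the standard gpr time helpers (none of this sketch is part of the change itself):

#include <grpc/support/time.h>
#include "src/core/ext/census/window_stats.h"

typedef struct { double latency; double bytes; } my_stat;

static void my_stat_add(void* base, const void* addme) {
  my_stat* b = (my_stat*)base;
  const my_stat* a = (const my_stat*)addme;
  b->latency += a->latency;
  b->bytes += a->bytes;
}

static void my_stat_add_proportion(double p, void* base, const void* addme) {
  my_stat* b = (my_stat*)base;
  const my_stat* a = (const my_stat*)addme;
  b->latency += p * a->latency;
  b->bytes += p * a->bytes;
}

static const census_window_stats_stat_info my_stat_info = {
    sizeof(my_stat), NULL /* zero-initialize */, my_stat_add,
    my_stat_add_proportion};

static void window_stats_example(void) {
  const gpr_timespec intervals[] = {
      gpr_time_from_seconds(60, GPR_TIMESPAN),    /* minute window */
      gpr_time_from_seconds(3600, GPR_TIMESPAN)}; /* hour window */
  struct census_window_stats* stats = census_window_stats_create(
      2, intervals, 100 /* granularity */, &my_stat_info);
  my_stat sample = {0.01 /* seconds */, 512 /* bytes */};
  census_window_stats_add(stats, gpr_now(GPR_CLOCK_REALTIME), &sample);
  /* The caller provides per-interval storage for the aggregated statistic. */
  my_stat totals[2] = {{0, 0}, {0, 0}};
  census_window_stats_sums sums[2];
  sums[0].statistic = &totals[0];
  sums[1].statistic = &totals[1];
  census_window_stats_get_sums(stats, gpr_now(GPR_CLOCK_REALTIME), sums);
  census_window_stats_destroy(stats);
}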
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
new file mode 100644
index 0000000000..466bf86bc0
--- /dev/null
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -0,0 +1,158 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+#define DEFAULT_POLL_INTERVAL_MS 5000
+
+typedef struct backup_poller {
+ grpc_timer polling_timer;
+ grpc_closure run_poller_closure;
+ grpc_closure shutdown_closure;
+ gpr_mu* pollset_mu;
+ grpc_pollset* pollset; // guarded by pollset_mu
+ bool shutting_down; // guarded by pollset_mu
+ gpr_refcount refs;
+ gpr_refcount shutdown_refs;
+} backup_poller;
+
+static gpr_once g_once = GPR_ONCE_INIT;
+static gpr_mu g_poller_mu;
+static backup_poller* g_poller = NULL; // guarded by g_poller_mu
+// g_poll_interval_ms is set only once, the first time
+// grpc_client_channel_start_backup_polling() is called; after that it is
+// treated as const.
+static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS;
+
+static void init_globals() {
+ gpr_mu_init(&g_poller_mu);
+ char* env = gpr_getenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS");
+ if (env != NULL) {
+ int poll_interval_ms = gpr_parse_nonnegative_int(env);
+ if (poll_interval_ms == -1) {
+ gpr_log(GPR_ERROR,
+ "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %s, "
+ "default value %d will be used.",
+ env, g_poll_interval_ms);
+ } else {
+ g_poll_interval_ms = poll_interval_ms;
+ }
+ }
+ gpr_free(env);
+}
+
+static void backup_poller_shutdown_unref(grpc_exec_ctx* exec_ctx,
+ backup_poller* p) {
+ if (gpr_unref(&p->shutdown_refs)) {
+ grpc_pollset_destroy(exec_ctx, p->pollset);
+ gpr_free(p->pollset);
+ gpr_free(p);
+ }
+}
+
+static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ backup_poller_shutdown_unref(exec_ctx, (backup_poller*)arg);
+}
+
+static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
+ if (gpr_unref(&g_poller->refs)) {
+ gpr_mu_lock(&g_poller_mu);
+ backup_poller* p = g_poller;
+ g_poller = NULL;
+ gpr_mu_unlock(&g_poller_mu);
+ gpr_mu_lock(p->pollset_mu);
+ p->shutting_down = true;
+ grpc_pollset_shutdown(exec_ctx, p->pollset,
+ GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller,
+ p, grpc_schedule_on_exec_ctx));
+ gpr_mu_unlock(p->pollset_mu);
+ grpc_timer_cancel(exec_ctx, &p->polling_timer);
+ }
+}
+
+static void run_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ backup_poller* p = (backup_poller*)arg;
+ if (error != GRPC_ERROR_NONE) {
+ if (error != GRPC_ERROR_CANCELLED) {
+ GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
+ }
+ backup_poller_shutdown_unref(exec_ctx, p);
+ return;
+ }
+ gpr_mu_lock(p->pollset_mu);
+ if (p->shutting_down) {
+ gpr_mu_unlock(p->pollset_mu);
+ backup_poller_shutdown_unref(exec_ctx, p);
+ return;
+ }
+ grpc_error* err = grpc_pollset_work(exec_ctx, p->pollset, NULL,
+ grpc_exec_ctx_now(exec_ctx));
+ gpr_mu_unlock(p->pollset_mu);
+ GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
+ grpc_timer_init(exec_ctx, &p->polling_timer,
+ grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+ &p->run_poller_closure);
+}
+
+void grpc_client_channel_start_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+ gpr_once_init(&g_once, init_globals);
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_poller_mu);
+ if (g_poller == NULL) {
+ g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
+ g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+ g_poller->shutting_down = false;
+ grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
+ gpr_ref_init(&g_poller->refs, 0);
+ // one for timer cancellation, one for pollset shutdown
+ gpr_ref_init(&g_poller->shutdown_refs, 2);
+ GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
+ grpc_schedule_on_exec_ctx);
+ grpc_timer_init(exec_ctx, &g_poller->polling_timer,
+ grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+ &g_poller->run_poller_closure);
+ }
+ gpr_ref(&g_poller->refs);
+ gpr_mu_unlock(&g_poller_mu);
+ grpc_pollset_set_add_pollset(exec_ctx, interested_parties, g_poller->pollset);
+}
+
+void grpc_client_channel_stop_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ grpc_pollset_set_del_pollset(exec_ctx, interested_parties, g_poller->pollset);
+ g_poller_unref(exec_ctx);
+}
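The backup poller added above is a single, lazily created global: the first channel to call grpc_client_channel_start_backup_polling() allocates the pollset and arms the timer, and the refcount tracks how many channels are sharing it. The poll interval is read exactly once from GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS, and a value of 0 disables backup polling entirely. A sketch of tuning it at process startup; gpr_setenv() from src/core/lib/support/env.h is used purely for illustration, and an ordinary environment variable set outside the process works just as well:

#include <grpc/grpc.h>
#include "src/core/lib/support/env.h"

void tune_backup_polling(void) {
  /* Must run before the first client channel is created, since the value is
     latched in init_globals() via gpr_once. "0" disables backup polling. */
  gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1000");
  grpc_init(); /* channels created after this point use the 1s interval */
}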
diff --git a/src/core/ext/census/grpc_filter.h b/src/core/ext/filters/client_channel/backup_poller.h
index 7940363061..e993d50639 100644
--- a/src/core/ext/census/grpc_filter.h
+++ b/src/core/ext/filters/client_channel/backup_poller.h
@@ -16,22 +16,19 @@
*
*/
-#ifndef GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
-#define GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
+#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* Start polling \a interested_parties periodically in the timer thread */
+void grpc_client_channel_start_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
-/* Census filters: provides tracing and stats collection functionalities. It
- needs to reside right below the surface filter in the channel stack. */
-extern const grpc_channel_filter grpc_client_census_filter;
-extern const grpc_channel_filter grpc_server_census_filter;
+/* Stop polling \a interested_parties */
+void grpc_client_channel_stop_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H */
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */
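The two entry points above are intended to be paired over the lifetime of whatever owns the pollset_set; the client_channel changes below do exactly that from its channel-element init/destroy hooks. A condensed sketch of the pairing for a hypothetical filter (my_channel_data and the callback names are illustrative):

#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/lib/channel/channel_stack.h"

typedef struct {
  grpc_pollset_set* interested_parties;
} my_channel_data;

static grpc_error* my_init_channel_elem(grpc_exec_ctx* exec_ctx,
                                        grpc_channel_element* elem,
                                        grpc_channel_element_args* args) {
  my_channel_data* chand = (my_channel_data*)elem->channel_data;
  chand->interested_parties = grpc_pollset_set_create();
  /* Keeps the shared backup poller alive and polling on our pollset_set. */
  grpc_client_channel_start_backup_polling(exec_ctx,
                                           chand->interested_parties);
  return GRPC_ERROR_NONE;
}

static void my_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                    grpc_channel_element* elem) {
  my_channel_data* chand = (my_channel_data*)elem->channel_data;
  grpc_client_channel_stop_backup_polling(exec_ctx,
                                          chand->interested_parties);
  grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
}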
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 22c2bc8880..00c51ba543 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -31,6 +31,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
+#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
@@ -712,6 +713,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
+ grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@@ -790,6 +792,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
+ grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
@@ -898,7 +901,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
+ "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
grpc_error_string(error));
}
@@ -940,7 +943,7 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
@@ -1205,6 +1208,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
+
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
@@ -1228,7 +1234,7 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
chand, calld);
}
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else {
+ } else if (chand->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
@@ -1242,6 +1248,30 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
+ // TODO(roth): It should be impossible for chand->lb_policy to be NULL
+ // here, so the rest of this code should never actually be executed.
+ // However, we have reports of a crash on iOS that triggers this case,
+ // so we are temporarily adding this to restore branches that were
+  // removed in https://github.com/grpc/grpc/pull/12297. We still need to
+  // determine what is actually causing this and then decide on the right
+  // way to deal with it.
+ else if (chand->resolver != NULL) {
+ // No LB policy, so try again.
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: resolver returned but no LB policy, "
+ "trying again",
+ chand, calld);
+ }
+ pick_after_resolver_result_start_locked(exec_ctx, elem);
+ } else {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
+ calld);
+ }
+ async_pick_done_locked(
+ exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ }
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
@@ -1599,8 +1629,8 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
-static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
@@ -1619,8 +1649,8 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
- GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
- grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
+ grpc_combiner_scheduler(w->chand->combiner));
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
} else {
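The last hunk above reschedules the external connectivity-watch completion from the exec_ctx scheduler onto the channel's combiner, so it now runs serialized with the other *_locked callbacks of the same channel. A minimal sketch of that pattern (my_watch_state and the function names are illustrative; only grpc_closure, GRPC_CLOSURE_INIT and grpc_combiner_scheduler come from the tree):

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"

typedef struct {
  grpc_combiner* combiner; /* owned elsewhere, e.g. by the channel */
  grpc_closure on_done;
} my_watch_state;

static void my_watch_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
  /* Runs under the combiner: safe to touch combiner-guarded channel state. */
}

static void my_arm_watch(my_watch_state* st) {
  GRPC_CLOSURE_INIT(&st->on_done, my_watch_done_locked, st,
                    grpc_combiner_scheduler(st->combiner));
  /* ...then hand &st->on_done to the connectivity-state watch. */
}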
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index ffd58129c6..85e76e68b5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -611,7 +611,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
break;
- case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
@@ -1027,15 +1026,19 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pp);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pping);
pping = next;
}
}
@@ -1786,7 +1789,6 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
switch (glb_policy->lb_channel_connectivity) {
- case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */
@@ -1803,9 +1805,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
break;
}
case GRPC_CHANNEL_IDLE:
- // lb channel inactive (probably shutdown prior to update). Restart lb
- // call to kick the lb channel into gear.
- GPR_ASSERT(glb_policy->lb_call == NULL);
+ // lb channel inactive (probably shutdown prior to update). Restart lb
+ // call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
if (glb_policy->lb_call != NULL) {
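The grpclb shutdown path above now fails each pending pick and ping with an explicit "Channel Shutdown" error and frees the node; the pick_first and round_robin shutdown paths below follow the same drain-and-fail shape. A condensed sketch of that pattern (the pending_pick layout mirrors the one used by these policies; there is no shared helper of this name in the tree):

#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/subchannel.h"

typedef struct my_pending_pick {
  struct my_pending_pick* next;
  grpc_connected_subchannel** target;
  grpc_closure* on_complete;
} my_pending_pick;

static void fail_pending_picks(grpc_exec_ctx* exec_ctx, my_pending_pick* pp,
                               grpc_error* error) {
  while (pp != NULL) {
    my_pending_pick* next = pp->next;
    *pp->target = NULL; /* no subchannel was picked */
    GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
    gpr_free(pp);
    pp = next;
  }
  GRPC_ERROR_UNREF(error); /* consume the caller's ref */
}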
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index b07fc3b720..f0c66c68e1 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -20,6 +20,7 @@
#include <grpc/support/alloc.h>
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
@@ -42,99 +43,73 @@ typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
- grpc_subchannel **subchannels;
- grpc_subchannel **new_subchannels;
- size_t num_subchannels;
- size_t num_new_subchannels;
-
- grpc_closure connectivity_changed;
-
- /** remaining members are protected by the combiner */
-
- /** the selected channel */
- grpc_connected_subchannel *selected;
-
- /** the subchannel key for \a selected, or NULL if \a selected not set */
- const grpc_subchannel_key *selected_key;
-
+ grpc_lb_subchannel_list *subchannel_list;
+ /** latest pending subchannel list */
+ grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ /** selected subchannel in \a subchannel_list */
+ grpc_lb_subchannel_data *selected;
/** have we started picking? */
bool started_picking;
/** are we shut down? */
bool shutdown;
- /** are we updating the selected subchannel? */
- bool updating_selected;
- /** are we updating the subchannel candidates? */
- bool updating_subchannels;
- /** args from the latest update received while already updating, or NULL */
- grpc_lb_policy_args *pending_update_args;
- /** which subchannel are we watching? */
- size_t checking_subchannel;
- /** what is the connectivity of that channel? */
- grpc_connectivity_state checking_connectivity;
/** list of picks that are waiting on connectivity */
pending_pick *pending_picks;
-
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL);
- for (size_t i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first_destroy");
- }
- if (p->selected != NULL) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
- "picked_first_destroy");
- }
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
- grpc_subchannel_index_unref();
- if (p->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
- gpr_free(p->pending_update_args);
- }
- gpr_free(p->subchannels);
- gpr_free(p->new_subchannels);
gpr_free(p);
+ grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
}
}
-static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- p->shutdown = true;
- pp = p->pending_picks;
- p->pending_picks = NULL;
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
- /* cancel subscription */
- if (p->selected != NULL) {
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
- } else if (p->num_subchannels > 0 && p->started_picking) {
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
- &p->connectivity_changed);
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
+ grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
- while (pp != NULL) {
- pending_pick *next = pp->next;
+ p->shutdown = true;
+ pending_pick *pp;
+ while ((pp = p->pending_picks) != NULL) {
+ p->pending_picks = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
- pp = next;
}
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+ "shutdown");
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "pf_shutdown");
+ p->subchannel_list = NULL;
+ }
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
+ p->latest_pending_subchannel_list = NULL;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
}
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
+ pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@@ -158,8 +133,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
+ pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@@ -181,15 +155,12 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
p->started_picking = true;
- if (p->subchannels != NULL) {
- GPR_ASSERT(p->num_subchannels > 0);
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
+ if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
+ p->subchannel_list->checking_subchannel = 0;
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ p->subchannel_list, "connectivity_watch+start_picking");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &p->subchannel_list->subchannels[0]);
}
}
@@ -206,19 +177,17 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
-
- /* Check atomically for a selected channel */
+ // If we have a selected subchannel already, return synchronously.
if (p->selected != NULL) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
+ "picked");
return 1;
}
-
- /* No subchannel selected yet, so try again */
+ // No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -227,19 +196,15 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- size_t num_subchannels = p->num_subchannels;
- grpc_subchannel **subchannels = p->subchannels;
-
- p->num_subchannels = 0;
- p->subchannels = NULL;
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
-
- for (size_t i = 0; i < num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
+static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
+ for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
+ grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
+ if (p->selected != sd) {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "selected_different_subchannel");
+ }
}
- gpr_free(subchannels);
}
static grpc_connectivity_state pf_check_connectivity_locked(
@@ -261,46 +226,24 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
if (p->selected) {
- grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
+ grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
+ closure);
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
-/* unsubscribe all subchannels */
-static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- if (p->num_subchannels > 0) {
- GPR_ASSERT(p->selected == NULL);
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
- (void *)p, (void *)p->subchannels[p->checking_subchannel]);
- }
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
- &p->connectivity_changed);
- p->updating_subchannels = true;
- } else if (p->selected != NULL) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG,
- "Pick First %p unsubscribing from selected subchannel %p",
- (void *)p, (void *)p->selected);
- }
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
- p->updating_selected = true;
- }
-}
+static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
-/* true upon success */
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- if (p->subchannels == NULL) {
+ if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -317,270 +260,222 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
- if (addresses->num_addresses == 0) {
- // Empty update. Unsubscribe from all current subchannels and put the
- // channel in TRANSIENT_FAILURE.
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
+ (void *)p, (unsigned long)addresses->num_addresses);
+ }
+ grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
+ pf_connectivity_changed_locked);
+ if (subchannel_list->num_subchannels == 0) {
+ // Empty update or no valid subchannels. Unsubscribe from all current
+ // subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
- stop_connectivity_watchers(exec_ctx, p);
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_empty_update");
+ }
+ p->subchannel_list = subchannel_list; // Empty list.
+ p->selected = NULL;
return;
}
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void *)p, (unsigned long)addresses->num_addresses);
- }
- grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc(
- sizeof(*sc_args) * addresses->num_addresses);
- /* We remove the following keys in order for subchannel keys belonging to
- * subchannels point to the same address to match. */
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
- GRPC_ARG_LB_ADDRESSES};
- size_t sc_args_count = 0;
-
- /* Create list of subchannel args for new addresses in \a args. */
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- // If there were any balancer, we would have chosen grpclb policy instead.
- GPR_ASSERT(!addresses->addresses[i].is_balancer);
- if (addresses->addresses[i].user_data != NULL) {
- gpr_log(GPR_ERROR,
- "This LB policy doesn't support user data. It will be ignored");
+ if (p->selected == NULL) {
+ // We don't yet have a selected subchannel, so replace the current
+ // subchannel list immediately.
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "pf_update_before_selected");
}
- grpc_arg addr_arg =
- grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
- 1);
- gpr_free(addr_arg.value.string);
- sc_args[sc_args_count++].args = new_args;
- }
-
- /* Check if p->selected is amongst them. If so, we are done. */
- if (p->selected != NULL) {
- GPR_ASSERT(p->selected_key != NULL);
- for (size_t i = 0; i < sc_args_count; i++) {
- grpc_subchannel_key *ith_sc_key = grpc_subchannel_key_create(&sc_args[i]);
- const bool found_selected =
- grpc_subchannel_key_compare(p->selected_key, ith_sc_key) == 0;
- grpc_subchannel_key_destroy(exec_ctx, ith_sc_key);
- if (found_selected) {
+ p->subchannel_list = subchannel_list;
+ } else {
+ // We do have a selected subchannel.
+ // Check if it's present in the new list. If so, we're done.
+ for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
- "Pick First %p found already selected subchannel %p amongst "
- "updates. Update done.",
- (void *)p, (void *)p->selected);
+ "Pick First %p found already selected subchannel %p "
+ "at update index %" PRIuPTR " of %" PRIuPTR "; update done",
+ p, p->selected->subchannel, i,
+ subchannel_list->num_subchannels);
}
- for (size_t j = 0; j < sc_args_count; j++) {
- grpc_channel_args_destroy(exec_ctx,
- (grpc_channel_args *)sc_args[j].args);
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ subchannel_list, "connectivity_watch+replace_selected");
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "pf_update_includes_selected");
+ }
+ p->subchannel_list = subchannel_list;
+ if (p->selected->connected_subchannel != NULL) {
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "pf_update_includes_selected");
+ }
+ p->selected = sd;
+ destroy_unselected_subchannels_locked(exec_ctx, p);
+ // If there was a previously pending update (which may or may
+ // not have contained the currently selected subchannel), drop
+ // it, so that it doesn't override what we've done here.
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "pf_update_includes_selected+outdated");
+ p->latest_pending_subchannel_list = NULL;
}
- gpr_free(sc_args);
return;
}
}
- }
- // We only check for already running updates here because if the previous
- // steps were successful, the update can be considered done without any
- // interference (ie, no callbacks were scheduled).
- if (p->updating_selected || p->updating_subchannels) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Update already in progress for pick first %p. Deferring update.",
- (void *)p);
- }
- if (p->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
- gpr_free(p->pending_update_args);
- }
- p->pending_update_args =
- (grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args));
- p->pending_update_args->client_channel_factory =
- args->client_channel_factory;
- p->pending_update_args->args = grpc_channel_args_copy(args->args);
- p->pending_update_args->combiner = args->combiner;
- return;
- }
- /* Create the subchannels for the new subchannel args/addresses. */
- grpc_subchannel **new_subchannels =
- (grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
- size_t num_new_subchannels = 0;
- for (size_t i = 0; i < sc_args_count; i++) {
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
- exec_ctx, args->client_channel_factory, &sc_args[i]);
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- char *address_uri =
- grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_INFO,
- "Pick First %p created subchannel %p for address uri %s",
- (void *)p, (void *)subchannel, address_uri);
- gpr_free(address_uri);
+ // Not keeping the previous selected subchannel, so set the latest
+ // pending subchannel list to the new subchannel list. We will wait
+ // for it to report READY before swapping it into the current
+ // subchannel list.
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG,
+ "Pick First %p Shutting down latest pending subchannel list "
+ "%p, about to be replaced by newer latest %p",
+ (void *)p, (void *)p->latest_pending_subchannel_list,
+ (void *)subchannel_list);
+ }
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "sl_outdated_dont_smash");
}
- grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)sc_args[i].args);
- if (subchannel != NULL) new_subchannels[num_new_subchannels++] = subchannel;
+ p->latest_pending_subchannel_list = subchannel_list;
}
- gpr_free(sc_args);
- if (num_new_subchannels == 0) {
- gpr_free(new_subchannels);
- // Empty update. Unsubscribe from all current subchannels and put the
- // channel in TRANSIENT_FAILURE.
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid addresses in update"),
- "pf_update_no_valid_addresses");
- stop_connectivity_watchers(exec_ctx, p);
- return;
- }
-
- /* Destroy the current subchannels. Repurpose pf_shutdown/destroy. */
- stop_connectivity_watchers(exec_ctx, p);
-
- /* Save new subchannels. The switch over will happen in
- * pf_connectivity_changed_locked */
- if (p->updating_selected || p->updating_subchannels) {
- p->num_new_subchannels = num_new_subchannels;
- p->new_subchannels = new_subchannels;
- } else { /* nothing is updating. Get things moving from here */
- p->num_subchannels = num_new_subchannels;
- p->subchannels = new_subchannels;
- p->new_subchannels = NULL;
- p->num_new_subchannels = 0;
- if (p->started_picking) {
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- }
+ // If we've started picking, start trying to connect to the first
+ // subchannel in the new list.
+ if (p->started_picking) {
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ subchannel_list, "connectivity_watch+update");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &subchannel_list->subchannels[0]);
}
}
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)arg;
- grpc_subchannel *selected_subchannel;
- pending_pick *pp;
-
+ grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
+ pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(
- GPR_DEBUG,
- "Pick First %p connectivity changed. Updating selected: %d; Updating "
- "subchannels: %d; Checking %lu index (%lu total); State: %d; ",
- (void *)p, p->updating_selected, p->updating_subchannels,
- (unsigned long)p->checking_subchannel,
- (unsigned long)p->num_subchannels, p->checking_connectivity);
- }
- bool restart = false;
- if (p->updating_selected && error != GRPC_ERROR_NONE) {
- /* Captured the unsubscription for p->selected */
- GPR_ASSERT(p->selected != NULL);
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
- "pf_update_connectivity");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p",
- (void *)p, (void *)p->selected);
- }
- p->updating_selected = false;
- if (p->num_new_subchannels == 0) {
- p->selected = NULL;
- return;
- }
- restart = true;
+ gpr_log(GPR_DEBUG,
+ "Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
+ " of %" PRIuPTR
+ "), subchannel_list %p: state=%s p->shutdown=%d "
+ "sd->subchannel_list->shutting_down=%d error=%s",
+ (void *)p, (void *)sd->subchannel,
+ sd->subchannel_list->checking_subchannel,
+ sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
+ grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
+ p->shutdown, sd->subchannel_list->shutting_down,
+ grpc_error_string(error));
}
- if (p->updating_subchannels && error != GRPC_ERROR_NONE) {
- /* Captured the unsubscription for the checking subchannel */
- GPR_ASSERT(p->selected == NULL);
- for (size_t i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
- "pf_update_connectivity");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
- (void *)p->subchannels[i]);
- }
- }
- gpr_free(p->subchannels);
- p->subchannels = NULL;
- p->num_subchannels = 0;
- p->updating_subchannels = false;
- if (p->num_new_subchannels == 0) return;
- restart = true;
- }
- if (restart) {
- p->selected = NULL;
- p->selected_key = NULL;
- GPR_ASSERT(p->new_subchannels != NULL);
- GPR_ASSERT(p->num_new_subchannels > 0);
- p->num_subchannels = p->num_new_subchannels;
- p->subchannels = p->new_subchannels;
- p->num_new_subchannels = 0;
- p->new_subchannels = NULL;
- if (p->started_picking) {
- /* If we were picking, continue to do so over the new subchannels,
- * starting from the 0th index. */
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- /* reuses the weak ref from start_picking_locked */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- }
- if (p->pending_update_args != NULL) {
- const grpc_lb_policy_args *args = p->pending_update_args;
- p->pending_update_args = NULL;
- pf_update_locked(exec_ctx, &p->base, args);
- }
+ // If the policy is shutting down, unref and return.
+ if (p->shutdown) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_shutdown");
return;
}
- GRPC_ERROR_REF(error);
- if (p->shutdown) {
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
- GRPC_ERROR_UNREF(error);
+ // If the subchannel list is shutting down, stop watching.
+ if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_sl_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_sl_shutdown");
return;
- } else if (p->selected != NULL) {
- if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
- /* if the selected channel goes bad, we're done */
- p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
- }
- grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
- p->checking_connectivity, GRPC_ERROR_REF(error),
- "selected_changed");
- if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, p->base.interested_parties,
- &p->checking_connectivity, &p->connectivity_changed);
+ }
+ // If we're still here, the notification must be for a subchannel in
+ // either the current or latest pending subchannel lists.
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
+ sd->subchannel_list == p->latest_pending_subchannel_list);
+ // Update state.
+ sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
+ // Handle updates for the currently selected subchannel.
+ if (p->selected == sd) {
+ // If the new state is anything other than READY and there is a
+ // pending update, switch to the pending update.
+ if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
+ p->latest_pending_subchannel_list != NULL) {
+ p->selected = NULL;
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
+ p->subchannel_list = p->latest_pending_subchannel_list;
+ p->latest_pending_subchannel_list = NULL;
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
} else {
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* if the selected channel goes bad, we're done */
+ sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ sd->curr_connectivity_state,
+ GRPC_ERROR_REF(error), "selected_changed");
+ if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ } else {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
+ shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
+ }
}
- } else {
- loop:
- switch (p->checking_connectivity) {
- case GRPC_CHANNEL_INIT:
- GPR_UNREACHABLE_CODE(return );
- case GRPC_CHANNEL_READY:
+ return;
+ }
+ // If we get here, there are two possible cases:
+ // 1. We do not currently have a selected subchannel, and the update is
+ // for a subchannel in p->subchannel_list that we're trying to
+ // connect to. The goal here is to find a subchannel that we can
+ // select.
+ // 2. We do currently have a selected subchannel, and the update is
+ // for a subchannel in p->latest_pending_subchannel_list. The
+ // goal here is to find a subchannel from the update that we can
+ // select in place of the current one.
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
+ sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ }
+ while (true) {
+ switch (sd->curr_connectivity_state) {
+ case GRPC_CHANNEL_READY: {
+ // Case 2. Promote p->latest_pending_subchannel_list to
+ // p->subchannel_list.
+ if (sd->subchannel_list == p->latest_pending_subchannel_list) {
+ GPR_ASSERT(p->subchannel_list != NULL);
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "finish_update");
+ p->subchannel_list = p->latest_pending_subchannel_list;
+ p->latest_pending_subchannel_list = NULL;
+ }
+ // Cases 1 and 2.
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready");
- selected_subchannel = p->subchannels[p->checking_subchannel];
- p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected_subchannel),
- "picked_first");
-
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(sd->subchannel),
+ "connected");
+ p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Pick First %p selected subchannel %p (connected %p)",
- (void *)p, (void *)selected_subchannel, (void *)p->selected);
+ gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
+ (void *)sd->subchannel);
}
- p->selected_key = grpc_subchannel_get_key(selected_subchannel);
- /* drop the pick list: we are connected now */
- GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
- destroy_subchannels_locked(exec_ctx, p);
- /* update any calls that were waiting for a pick */
+ // Drop all other subchannels, since we are now connected.
+ destroy_unselected_subchannels_locked(exec_ctx, p);
+ // Update any calls that were waiting for a pick.
+ pending_pick *pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "picked");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
@@ -589,76 +484,86 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, p->base.interested_parties,
- &p->checking_connectivity, &p->connectivity_changed);
- break;
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- p->checking_subchannel =
- (p->checking_subchannel + 1) % p->num_subchannels;
- if (p->checking_subchannel == 0) {
- /* only trigger transient failure when we've tried all alternatives
- */
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ case GRPC_CHANNEL_TRANSIENT_FAILURE: {
+ do {
+ sd->subchannel_list->checking_subchannel =
+ (sd->subchannel_list->checking_subchannel + 1) %
+ sd->subchannel_list->num_subchannels;
+ sd = &sd->subchannel_list
+ ->subchannels[sd->subchannel_list->checking_subchannel];
+ } while (sd->subchannel == NULL);
+ // Case 1: Only set state to TRANSIENT_FAILURE if we've tried
+ // all subchannels.
+ if (sd->subchannel_list->checking_subchannel == 0 &&
+ sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
}
+ sd->curr_connectivity_state =
+ grpc_subchannel_check_connectivity(sd->subchannel, &error);
GRPC_ERROR_UNREF(error);
- p->checking_connectivity = grpc_subchannel_check_connectivity(
- p->subchannels[p->checking_subchannel], &error);
- if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- } else {
- goto loop;
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ // Reuses the connectivity refs from the previous watch.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
}
- break;
+ break; // Go back to top of loop.
+ }
case GRPC_CHANNEL_CONNECTING:
- case GRPC_CHANNEL_IDLE:
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
- GRPC_ERROR_REF(error), "connecting_changed");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- break;
- case GRPC_CHANNEL_SHUTDOWN:
- p->num_subchannels--;
- GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
- p->subchannels[p->num_subchannels]);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
- "pick_first");
- if (p->num_subchannels == 0) {
+ case GRPC_CHANNEL_IDLE: {
+ // Only update connectivity state in case 1.
+ if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick first exhausted channels", &error, 1),
- "no_more_channels");
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
- "pick_first_connectivity");
- } else {
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
+ GRPC_ERROR_REF(error), "connecting_changed");
+ }
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ case GRPC_CHANNEL_SHUTDOWN: {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "pf_candidate_shutdown");
+ // Advance to next subchannel and check its state.
+ grpc_lb_subchannel_data *original_sd = sd;
+ do {
+ sd->subchannel_list->checking_subchannel =
+ (sd->subchannel_list->checking_subchannel + 1) %
+ sd->subchannel_list->num_subchannels;
+ sd = &sd->subchannel_list
+ ->subchannels[sd->subchannel_list->checking_subchannel];
+ } while (sd->subchannel == NULL && sd != original_sd);
+ if (sd == original_sd) {
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
+ shutdown_locked(exec_ctx, p,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick first exhausted channels", &error, 1));
+ return;
+ }
+ if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "subchannel_failed");
- p->checking_subchannel %= p->num_subchannels;
- GRPC_ERROR_UNREF(error);
- p->checking_connectivity = grpc_subchannel_check_connectivity(
- p->subchannels[p->checking_subchannel], &error);
- goto loop;
}
+ sd->curr_connectivity_state =
+ grpc_subchannel_check_connectivity(sd->subchannel, &error);
+ GRPC_ERROR_UNREF(error);
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ // Reuses the connectivity refs from the previous watch.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ // For any other state, go back to top of loop.
+ // We will reuse the connectivity refs from the previous watch.
+ }
}
}
-
- GRPC_ERROR_UNREF(error);
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
@@ -688,8 +593,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
- GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
- grpc_combiner_scheduler(args->combiner));
return &p->base;
}
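Both rewritten policies lean on the new grpc_lb_subchannel_list helpers for the connectivity-watch lifecycle: a list-level ref is taken before a watch is started and dropped only once that watch is permanently stopped. A condensed sketch of the pairing as used above (the wrapper names start_watch/stop_watch are illustrative; the grpc_lb_* calls are the ones introduced by subchannel_list.h in this change):

#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"

static void start_watch(grpc_exec_ctx* exec_ctx,
                        grpc_lb_subchannel_list* list, size_t idx,
                        const char* reason) {
  /* One list ref per in-flight watch. */
  grpc_lb_subchannel_list_ref_for_connectivity_watch(list, reason);
  grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx,
                                                   &list->subchannels[idx]);
}

static void stop_watch(grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd,
                       const char* reason) {
  /* Cancel the watch, release the subchannel, then drop the list ref. */
  grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
  grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
  grpc_lb_subchannel_list_unref_for_connectivity_watch(
      exec_ctx, sd->subchannel_list, reason);
}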
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 6812bb50cd..8f29c80130 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -28,6 +28,7 @@
#include <grpc/support/alloc.h>
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
@@ -64,12 +65,11 @@ typedef struct pending_pick {
grpc_closure *on_complete;
} pending_pick;
-typedef struct rr_subchannel_list rr_subchannel_list;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- rr_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list *subchannel_list;
/** have we started picking? */
bool started_picking;
@@ -89,157 +89,9 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
- rr_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list *latest_pending_subchannel_list;
} round_robin_lb_policy;
-typedef struct {
- /** backpointer to owning subchannel list */
- rr_subchannel_list *subchannel_list;
- /** subchannel itself */
- grpc_subchannel *subchannel;
- /** notification that connectivity has changed on subchannel */
- grpc_closure connectivity_changed_closure;
- /** last observed connectivity. Not updated by
- * \a grpc_subchannel_notify_on_state_change. Used to determine the previous
- * state while processing the new state in \a rr_connectivity_changed */
- grpc_connectivity_state prev_connectivity_state;
- /** current connectivity state. Updated by \a
- * grpc_subchannel_notify_on_state_change */
- grpc_connectivity_state curr_connectivity_state;
- /** connectivity state to be updated by the watcher, not guarded by
- * the combiner. Will be moved to curr_connectivity_state inside of
- * the combiner by rr_connectivity_changed_locked(). */
- grpc_connectivity_state pending_connectivity_state_unsafe;
- /** the subchannel's target user data */
- void *user_data;
- /** vtable to operate over \a user_data */
- const grpc_lb_user_data_vtable *user_data_vtable;
-} subchannel_data;
-
-struct rr_subchannel_list {
- /** backpointer to owning policy */
- round_robin_lb_policy *policy;
-
- /** all our subchannels */
- size_t num_subchannels;
- subchannel_data *subchannels;
-
- /** how many subchannels are in state READY */
- size_t num_ready;
- /** how many subchannels are in state TRANSIENT_FAILURE */
- size_t num_transient_failures;
- /** how many subchannels are in state SHUTDOWN */
- size_t num_shutdown;
- /** how many subchannels are in state IDLE */
- size_t num_idle;
-
- /** There will be one ref for each entry in subchannels for which there is a
- * pending connectivity state watcher callback. */
- gpr_refcount refcount;
-
- /** Is this list shutting down? This may be true due to the shutdown of the
- * policy itself or because a newer update has arrived while this one hadn't
- * finished processing. */
- bool shutting_down;
-};
-
-static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
- size_t num_subchannels) {
- rr_subchannel_list *subchannel_list =
- (rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
- subchannel_list->policy = p;
- subchannel_list->subchannels =
- (subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
- subchannel_list->num_subchannels = num_subchannels;
- gpr_ref_init(&subchannel_list->refcount, 1);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels",
- (void *)p, (void *)subchannel_list, (unsigned long)num_subchannels);
- }
- return subchannel_list;
-}
-
-static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
- rr_subchannel_list *subchannel_list) {
- GPR_ASSERT(subchannel_list->shutting_down);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Destroying subchannel_list %p",
- (void *)subchannel_list->policy, (void *)subchannel_list);
- }
- for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &subchannel_list->subchannels[i];
- if (sd->subchannel != NULL) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel,
- "rr_subchannel_list_destroy");
- }
- sd->subchannel = NULL;
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
- sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = NULL;
- }
- }
- gpr_free(subchannel_list->subchannels);
- gpr_free(subchannel_list);
-}
-
-static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list,
- const char *reason) {
- gpr_ref_non_zero(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list,
- (unsigned long)(count - 1), (unsigned long)count, reason);
- }
-}
-
-static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- rr_subchannel_list *subchannel_list,
- const char *reason) {
- const bool done = gpr_unref(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list,
- (unsigned long)(count + 1), (unsigned long)count, reason);
- }
- if (done) {
- rr_subchannel_list_destroy(exec_ctx, subchannel_list);
- }
-}
-
-/** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The
- * watcher's callback will ultimately unref \a subchannel_list. */
-static void rr_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list,
- const char *reason) {
- GPR_ASSERT(!subchannel_list->shutting_down);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list, reason);
- }
- GPR_ASSERT(!subchannel_list->shutting_down);
- subchannel_list->shutting_down = true;
- for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &subchannel_list->subchannels[i];
- if (sd->subchannel != NULL) { // if subchannel isn't shutdown, unsubscribe.
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(
- GPR_DEBUG,
- "[RR %p] Unsubscribing from subchannel %p as part of shutting down "
- "subchannel_list %p",
- (void *)subchannel_list->policy, (void *)sd->subchannel,
- (void *)subchannel_list);
- }
- grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
- NULL,
- &sd->connectivity_changed_closure);
- }
- }
- rr_subchannel_list_unref(exec_ctx, subchannel_list, reason);
-}
-
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY.
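As an illustrative sketch only (the body of this helper is elided from the diff), the contract described above amounts to a wrap-around scan that starts just after the last subchannel handed out, using the policy's last_ready_subchannel_index field referenced in the hunk below:

static size_t get_next_ready_subchannel_index_locked(
    const round_robin_lb_policy *p) {
  // Start just past the last subchannel we picked and wrap around the list.
  for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
    const size_t index = (p->last_ready_subchannel_index + 1 + i) %
                         p->subchannel_list->num_subchannels;
    if (p->subchannel_list->subchannels[index].curr_connectivity_state ==
        GRPC_CHANNEL_READY) {
      return index;
    }
  }
  return p->subchannel_list->num_subchannels;  // no subchannel is READY
}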
@@ -299,8 +151,8 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void *)p, (unsigned long)last_ready_index,
(void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
- (void *)grpc_subchannel_get_connected_subchannel(
- p->subchannel_list->subchannels[last_ready_index].subchannel));
+ (void *)p->subchannel_list->subchannels[last_ready_index]
+ .connected_subchannel);
}
}
@@ -310,42 +162,47 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void *)pol, (void *)pol);
}
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
gpr_free(p);
}
-static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
+ grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
- (void *)pol, (void *)pol);
+ gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick *pp;
- while ((pp = p->pending_picks)) {
+ while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
- const bool latest_is_current =
- p->subchannel_list == p->latest_pending_subchannel_list;
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_shutdown_rr_shutdown");
- p->subchannel_list = NULL;
- if (!latest_is_current && p->latest_pending_subchannel_list != NULL &&
- !p->latest_pending_subchannel_list->shutting_down) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx,
- p->latest_pending_subchannel_list,
- "sl_shutdown_pending_rr_shutdown");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+ "rr_shutdown");
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_rr_shutdown");
+ p->subchannel_list = NULL;
+ }
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "sl_shutdown_pending_rr_shutdown");
p->latest_pending_subchannel_list = NULL;
}
+ GRPC_ERROR_UNREF(error);
+}
+
+static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ shutdown_locked(exec_ctx, p,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@@ -400,13 +257,10 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &p->subchannel_list->subchannels[i];
- GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked");
- rr_subchannel_list_ref(sd->subchannel_list, "started_picking");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
+ "connectivity_watch");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &p->subchannel_list->subchannels[i]);
}
}
@@ -431,10 +285,10 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
- subchannel_data *sd = &p->subchannel_list->subchannels[next_ready_index];
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(sd->subchannel),
- "rr_picked");
+ grpc_lb_subchannel_data *sd =
+ &p->subchannel_list->subchannels[next_ready_index];
+ *target =
+ GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != NULL) {
*user_data = sd->user_data;
}
@@ -465,8 +319,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void update_state_counters_locked(subchannel_data *sd) {
- rr_subchannel_list *subchannel_list = sd->subchannel_list;
+static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
+ grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@@ -480,6 +334,7 @@ static void update_state_counters_locked(subchannel_data *sd) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
}
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
@@ -492,12 +347,12 @@ static void update_state_counters_locked(subchannel_data *sd) {
}
/** Sets the policy's connectivity status based on that of the passed-in \a sd
- * (the subchannel_data associted with the updated subchannel) and the
+ * (the grpc_lb_subchannel_data associated with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -519,8 +374,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
- rr_subchannel_list *subchannel_list = sd->subchannel_list;
- round_robin_lb_policy *p = subchannel_list->policy;
+ grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
+ round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@@ -556,8 +411,9 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- subchannel_data *sd = (subchannel_data *)arg;
- round_robin_lb_policy *p = sd->subchannel_list->policy;
+ grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
+ round_robin_lb_policy *p =
+ (round_robin_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
@@ -572,71 +428,50 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "pol_shutdown+started_picking");
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown");
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_shutdown");
return;
}
- if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) {
- // the subchannel list associated with sd has been discarded. This callback
- // corresponds to the unsubscription. The unrefs correspond to the picking
- // ref (start_picking_locked or update_started_picking).
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "sl_shutdown+started_picking");
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking");
- return;
- }
- // Dispose of outdated subchannel lists.
- if (sd->subchannel_list != p->subchannel_list &&
- sd->subchannel_list != p->latest_pending_subchannel_list) {
- const char *reason = NULL;
- if (sd->subchannel_list->shutting_down) {
- reason = "sl_outdated_straggler";
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);
- } else {
- reason = "sl_outdated";
- rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list,
- reason);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, reason);
+ // If the subchannel list is shutting down, stop watching.
+ if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_sl_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_sl_shutdown");
return;
}
+ // If we're still here, the notification must be for a subchannel in
+ // either the current or latest pending subchannel lists.
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
+ sd->subchannel_list == p->latest_pending_subchannel_list);
// Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Update state counters and determine new overall state.
update_state_counters_locked(sd);
- sd->prev_connectivity_state = sd->curr_connectivity_state;
const grpc_connectivity_state new_policy_connectivity_state =
update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
// If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
// policy's state is SHUTDOWN, clean up.
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
- sd->subchannel = NULL;
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
- sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = NULL;
- }
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "rr_connectivity_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- // the policy is shutting down. Flush all the pending picks...
- pending_pick *pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
+ shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
}
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "sd_shutdown+started_picking");
- // unref the "rr_connectivity_update" weak ref from start_picking.
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
- "rr_connectivity_sd_shutdown");
} else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
+ if (sd->connected_subchannel == NULL) {
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(sd->subchannel),
+ "connected");
+ }
if (sd->subchannel_list != p->subchannel_list) {
// promote sd->subchannel_list to p->subchannel_list.
// sd->subchannel_list must be equal to
@@ -657,8 +492,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_phase_out_shutdown");
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
}
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
@@ -668,7 +503,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
- subchannel_data *selected =
+ grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
@@ -679,8 +514,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected->subchannel),
- "rr_picked");
+ selected->connected_subchannel, "rr_picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
@@ -695,12 +529,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(pp);
}
}
- /* renew notification: reuses the "rr_connectivity_update" weak ref on the
- * policy as well as the sd->subchannel_list ref. */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
}
}
@@ -724,13 +554,12 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
- subchannel_data *selected =
+ grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected->subchannel),
- "rr_picked");
+ selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
@@ -743,130 +572,68 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
+ // If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
+ // Otherwise, keep using the current subchannel list (ignore this update).
if (p->subchannel_list == NULL) {
- // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"rr_update_missing");
- } else {
- // otherwise, keep using the current subchannel list (ignore this update).
- gpr_log(GPR_ERROR,
- "[RR %p] No valid LB addresses channel arg for update, ignoring.",
- (void *)p);
}
return;
}
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
- rr_subchannel_list *subchannel_list =
- rr_subchannel_list_create(p, addresses->num_addresses);
- if (addresses->num_addresses == 0) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
+ addresses->num_addresses);
+ }
+ grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
+ rr_connectivity_changed_locked);
+ if (subchannel_list->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
if (p->subchannel_list != NULL) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_shutdown_empty_update");
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // empty list
return;
}
- size_t subchannel_index = 0;
- if (p->latest_pending_subchannel_list != NULL && p->started_picking) {
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG,
- "[RR %p] Shutting down latest pending subchannel list %p, about "
- "to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
- }
- rr_subchannel_list_shutdown_and_unref(
- exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
- }
- p->latest_pending_subchannel_list = subchannel_list;
- grpc_subchannel_args sc_args;
- /* We need to remove the LB addresses in order to be able to compare the
- * subchannel keys of subchannels from a different batch of addresses. */
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
- GRPC_ARG_LB_ADDRESSES};
- /* Create subchannels for addresses in the update. */
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- // If there were any balancer, we would have chosen grpclb policy instead.
- GPR_ASSERT(!addresses->addresses[i].is_balancer);
- memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- grpc_arg addr_arg =
- grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
- 1);
- gpr_free(addr_arg.value.string);
- sc_args.args = new_args;
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
- exec_ctx, args->client_channel_factory, &sc_args);
- grpc_channel_args_destroy(exec_ctx, new_args);
- grpc_error *error;
- // Get the connectivity state of the subchannel. Already existing ones may
- // be in a state other than INIT.
- const grpc_connectivity_state subchannel_connectivity_state =
- grpc_subchannel_check_connectivity(subchannel, &error);
- if (error != GRPC_ERROR_NONE) {
- // The subchannel is in error (e.g. shutting down). Ignore it.
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error");
- GRPC_ERROR_UNREF(error);
- continue;
- }
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- char *address_uri =
- grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(
- GPR_DEBUG,
- "[RR %p] index %lu: Created subchannel %p for address uri %s into "
- "subchannel_list %p. Connectivity state %s",
- (void *)p, (unsigned long)subchannel_index, (void *)subchannel,
- address_uri, (void *)subchannel_list,
- grpc_connectivity_state_name(subchannel_connectivity_state));
- gpr_free(address_uri);
- }
- subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
- sd->subchannel_list = subchannel_list;
- sd->subchannel = subchannel;
- GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
- rr_connectivity_changed_locked, sd,
- grpc_combiner_scheduler(args->combiner));
- /* use some sentinel value outside of the range of
- * grpc_connectivity_state to signal an undefined previous state. We
- * won't be referring to this value again and it'll be overwritten after
- * the first call to rr_connectivity_changed_locked */
- sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
- sd->curr_connectivity_state = subchannel_connectivity_state;
- sd->user_data_vtable = addresses->user_data_vtable;
- if (sd->user_data_vtable != NULL) {
- sd->user_data =
- sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ if (p->started_picking) {
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG,
+ "[RR %p] Shutting down latest pending subchannel list %p, "
+ "about to be replaced by newer latest %p",
+ (void *)p, (void *)p->latest_pending_subchannel_list,
+ (void *)subchannel_list);
+ }
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
}
- if (p->started_picking) {
- rr_subchannel_list_ref(sd->subchannel_list, "update_started_picking");
- GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity_update");
- /* 2. Watch every new subchannel. A subchannel list becomes active the
+ p->latest_pending_subchannel_list = subchannel_list;
+ for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
+ /* Watch every new subchannel. A subchannel list becomes active the
* moment one of its subchannels is READY. At that moment, we swap
* p->subchannel_list for sd->subchannel_list, provided the subchannel
* list is still valid (ie, isn't shutting down) */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
+ "connectivity_watch");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &subchannel_list->subchannels[i]);
}
- }
- if (!p->started_picking) {
+ } else {
// The policy isn't picking yet. Save the update for later, disposing of
// previous version if any.
if (p->subchannel_list != NULL) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "rr_update_before_started_picking");
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
}
p->subchannel_list = subchannel_list;
- p->latest_pending_subchannel_list = NULL;
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
new file mode 100644
index 0000000000..08ea4f480b
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -0,0 +1,265 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_data *sd,
+ const char *reason) {
+ if (sd->subchannel != NULL) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
+ " of %" PRIuPTR " (subchannel %p): unreffing subchannel",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
+ sd->subchannel = NULL;
+ if (sd->connected_subchannel != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
+ reason);
+ sd->connected_subchannel = NULL;
+ }
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+ sd->user_data = NULL;
+ }
+ }
+}
+
+void grpc_lb_subchannel_data_start_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): requesting connectivity change notification",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ sd->connectivity_notification_pending = true;
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, sd->subchannel_list->policy->interested_parties,
+ &sd->pending_connectivity_state_unsafe,
+ &sd->connectivity_changed_closure);
+}
+
+void grpc_lb_subchannel_data_stop_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): stopping connectivity watch",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ GPR_ASSERT(sd->connectivity_notification_pending);
+ sd->connectivity_notification_pending = false;
+}
+
+grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
+ const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+ grpc_iomgr_cb_func connectivity_changed_cb) {
+ grpc_lb_subchannel_list *subchannel_list =
+ (grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
+ if (GRPC_TRACER_ON(*tracer)) {
+ gpr_log(GPR_DEBUG,
+ "[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
+ tracer->name, p, subchannel_list, addresses->num_addresses);
+ }
+ subchannel_list->policy = p;
+ subchannel_list->tracer = tracer;
+ gpr_ref_init(&subchannel_list->refcount, 1);
+ subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
+ sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
+ // We need to remove the LB addresses in order to be able to compare the
+ // subchannel keys of subchannels from a different batch of addresses.
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
+ GRPC_ARG_LB_ADDRESSES};
+ // Create a subchannel for each address.
+ grpc_subchannel_args sc_args;
+ size_t subchannel_index = 0;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+    // If there were any balancers, we would have chosen the grpclb policy.
+ GPR_ASSERT(!addresses->addresses[i].is_balancer);
+ memset(&sc_args, 0, sizeof(grpc_subchannel_args));
+ grpc_arg addr_arg =
+ grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
+ gpr_free(addr_arg.value.string);
+ sc_args.args = new_args;
+ grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
+ exec_ctx, args->client_channel_factory, &sc_args);
+ grpc_channel_args_destroy(exec_ctx, new_args);
+ if (subchannel == NULL) {
+ // Subchannel could not be created.
+ if (GRPC_TRACER_ON(*tracer)) {
+ char *address_uri =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] could not create subchannel for address uri %s, "
+ "ignoring",
+ tracer->name, subchannel_list->policy, address_uri);
+ gpr_free(address_uri);
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(*tracer)) {
+ char *address_uri =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
+ ": Created subchannel %p for address uri %s",
+ tracer->name, p, subchannel_list, subchannel_index, subchannel,
+ address_uri);
+ gpr_free(address_uri);
+ }
+ grpc_lb_subchannel_data *sd =
+ &subchannel_list->subchannels[subchannel_index++];
+ sd->subchannel_list = subchannel_list;
+ sd->subchannel = subchannel;
+ GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
+ connectivity_changed_cb, sd,
+ grpc_combiner_scheduler(args->combiner));
+ // We assume that the current state is IDLE. If not, we'll get a
+ // callback telling us that.
+ sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
+ sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
+ sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
+ sd->user_data_vtable = addresses->user_data_vtable;
+ if (sd->user_data_vtable != NULL) {
+ sd->user_data =
+ sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ }
+ }
+ subchannel_list->num_subchannels = subchannel_index;
+ subchannel_list->num_idle = subchannel_index;
+ return subchannel_list;
+}
+
+static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list) {
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list);
+ }
+ for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "subchannel_list_destroy");
+ }
+ gpr_free(subchannel_list->subchannels);
+ gpr_free(subchannel_list);
+}
+
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ gpr_ref_non_zero(&subchannel_list->refcount);
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
+ reason);
+ }
+}
+
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ const bool done = gpr_unref(&subchannel_list->refcount);
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
+ reason);
+ }
+ if (done) {
+ subchannel_list_destroy(exec_ctx, subchannel_list);
+ }
+}
+
+void grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ grpc_lb_subchannel_list *subchannel_list, const char *reason) {
+ GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
+ grpc_lb_subchannel_list_ref(subchannel_list, reason);
+}
+
+void grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
+ grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+}
+
+static void subchannel_data_cancel_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): canceling connectivity watch (%s)",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel, reason);
+ }
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ &sd->connectivity_changed_closure);
+}
+
+void grpc_lb_subchannel_list_shutdown_and_unref(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, reason);
+ }
+ GPR_ASSERT(!subchannel_list->shutting_down);
+ subchannel_list->shutting_down = true;
+ for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ // If there's a pending notification for this subchannel, cancel it;
+ // the callback is responsible for unreffing the subchannel.
+ // Otherwise, unref the subchannel directly.
+ if (sd->connectivity_notification_pending) {
+ subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
+ } else if (sd->subchannel != NULL) {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
+ }
+ }
+ grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
new file mode 100644
index 0000000000..9d5984260f
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
+
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+// TODO(roth): This code is intended to be shared between pick_first and
+// round_robin. However, the interface needs more work to provide clean
+// encapsulation. For example, the structs here have some fields that are
+// only used in one of the two (e.g., the state counters in
+// grpc_lb_subchannel_list and the prev_connectivity_state field in
+// grpc_lb_subchannel_data are only used in round_robin, and the
+// checking_subchannel field in grpc_lb_subchannel_list is only used by
+// pick_first). Also, the connectivity state notification callback code in
+// pick_first and round_robin is probably partially duplicated and could be
+// refactored and moved here. This needs to be cleaned up in a future PR.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
+
+typedef struct {
+ /** backpointer to owning subchannel list */
+ grpc_lb_subchannel_list *subchannel_list;
+ /** subchannel itself */
+ grpc_subchannel *subchannel;
+ grpc_connected_subchannel *connected_subchannel;
+ /** Is a connectivity notification pending? */
+ bool connectivity_notification_pending;
+ /** notification that connectivity has changed on subchannel */
+ grpc_closure connectivity_changed_closure;
+  /** previous and current connectivity states. Updated by
+   * \a connectivity_changed_closure based on
+ * \a pending_connectivity_state_unsafe. */
+ grpc_connectivity_state prev_connectivity_state;
+ grpc_connectivity_state curr_connectivity_state;
+ /** connectivity state to be updated by
+ * grpc_subchannel_notify_on_state_change(), not guarded by
+ * the combiner. To be copied to \a curr_connectivity_state by
+ * \a connectivity_changed_closure. */
+ grpc_connectivity_state pending_connectivity_state_unsafe;
+ /** the subchannel's target user data */
+ void *user_data;
+ /** vtable to operate over \a user_data */
+ const grpc_lb_user_data_vtable *user_data_vtable;
+} grpc_lb_subchannel_data;
+
+/// Unrefs the subchannel contained in sd.
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_data *sd,
+ const char *reason);
+
+/// Starts watching the connectivity state of the subchannel.
+/// The connectivity_changed_cb callback must either invoke
+/// grpc_lb_subchannel_data_stop_connectivity_watch() or call
+/// grpc_lb_subchannel_data_start_connectivity_watch() again.
+void grpc_lb_subchannel_data_start_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+
+/// Stops watching the connectivity state of the subchannel.
+void grpc_lb_subchannel_data_stop_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+
+struct grpc_lb_subchannel_list {
+ /** backpointer to owning policy */
+ grpc_lb_policy *policy;
+
+ grpc_tracer_flag *tracer;
+
+ /** all our subchannels */
+ size_t num_subchannels;
+ grpc_lb_subchannel_data *subchannels;
+
+ /** Index into subchannels of the one we're currently checking.
+ * Used when connecting to subchannels serially instead of in parallel. */
+ // TODO(roth): When we have time, we can probably make this go away
+ // and compute the index dynamically by subtracting
+ // subchannel_list->subchannels from the subchannel_data pointer.
+ size_t checking_subchannel;
+
+ /** how many subchannels are in state READY */
+ size_t num_ready;
+ /** how many subchannels are in state TRANSIENT_FAILURE */
+ size_t num_transient_failures;
+ /** how many subchannels are in state SHUTDOWN */
+ size_t num_shutdown;
+ /** how many subchannels are in state IDLE */
+ size_t num_idle;
+
+ /** There will be one ref for each entry in subchannels for which there is a
+ * pending connectivity state watcher callback. */
+ gpr_refcount refcount;
+
+ /** Is this list shutting down? This may be true due to the shutdown of the
+ * policy itself or because a newer update has arrived while this one hadn't
+ * finished processing. */
+ bool shutting_down;
+};
+
+grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
+ const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+ grpc_iomgr_cb_func connectivity_changed_cb);
+
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+/// Takes and releases refs needed for a connectivity notification.
+/// This includes a ref to subchannel_list and a weak ref to the LB policy.
+void grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ grpc_lb_subchannel_list *subchannel_list, const char *reason);
+void grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+/// Marks subchannel_list as discarded. Unsubscribes all its subchannels. The
+/// connectivity state notification callback will ultimately unref it.
+void grpc_lb_subchannel_list_shutdown_and_unref(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */
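To make the intended lifecycle concrete, the following is a minimal usage sketch (illustrative only, not part of this change): my_policy, my_tracer, addresses, and args stand in for a policy's own state, and the ref/unref pairing mirrors what round_robin does in the hunks above.

// Connectivity callback (runs under the policy's combiner): per the contract
// above, either stop the watch and drop its refs, or start it again.
static void on_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
  grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
  if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "watch_done");
    grpc_lb_subchannel_list_unref_for_connectivity_watch(
        exec_ctx, sd->subchannel_list, "watch_done");
    return;
  }
  // ... react to sd->pending_connectivity_state_unsafe here ...
  grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);  // renew
}

// On an address update: build the list and watch every subchannel. Each
// watch holds one list ref plus a weak ref to the owning policy.
grpc_lb_subchannel_list *sl = grpc_lb_subchannel_list_create(
    exec_ctx, &my_policy->base, &my_tracer, addresses, args,
    on_connectivity_changed);
for (size_t i = 0; i < sl->num_subchannels; ++i) {
  grpc_lb_subchannel_list_ref_for_connectivity_watch(sl, "connectivity_watch");
  grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx,
                                                   &sl->subchannels[i]);
}

// When the list is superseded or the policy shuts down, cancel any pending
// watches; their callbacks then release the remaining refs.
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, sl, "list_superseded");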
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 46b29f1fe0..1cd73f3ff4 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -127,8 +127,8 @@ void grpc_connected_subchannel_process_transport_op(
grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel *channel, grpc_error **error);
-/** call notify when the connectivity state of a channel changes from *state.
- Updates *state with the new state of the channel */
+/** Calls notify when the connectivity state of a channel becomes different
+ from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.cc b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
index 202bcd47f5..74839f2156 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.cc
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
@@ -115,6 +115,8 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
memset(c->result, 0, sizeof(*c->result));
} else {
+ grpc_endpoint_delete_from_pollset_set(exec_ctx, args->endpoint,
+ c->args.interested_parties);
c->result->transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 1);
GPR_ASSERT(c->result->transport);
@@ -136,6 +138,8 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->handshake_mgr = grpc_handshake_manager_create();
grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args,
c->handshake_mgr);
+ grpc_endpoint_add_to_pollset_set(exec_ctx, c->endpoint,
+ c->args.interested_parties);
grpc_handshake_manager_do_handshake(
exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args,
c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 9462d1085e..02fc53122d 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -54,7 +54,6 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
-#define DEFAULT_WINDOW 65535
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
@@ -222,7 +221,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
t->write_cb_pool = next;
}
- t->flow_control.bdp_estimator.Destroy();
+ t->flow_control.Destroy();
GRPC_ERROR_UNREF(t->closed_with_error);
gpr_free(t->ping_acks);
@@ -282,10 +281,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
- t->flow_control.remote_window = DEFAULT_WINDOW;
- t->flow_control.announced_window = DEFAULT_WINDOW;
- t->flow_control.target_initial_window_size = DEFAULT_WINDOW;
- t->flow_control.t = t;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
@@ -325,8 +320,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
- t->flow_control.bdp_estimator.Init(t->peer_string);
-
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@@ -350,8 +343,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
window -- this should by rights be 0 */
t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
t->sent_local_settings = 0;
- t->write_buffer_size = DEFAULT_WINDOW;
- t->flow_control.enable_bdp_probe = true;
+ t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
@@ -396,6 +388,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
+ bool enable_bdp = true;
+
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
@@ -456,8 +450,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE});
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
- t->flow_control.enable_bdp_probe =
- grpc_channel_arg_get_integer(&channel_args->args[i], {1, 0, 1});
+ enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
@@ -552,6 +545,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
+ t->flow_control.Init(exec_ctx, t, enable_bdp);
+
/* No pings allowed before receiving a header or data frame. */
t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
@@ -572,15 +567,13 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
- if (t->flow_control.enable_bdp_probe) {
+ if (enable_bdp) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
schedule_bdp_ping_locked(exec_ctx, t);
- }
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
- NULL);
+ grpc_chttp2_act_on_flowctl_action(
+ exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, NULL);
+ }
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
@@ -718,7 +711,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
post_destructive_reclaimer(exec_ctx, t);
}
- s->flow_control.s = s;
+ s->flow_control.Init(t->flow_control.get(), s);
GPR_TIMER_END("init_stream", 0);
return 0;
@@ -769,7 +762,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_ERROR_UNREF(s->write_closed_error);
GRPC_ERROR_UNREF(s->byte_stream_error);
- grpc_chttp2_flowctl_destroy_stream(&t->flow_control, &s->flow_control);
+ s->flow_control.Destroy();
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
@@ -1638,13 +1631,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->id != 0) {
if (!s->read_closed) {
already_received = s->frame_storage.length;
- grpc_chttp2_flowctl_incoming_bs_update(
- &t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES,
- already_received);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
- &s->flow_control),
- t, s);
+ s->flow_control->IncomingByteStreamUpdate(GRPC_HEADER_SIZE_IN_BYTES,
+ already_received);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx,
+ s->flow_control->MakeAction(), t, s);
}
}
grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
@@ -2420,49 +2410,44 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
* INPUT PROCESSING - PARSING
*/
-void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_flowctl_action action,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- switch (action.send_stream_update) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
- break;
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
- grpc_chttp2_initiate_write(
- exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL);
+template <class F>
+static void WithUrgency(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_core::chttp2::FlowControlAction::Urgency urgency,
+ grpc_chttp2_initiate_write_reason reason, F action) {
+ switch (urgency) {
+ case grpc_core::chttp2::FlowControlAction::Urgency::NO_ACTION_NEEDED:
break;
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ case grpc_core::chttp2::FlowControlAction::Urgency::UPDATE_IMMEDIATELY:
+ grpc_chttp2_initiate_write(exec_ctx, t, reason);
+ // fallthrough
+ case grpc_core::chttp2::FlowControlAction::Urgency::QUEUE_UPDATE:
+ action();
break;
}
- switch (action.send_transport_update) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
- break;
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_initiate_write(
- exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL);
- break;
- // this is the same as no action b/c every time the transport enters the
- // writing path it will maybe do an update
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- break;
- }
- if (action.send_setting_update != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- if (action.initial_window_size > 0) {
- queue_setting_update(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- (uint32_t)action.initial_window_size);
- }
- if (action.max_frame_size > 0) {
- queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)action.max_frame_size);
- }
- if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) {
- grpc_chttp2_initiate_write(exec_ctx, t,
- GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS);
- }
- }
+}
+
+void grpc_chttp2_act_on_flowctl_action(
+ grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s) {
+ WithUrgency(
+ exec_ctx, t, action.send_stream_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
+ [exec_ctx, t, s]() { grpc_chttp2_mark_stream_writable(exec_ctx, t, s); });
+ WithUrgency(exec_ctx, t, action.send_transport_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, []() {});
+ WithUrgency(exec_ctx, t, action.send_initial_window_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
+ [exec_ctx, t, &action]() {
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ action.initial_window_size());
+ });
+ WithUrgency(
+ exec_ctx, t, action.send_max_frame_size_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [exec_ctx, t, &action]() {
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ action.max_frame_size());
+ });
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@@ -2518,7 +2503,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
- t->flow_control.bdp_estimator->AddIncomingBytes(
+ t->flow_control->bdp_estimator()->AddIncomingBytes(
(int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i]));
errors[1] =
grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
@@ -2535,8 +2520,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("reading_action.parse", 0);
GPR_TIMER_BEGIN("post_parse_locked", 0);
- if (t->flow_control.initial_window_update != 0) {
- if (t->flow_control.initial_window_update > 0) {
+ if (t->initial_window_update != 0) {
+ if (t->initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
@@ -2545,7 +2530,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
}
}
- t->flow_control.initial_window_update = 0;
+ t->initial_window_update = 0;
}
GPR_TIMER_END("post_parse_locked", 0);
}
@@ -2568,10 +2553,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (keep_reading) {
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
&t->read_action_locked);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
- NULL);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, t->flow_control->MakeAction(),
+ t, NULL);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@@ -2588,7 +2571,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
// that kicks off finishes, it's unreffed
static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
- t->flow_control.bdp_estimator->SchedulePing();
+ t->flow_control->bdp_estimator()->SchedulePing();
send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
&t->finish_bdp_ping_locked);
}
@@ -2604,7 +2587,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) {
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
}
- t->flow_control.bdp_estimator->StartPing();
+ t->flow_control->bdp_estimator()->StartPing();
}
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
@@ -2618,7 +2601,10 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
return;
}
- grpc_millis next_ping = t->flow_control.bdp_estimator->CompletePing(exec_ctx);
+ grpc_millis next_ping =
+ t->flow_control->bdp_estimator()->CompletePing(exec_ctx);
+ grpc_chttp2_act_on_flowctl_action(
+ exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, nullptr);
GPR_ASSERT(!t->have_next_bdp_ping_timer);
t->have_next_bdp_ping_timer = true;
grpc_timer_init(exec_ctx, &t->next_bdp_ping_timer, next_ping,
@@ -2844,13 +2830,10 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
size_t cur_length = s->frame_storage.length;
if (!s->read_closed) {
- grpc_chttp2_flowctl_incoming_bs_update(&t->flow_control, &s->flow_control,
- bs->next_action.max_size_hint,
- cur_length);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
- &s->flow_control),
- t, s);
+ s->flow_control->IncomingByteStreamUpdate(bs->next_action.max_size_hint,
+ cur_length);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, s->flow_control->MakeAction(),
+ t, s);
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
if (s->frame_storage.length > 0) {
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc
index 716cd71490..40545bc74b 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.cc
+++ b/src/core/ext/transport/chttp2/transport/flow_control.cc
@@ -16,7 +16,7 @@
*
*/
-#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include <inttypes.h>
#include <limits.h>
@@ -28,38 +28,15 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/support/string.h"
-static uint32_t grpc_chttp2_target_announced_window(
- const grpc_chttp2_transport_flowctl* tfc);
-
-#ifndef NDEBUG
-
-typedef struct {
- int64_t remote_window;
- int64_t target_window;
- int64_t announced_window;
- int64_t remote_window_delta;
- int64_t local_window_delta;
- int64_t announced_window_delta;
- uint32_t local_init_window;
- uint32_t local_max_frame;
-} shadow_flow_control;
-
-static void pretrace(shadow_flow_control* shadow_fc,
- grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- shadow_fc->remote_window = tfc->remote_window;
- shadow_fc->target_window = grpc_chttp2_target_announced_window(tfc);
- shadow_fc->announced_window = tfc->announced_window;
- if (sfc != NULL) {
- shadow_fc->remote_window_delta = sfc->remote_window_delta;
- shadow_fc->local_window_delta = sfc->local_window_delta;
- shadow_fc->announced_window_delta = sfc->announced_window_delta;
- }
-}
+namespace grpc_core {
+namespace chttp2 {
+
+namespace {
-#define TRACE_PADDING 30
+constexpr int kTracePadding = 30;
static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
char* str;
@@ -68,7 +45,7 @@ static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
} else {
gpr_asprintf(&str, "%" PRId64 "", old_val);
}
- char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
+ char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
gpr_free(str);
return str_lp;
}
@@ -80,47 +57,58 @@ static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) {
} else {
gpr_asprintf(&str, "%" PRIu32 "", old_val);
}
- char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
+ char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
gpr_free(str);
return str_lp;
}
+} // namespace
+
+void FlowControlTrace::Init(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc) {
+ tfc_ = tfc;
+ sfc_ = sfc;
+ reason_ = reason;
+ remote_window_ = tfc->remote_window();
+ target_window_ = tfc->target_window();
+ announced_window_ = tfc->announced_window();
+ if (sfc != nullptr) {
+ remote_window_delta_ = sfc->remote_window_delta();
+ local_window_delta_ = sfc->local_window_delta();
+ announced_window_delta_ = sfc->announced_window_delta();
+ }
+}
-static void posttrace(shadow_flow_control* shadow_fc,
- grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc, const char* reason) {
+void FlowControlTrace::Finish() {
uint32_t acked_local_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
uint32_t remote_window =
- tfc->t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- char* trw_str =
- fmt_int64_diff_str(shadow_fc->remote_window, tfc->remote_window);
- char* tlw_str = fmt_int64_diff_str(shadow_fc->target_window,
- grpc_chttp2_target_announced_window(tfc));
+ tfc_->transport()->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ char* trw_str = fmt_int64_diff_str(remote_window_, tfc_->remote_window());
+ char* tlw_str = fmt_int64_diff_str(target_window_, tfc_->target_window());
char* taw_str =
- fmt_int64_diff_str(shadow_fc->announced_window, tfc->announced_window);
+ fmt_int64_diff_str(announced_window_, tfc_->announced_window());
char* srw_str;
char* slw_str;
char* saw_str;
- if (sfc != NULL) {
- srw_str = fmt_int64_diff_str(shadow_fc->remote_window_delta + remote_window,
- sfc->remote_window_delta + remote_window);
- slw_str =
- fmt_int64_diff_str(shadow_fc->local_window_delta + acked_local_window,
- sfc->local_window_delta + acked_local_window);
- saw_str = fmt_int64_diff_str(
- shadow_fc->announced_window_delta + acked_local_window,
- sfc->announced_window_delta + acked_local_window);
+ if (sfc_ != nullptr) {
+ srw_str = fmt_int64_diff_str(remote_window_delta_ + remote_window,
+ sfc_->remote_window_delta() + remote_window);
+    slw_str = fmt_int64_diff_str(local_window_delta_ + acked_local_window,
+                                 sfc_->local_window_delta() + acked_local_window);
+    saw_str = fmt_int64_diff_str(announced_window_delta_ + acked_local_window,
+                                 sfc_->announced_window_delta() + acked_local_window);
} else {
- srw_str = gpr_leftpad("", ' ', TRACE_PADDING);
- slw_str = gpr_leftpad("", ' ', TRACE_PADDING);
- saw_str = gpr_leftpad("", ' ', TRACE_PADDING);
+ srw_str = gpr_leftpad("", ' ', kTracePadding);
+ slw_str = gpr_leftpad("", ' ', kTracePadding);
+ saw_str = gpr_leftpad("", ' ', kTracePadding);
}
gpr_log(GPR_DEBUG,
"%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s",
- tfc, sfc != NULL ? sfc->s->id : 0, tfc->t->is_client ? "cli" : "svr",
- reason, trw_str, tlw_str, taw_str, srw_str, slw_str, saw_str);
+ tfc_, sfc_ != nullptr ? sfc_->stream()->id : 0,
+ tfc_->transport()->is_client ? "cli" : "svr", reason_, trw_str,
+ tlw_str, taw_str, srw_str, slw_str, saw_str);
gpr_free(trw_str);
gpr_free(tlw_str);
gpr_free(taw_str);
@@ -129,13 +117,13 @@ static void posttrace(shadow_flow_control* shadow_fc,
gpr_free(saw_str);
}
-static const char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) {
- switch (urgency) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
+const char* FlowControlAction::UrgencyString(Urgency u) {
+ switch (u) {
+ case Urgency::NO_ACTION_NEEDED:
return "no action";
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
+ case Urgency::UPDATE_IMMEDIATELY:
return "update immediately";
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
+ case Urgency::QUEUE_UPDATE:
return "queue update";
default:
GPR_UNREACHABLE_CODE(return "unknown");
@@ -143,209 +131,132 @@ static const char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) {
GPR_UNREACHABLE_CODE(return "unknown");
}
-static void trace_action(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_flowctl_action action) {
+void FlowControlAction::Trace(grpc_chttp2_transport* t) const {
char* iw_str = fmt_uint32_diff_str(
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
- action.initial_window_size);
+ t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+ initial_window_size_);
char* mf_str = fmt_uint32_diff_str(
- tfc->t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- action.max_frame_size);
- gpr_log(GPR_DEBUG, "t[%s], s[%s], settings[%s] iw:%s mf:%s",
- urgency_to_string(action.send_transport_update),
- urgency_to_string(action.send_stream_update),
- urgency_to_string(action.send_setting_update), iw_str, mf_str);
+ t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ max_frame_size_);
+ gpr_log(GPR_DEBUG, "t[%s], s[%s], iw:%s:%s mf:%s:%s",
+ UrgencyString(send_transport_update_),
+ UrgencyString(send_stream_update_),
+ UrgencyString(send_initial_window_update_), iw_str,
+ UrgencyString(send_max_frame_size_update_), mf_str);
gpr_free(iw_str);
gpr_free(mf_str);
}
-#define PRETRACE(tfc, sfc) \
- shadow_flow_control shadow_fc; \
- GRPC_FLOW_CONTROL_IF_TRACING(pretrace(&shadow_fc, tfc, sfc))
-#define POSTTRACE(tfc, sfc, reason) \
- GRPC_FLOW_CONTROL_IF_TRACING(posttrace(&shadow_fc, tfc, sfc, reason))
-#define TRACEACTION(tfc, action) \
- GRPC_FLOW_CONTROL_IF_TRACING(trace_action(tfc, action))
-#else
-#define PRETRACE(tfc, sfc)
-#define POSTTRACE(tfc, sfc, reason)
-#define TRACEACTION(tfc, action)
-#endif
-
-/* How many bytes of incoming flow control would we like to advertise */
-static uint32_t grpc_chttp2_target_announced_window(
- const grpc_chttp2_transport_flowctl* tfc) {
- return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
- tfc->announced_stream_total_over_incoming_window +
- tfc->target_initial_window_size);
-}
-
-// we have sent data on the wire, we must track this in our bookkeeping for the
-// remote peer's flow control.
-void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- int64_t size) {
- PRETRACE(tfc, sfc);
- tfc->remote_window -= size;
- sfc->remote_window_delta -= size;
- POSTTRACE(tfc, sfc, " data sent");
-}
-
-static void announced_window_delta_preupdate(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- if (sfc->announced_window_delta > 0) {
- tfc->announced_stream_total_over_incoming_window -=
- sfc->announced_window_delta;
- } else {
- tfc->announced_stream_total_under_incoming_window +=
- -sfc->announced_window_delta;
- }
-}
-
-static void announced_window_delta_postupdate(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
- if (sfc->announced_window_delta > 0) {
- tfc->announced_stream_total_over_incoming_window +=
- sfc->announced_window_delta;
- } else {
- tfc->announced_stream_total_under_incoming_window -=
- -sfc->announced_window_delta;
+TransportFlowControl::TransportFlowControl(grpc_exec_ctx* exec_ctx,
+ const grpc_chttp2_transport* t,
+ bool enable_bdp_probe)
+ : t_(t),
+ enable_bdp_probe_(enable_bdp_probe),
+ bdp_estimator_(t->peer_string),
+ pid_controller_(grpc_core::PidController::Args()
+ .set_gain_p(4)
+ .set_gain_i(8)
+ .set_gain_d(0)
+ .set_initial_control_value(TargetLogBdp())
+ .set_min_control_value(-1)
+ .set_max_control_value(25)
+ .set_integral_range(10)),
+ last_pid_update_(grpc_exec_ctx_now(exec_ctx)) {}
+
+uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) {
+ FlowControlTrace trace("t updt sent", this, nullptr);
+ const uint32_t target_announced_window = (const uint32_t)target_window();
+ if ((writing_anyway || announced_window_ <= target_announced_window / 2) &&
+ announced_window_ != target_announced_window) {
+ const uint32_t announce = (uint32_t)GPR_CLAMP(
+ target_announced_window - announced_window_, 0, UINT32_MAX);
+ announced_window_ += announce;
+ return announce;
}
+ return 0;
}
-// We have received data from the wire. We must track this in our own flow
-// control bookkeeping.
-// Returns an error if the incoming frame violates our flow control.
-grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- int64_t incoming_frame_size) {
- uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- uint32_t acked_init_window =
- tfc->t->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- PRETRACE(tfc, sfc);
- if (incoming_frame_size > tfc->announced_window) {
+grpc_error* TransportFlowControl::ValidateRecvData(
+ int64_t incoming_frame_size) {
+ if (incoming_frame_size > announced_window_) {
char* msg;
gpr_asprintf(&msg,
"frame of size %" PRId64 " overflows local window of %" PRId64,
- incoming_frame_size, tfc->announced_window);
+ incoming_frame_size, announced_window_);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
+ return GRPC_ERROR_NONE;
+}
- if (sfc != NULL) {
- int64_t acked_stream_window =
- sfc->announced_window_delta + acked_init_window;
- int64_t sent_stream_window = sfc->announced_window_delta + sent_init_window;
- if (incoming_frame_size > acked_stream_window) {
- if (incoming_frame_size <= sent_stream_window) {
- gpr_log(
- GPR_ERROR,
- "Incoming frame of size %" PRId64
- " exceeds local window size of %" PRId64
- ".\n"
- "The (un-acked, future) window size would be %" PRId64
- " which is not exceeded.\n"
- "This would usually cause a disconnection, but allowing it due to"
- "broken HTTP2 implementations in the wild.\n"
- "See (for example) https://github.com/netty/netty/issues/6520.",
- incoming_frame_size, acked_stream_window, sent_stream_window);
- } else {
- char* msg;
- gpr_asprintf(&msg, "frame of size %" PRId64
- " overflows local window of %" PRId64,
- incoming_frame_size, acked_stream_window);
- grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
- gpr_free(msg);
- return err;
- }
- }
+StreamFlowControl::StreamFlowControl(TransportFlowControl* tfc,
+ const grpc_chttp2_stream* s)
+ : tfc_(tfc), s_(s) {}
- announced_window_delta_preupdate(tfc, sfc);
- sfc->announced_window_delta -= incoming_frame_size;
- announced_window_delta_postupdate(tfc, sfc);
- sfc->local_window_delta -= incoming_frame_size;
- }
+grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
+ FlowControlTrace trace(" data recv", tfc_, this);
- tfc->announced_window -= incoming_frame_size;
+ grpc_error* error = GRPC_ERROR_NONE;
+ error = tfc_->ValidateRecvData(incoming_frame_size);
+ if (error != GRPC_ERROR_NONE) return error;
- POSTTRACE(tfc, sfc, " data recv");
- return GRPC_ERROR_NONE;
-}
-
-// Returns a non zero announce integer if we should send a transport window
-// update
-uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl* tfc, bool writing_anyway) {
- PRETRACE(tfc, NULL);
- uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
- uint32_t threshold_to_send_transport_window_update =
- tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4
- : target_announced_window / 2;
- if ((writing_anyway ||
- tfc->announced_window <= threshold_to_send_transport_window_update) &&
- tfc->announced_window != target_announced_window) {
- uint32_t announce = (uint32_t)GPR_CLAMP(
- target_announced_window - tfc->announced_window, 0, UINT32_MAX);
- tfc->announced_window += announce;
- POSTTRACE(tfc, NULL, "t updt sent");
- return announce;
+ uint32_t sent_init_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ uint32_t acked_init_window =
+ tfc_->transport()->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+
+ int64_t acked_stream_window = announced_window_delta_ + acked_init_window;
+ int64_t sent_stream_window = announced_window_delta_ + sent_init_window;
+ if (incoming_frame_size > acked_stream_window) {
+ if (incoming_frame_size <= sent_stream_window) {
+ gpr_log(GPR_ERROR,
+ "Incoming frame of size %" PRId64
+ " exceeds local window size of %" PRId64
+ ".\n"
+ "The (un-acked, future) window size would be %" PRId64
+ " which is not exceeded.\n"
+ "This would usually cause a disconnection, but allowing it due to"
+ "broken HTTP2 implementations in the wild.\n"
+ "See (for example) https://github.com/netty/netty/issues/6520.",
+ incoming_frame_size, acked_stream_window, sent_stream_window);
+ } else {
+ char* msg;
+ gpr_asprintf(&msg, "frame of size %" PRId64
+ " overflows local window of %" PRId64,
+ incoming_frame_size, acked_stream_window);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ return err;
+ }
}
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "%p[0][%s] will not send transport update", tfc,
- tfc->t->is_client ? "cli" : "svr"));
- return 0;
+
+ UpdateAnnouncedWindowDelta(tfc_, -incoming_frame_size);
+ local_window_delta_ -= incoming_frame_size;
+ tfc_->CommitRecvData(incoming_frame_size);
+ return GRPC_ERROR_NONE;
}
-// Returns a non zero announce integer if we should send a stream window update
-uint32_t grpc_chttp2_flowctl_maybe_send_stream_update(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
- PRETRACE(tfc, sfc);
- if (sfc->local_window_delta > sfc->announced_window_delta) {
+uint32_t StreamFlowControl::MaybeSendUpdate() {
+ FlowControlTrace trace("s updt sent", tfc_, this);
+ if (local_window_delta_ > announced_window_delta_) {
uint32_t announce = (uint32_t)GPR_CLAMP(
- sfc->local_window_delta - sfc->announced_window_delta, 0, UINT32_MAX);
- announced_window_delta_preupdate(tfc, sfc);
- sfc->announced_window_delta += announce;
- announced_window_delta_postupdate(tfc, sfc);
- POSTTRACE(tfc, sfc, "s updt sent");
+ local_window_delta_ - announced_window_delta_, 0, UINT32_MAX);
+ UpdateAnnouncedWindowDelta(tfc_, announce);
return announce;
}
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "%p[%u][%s] will not send stream update", tfc,
- sfc->s->id, tfc->t->is_client ? "cli" : "svr"));
return 0;
}
-// we have received a WINDOW_UPDATE frame for a transport
-void grpc_chttp2_flowctl_recv_transport_update(
- grpc_chttp2_transport_flowctl* tfc, uint32_t size) {
- PRETRACE(tfc, NULL);
- tfc->remote_window += size;
- POSTTRACE(tfc, NULL, "t updt recv");
-}
-
-// we have received a WINDOW_UPDATE frame for a stream
-void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- uint32_t size) {
- PRETRACE(tfc, sfc);
- sfc->remote_window_delta += size;
- POSTTRACE(tfc, sfc, "s updt recv");
-}
-
-void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- size_t max_size_hint,
- size_t have_already) {
- PRETRACE(tfc, sfc);
+void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
+ size_t have_already) {
+ FlowControlTrace trace("app st recv", tfc_, this);
uint32_t max_recv_bytes;
uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
/* clamp max recv hint to an allowable size */
if (max_size_hint >= UINT32_MAX - sent_init_window) {
@@ -363,65 +274,18 @@ void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc,
/* add some small lookahead to keep pipelines flowing */
GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
- if (sfc->local_window_delta < max_recv_bytes) {
+ if (local_window_delta_ < max_recv_bytes) {
uint32_t add_max_recv_bytes =
- (uint32_t)(max_recv_bytes - sfc->local_window_delta);
- sfc->local_window_delta += add_max_recv_bytes;
- }
- POSTTRACE(tfc, sfc, "app st recv");
-}
-
-void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- announced_window_delta_preupdate(tfc, sfc);
-}
-
-// Returns an urgency with which to make an update
-static grpc_chttp2_flowctl_urgency delta_is_significant(
- const grpc_chttp2_transport_flowctl* tfc, int32_t value,
- grpc_chttp2_setting_id setting_id) {
- int64_t delta = (int64_t)value -
- (int64_t)tfc->t->settings[GRPC_LOCAL_SETTINGS][setting_id];
- // TODO(ncteisen): tune this
- if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
- return GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
- } else {
- return GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED;
- }
-}
-
-// Takes in a target and uses the pid controller to return a stabilized
-// guess at the new bdp.
-static double get_pid_controller_guess(grpc_exec_ctx* exec_ctx,
- grpc_chttp2_transport_flowctl* tfc,
- double target) {
- grpc_millis now = grpc_exec_ctx_now(exec_ctx);
- if (!tfc->pid_controller_initialized) {
- tfc->last_pid_update = now;
- tfc->pid_controller_initialized = true;
- tfc->pid_controller.Init(grpc_core::PidController::Args()
- .set_gain_p(4)
- .set_gain_i(8)
- .set_gain_d(0)
- .set_initial_control_value(target)
- .set_min_control_value(-1)
- .set_max_control_value(25)
- .set_integral_range(10));
- return pow(2, target);
+ (uint32_t)(max_recv_bytes - local_window_delta_);
+ local_window_delta_ += add_max_recv_bytes;
}
- double bdp_error = target - tfc->pid_controller->last_control_value();
- double dt = (double)(now - tfc->last_pid_update) * 1e-3;
- double log2_bdp_guess = tfc->pid_controller->Update(bdp_error, dt);
- tfc->last_pid_update = now;
- return pow(2, log2_bdp_guess);
}
// Take in a target and modifies it based on the memory pressure of the system
-static double get_target_under_memory_pressure(
- grpc_chttp2_transport_flowctl* tfc, double target) {
+static double AdjustForMemoryPressure(grpc_resource_quota* quota,
+ double target) {
// do not increase window under heavy memory pressure.
- double memory_pressure = grpc_resource_quota_get_memory_pressure(
- grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep)));
+ double memory_pressure = grpc_resource_quota_get_memory_pressure(quota);
static const double kLowMemPressure = 0.1;
static const double kZeroTarget = 22;
static const double kHighMemPressure = 0.8;
@@ -436,75 +300,82 @@ static double get_target_under_memory_pressure(
return target;
}
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_exec_ctx* exec_ctx, grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- grpc_chttp2_flowctl_action action;
- memset(&action, 0, sizeof(action));
+double TransportFlowControl::TargetLogBdp() {
+ return AdjustForMemoryPressure(
+ grpc_resource_user_quota(grpc_endpoint_get_resource_user(t_->ep)),
+ 1 + log2(bdp_estimator_.EstimateBdp()));
+}
+
+double TransportFlowControl::SmoothLogBdp(grpc_exec_ctx* exec_ctx,
+ double value) {
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ double bdp_error = value - pid_controller_.last_control_value();
+ const double dt = (double)(now - last_pid_update_) * 1e-3;
+ last_pid_update_ = now;
+ return pid_controller_.Update(bdp_error, dt);
+}
+
+FlowControlAction::Urgency TransportFlowControl::DeltaUrgency(
+ int32_t value, grpc_chttp2_setting_id setting_id) {
+ int64_t delta =
+ (int64_t)value - (int64_t)t_->settings[GRPC_LOCAL_SETTINGS][setting_id];
// TODO(ncteisen): tune this
- if (sfc != NULL && !sfc->s->read_closed) {
- uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- if ((int64_t)sfc->local_window_delta >
- (int64_t)sfc->announced_window_delta &&
- (int64_t)sfc->announced_window_delta + sent_init_window <=
- sent_init_window / 2) {
- action.send_stream_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
- } else if (sfc->local_window_delta > sfc->announced_window_delta) {
- action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
- }
+ if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
+ return FlowControlAction::Urgency::QUEUE_UPDATE;
+ } else {
+ return FlowControlAction::Urgency::NO_ACTION_NEEDED;
}
- if (tfc->enable_bdp_probe) {
+}
+
+FlowControlAction TransportFlowControl::PeriodicUpdate(
+ grpc_exec_ctx* exec_ctx) {
+ FlowControlAction action;
+ if (enable_bdp_probe_) {
// get bdp estimate and update initial_window accordingly.
- int64_t estimate = -1;
- if (tfc->bdp_estimator->EstimateBdp(&estimate)) {
- double target = 1 + log2((double)estimate);
-
- // target might change based on how much memory pressure we are under
- // TODO(ncteisen): experiment with setting target to be huge under low
- // memory pressure.
- target = get_target_under_memory_pressure(tfc, target);
-
- // run our target through the pid controller to stabilize change.
- // TODO(ncteisen): experiment with other controllers here.
- double bdp_guess = get_pid_controller_guess(exec_ctx, tfc, target);
-
- // Though initial window 'could' drop to 0, we keep the floor at 128
- tfc->target_initial_window_size =
- (int32_t)GPR_CLAMP(bdp_guess, 128, INT32_MAX);
-
- grpc_chttp2_flowctl_urgency init_window_update_urgency =
- delta_is_significant(tfc, tfc->target_initial_window_size,
- GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
- if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- action.send_setting_update = init_window_update_urgency;
- action.initial_window_size = (uint32_t)tfc->target_initial_window_size;
- }
- }
+ // target might change based on how much memory pressure we are under
+ // TODO(ncteisen): experiment with setting target to be huge under low
+ // memory pressure.
+ const double target = pow(2, SmoothLogBdp(exec_ctx, TargetLogBdp()));
+
+ // Though initial window 'could' drop to 0, we keep the floor at 128
+ target_initial_window_size_ = (int32_t)GPR_CLAMP(target, 128, INT32_MAX);
+
+ action.set_send_initial_window_update(
+ DeltaUrgency(target_initial_window_size_,
+ GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),
+ target_initial_window_size_);
// get bandwidth estimate and update max_frame accordingly.
- double bw_dbl = -1;
- if (tfc->bdp_estimator->EstimateBandwidth(&bw_dbl)) {
- // we target the max of BDP or bandwidth in microseconds.
- int32_t frame_size = (int32_t)GPR_CLAMP(
- GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
- tfc->target_initial_window_size),
- 16384, 16777215);
- grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
- tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
- if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- if (frame_size_urgency > action.send_setting_update) {
- action.send_setting_update = frame_size_urgency;
- }
- action.max_frame_size = (uint32_t)frame_size;
- }
- }
+ double bw_dbl = bdp_estimator_.EstimateBandwidth();
+ // we target the max of BDP or bandwidth in microseconds.
+ int32_t frame_size = (int32_t)GPR_CLAMP(
+ GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
+ target_initial_window_size_),
+ 16384, 16777215);
+ action.set_send_max_frame_size_update(
+ DeltaUrgency(frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE),
+ frame_size);
}
- uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
- if (tfc->announced_window < target_announced_window / 2) {
- action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
+ return UpdateAction(action);
+}
+
+FlowControlAction StreamFlowControl::UpdateAction(FlowControlAction action) {
+ // TODO(ncteisen): tune this
+ if (!s_->read_closed) {
+ uint32_t sent_init_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ if (local_window_delta_ > announced_window_delta_ &&
+ announced_window_delta_ + sent_init_window <= sent_init_window / 2) {
+ action.set_send_stream_update(
+ FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
+ } else if (local_window_delta_ > announced_window_delta_) {
+ action.set_send_stream_update(FlowControlAction::Urgency::QUEUE_UPDATE);
+ }
}
- TRACEACTION(tfc, action);
+
return action;
}
+
+} // namespace chttp2
+} // namespace grpc_core
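Note on the DeltaUrgency() helper above: a SETTINGS update is only queued when the proposed value differs from the currently advertised GRPC_LOCAL_SETTINGS entry by at least value/5 (roughly 20%). The following is a standalone sketch of that threshold rule with made-up window sizes; it is illustration only, not gRPC code, and the helper name is hypothetical.

#include <cstdint>
#include <cstdio>

// Mirrors the `delta <= -value / 5 || delta >= value / 5` test in DeltaUrgency().
static bool delta_is_significant(int32_t value, int32_t current_setting) {
  int64_t delta = (int64_t)value - (int64_t)current_setting;
  return delta != 0 && (delta <= -value / 5 || delta >= value / 5);
}

int main() {
  // 65535 -> 70000 is below the value/5 threshold: no SETTINGS update queued.
  std::printf("%d\n", delta_is_significant(70000, 65535));   // prints 0
  // 65535 -> 131072 doubles the window: update is queued.
  std::printf("%d\n", delta_is_significant(131072, 65535));  // prints 1
}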
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.h b/src/core/ext/transport/chttp2/transport/flow_control.h
new file mode 100644
index 0000000000..7dd348ed5f
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/flow_control.h
@@ -0,0 +1,336 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
+
+#include <stdint.h>
+
+#include <grpc/support/useful.h>
+#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
+#include "src/core/lib/support/manual_constructor.h"
+#include "src/core/lib/transport/bdp_estimator.h"
+#include "src/core/lib/transport/pid_controller.h"
+
+struct grpc_chttp2_transport;
+struct grpc_chttp2_stream;
+
+extern "C" grpc_tracer_flag grpc_flowctl_trace;
+
+namespace grpc {
+namespace testing {
+class TrickledCHTTP2; // to make this a friend
+} // namespace testing
+} // namespace grpc
+
+namespace grpc_core {
+namespace chttp2 {
+
+static constexpr uint32_t kDefaultWindow = 65535;
+
+class TransportFlowControl;
+class StreamFlowControl;
+
+class FlowControlAction {
+ public:
+ enum class Urgency : uint8_t {
+ // Nothing to be done.
+ NO_ACTION_NEEDED = 0,
+ // Initiate a write to update the initial window immediately.
+ UPDATE_IMMEDIATELY,
+ // Push the flow control update into a send buffer, to be sent
+ // out the next time a write is initiated.
+ QUEUE_UPDATE,
+ };
+
+ Urgency send_stream_update() const { return send_stream_update_; }
+ Urgency send_transport_update() const { return send_transport_update_; }
+ Urgency send_initial_window_update() const {
+ return send_initial_window_update_;
+ }
+ Urgency send_max_frame_size_update() const {
+ return send_max_frame_size_update_;
+ }
+ uint32_t initial_window_size() const { return initial_window_size_; }
+ uint32_t max_frame_size() const { return max_frame_size_; }
+
+ FlowControlAction& set_send_stream_update(Urgency u) {
+ send_stream_update_ = u;
+ return *this;
+ }
+ FlowControlAction& set_send_transport_update(Urgency u) {
+ send_transport_update_ = u;
+ return *this;
+ }
+ FlowControlAction& set_send_initial_window_update(Urgency u,
+ uint32_t update) {
+ send_initial_window_update_ = u;
+ initial_window_size_ = update;
+ return *this;
+ }
+ FlowControlAction& set_send_max_frame_size_update(Urgency u,
+ uint32_t update) {
+ send_max_frame_size_update_ = u;
+ max_frame_size_ = update;
+ return *this;
+ }
+
+ static const char* UrgencyString(Urgency u);
+ void Trace(grpc_chttp2_transport* t) const;
+
+ private:
+ Urgency send_stream_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_transport_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_initial_window_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_max_frame_size_update_ = Urgency::NO_ACTION_NEEDED;
+ uint32_t initial_window_size_ = 0;
+ uint32_t max_frame_size_ = 0;
+};
+
+class FlowControlTrace {
+ public:
+ FlowControlTrace(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc) {
+ if (enabled_) Init(reason, tfc, sfc);
+ }
+
+ ~FlowControlTrace() {
+ if (enabled_) Finish();
+ }
+
+ private:
+ void Init(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc);
+ void Finish();
+
+ const bool enabled_ = GRPC_TRACER_ON(grpc_flowctl_trace);
+
+ TransportFlowControl* tfc_;
+ StreamFlowControl* sfc_;
+ const char* reason_;
+ int64_t remote_window_;
+ int64_t target_window_;
+ int64_t announced_window_;
+ int64_t remote_window_delta_;
+ int64_t local_window_delta_;
+ int64_t announced_window_delta_;
+};
+
+class TransportFlowControl {
+ public:
+ TransportFlowControl(grpc_exec_ctx* exec_ctx, const grpc_chttp2_transport* t,
+ bool enable_bdp_probe);
+ ~TransportFlowControl() {}
+
+ bool bdp_probe() const { return enable_bdp_probe_; }
+
+ // returns an announce if we should send a transport update to our peer,
+ // else returns zero; writing_anyway indicates if a write would happen
+ // regardless of the send - if it is false and this function returns non-zero,
+ // this announce will cause a write to occur
+ uint32_t MaybeSendUpdate(bool writing_anyway);
+
+  // Reads the flow control data and returns an actionable struct that will
+ // tell chttp2 exactly what it needs to do
+ FlowControlAction MakeAction() { return UpdateAction(FlowControlAction()); }
+
+ // Call periodically (at a low-ish rate, 100ms - 10s makes sense)
+ // to perform more complex flow control calculations and return an action
+ // to let chttp2 change its parameters
+ FlowControlAction PeriodicUpdate(grpc_exec_ctx* exec_ctx);
+
+ void StreamSentData(int64_t size) { remote_window_ -= size; }
+
+ grpc_error* ValidateRecvData(int64_t incoming_frame_size);
+ void CommitRecvData(int64_t incoming_frame_size) {
+ announced_window_ -= incoming_frame_size;
+ }
+
+ grpc_error* RecvData(int64_t incoming_frame_size) {
+ FlowControlTrace trace(" data recv", this, nullptr);
+ grpc_error* error = ValidateRecvData(incoming_frame_size);
+ if (error != GRPC_ERROR_NONE) return error;
+ CommitRecvData(incoming_frame_size);
+ return GRPC_ERROR_NONE;
+ }
+
+ // we have received a WINDOW_UPDATE frame for a transport
+ void RecvUpdate(uint32_t size) {
+ FlowControlTrace trace("t updt recv", this, nullptr);
+ remote_window_ += size;
+ }
+
+ int64_t remote_window() const { return remote_window_; }
+ int64_t target_window() const {
+ return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
+ announced_stream_total_over_incoming_window_ +
+ target_initial_window_size_);
+ }
+ int64_t announced_window() const { return announced_window_; }
+
+ const grpc_chttp2_transport* transport() const { return t_; }
+
+ void PreUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
+ if (delta > 0) {
+ announced_stream_total_over_incoming_window_ -= delta;
+ } else {
+ announced_stream_total_under_incoming_window_ += -delta;
+ }
+ }
+
+ void PostUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
+ if (delta > 0) {
+ announced_stream_total_over_incoming_window_ += delta;
+ } else {
+ announced_stream_total_under_incoming_window_ -= -delta;
+ }
+ }
+
+ BdpEstimator* bdp_estimator() { return &bdp_estimator_; }
+
+ void TestOnlyForceHugeWindow() {
+ announced_window_ = 1024 * 1024 * 1024;
+ remote_window_ = 1024 * 1024 * 1024;
+ }
+
+ private:
+ friend class ::grpc::testing::TrickledCHTTP2;
+ double TargetLogBdp();
+ double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value);
+ FlowControlAction::Urgency DeltaUrgency(int32_t value,
+ grpc_chttp2_setting_id setting_id);
+
+ FlowControlAction UpdateAction(FlowControlAction action) {
+ if (announced_window_ < target_window() / 2) {
+ action.set_send_transport_update(
+ FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
+ }
+ return action;
+ }
+
+ const grpc_chttp2_transport* const t_;
+
+ /** Our bookkeeping for the remote peer's available window */
+ int64_t remote_window_ = kDefaultWindow;
+
+ /** calculating what we should give for local window:
+ we track the total amount of flow control over initial window size
+ across all streams: this is data that we want to receive right now (it
+ has an outstanding read)
+ and the total amount of flow control under initial window size across all
+ streams: this is data we've read early
+ we want to adjust incoming_window such that:
+ incoming_window = total_over - max(bdp - total_under, 0) */
+ int64_t announced_stream_total_over_incoming_window_ = 0;
+ int64_t announced_stream_total_under_incoming_window_ = 0;
+
+  /** This is our window according to what we have sent to our remote peer. The
+ * difference between this and target window is what we use to decide when
+ * to send WINDOW_UPDATE frames. */
+ int64_t announced_window_ = kDefaultWindow;
+
+ int32_t target_initial_window_size_ = kDefaultWindow;
+
+ /** should we probe bdp? */
+ const bool enable_bdp_probe_;
+
+ /* bdp estimation */
+ grpc_core::BdpEstimator bdp_estimator_;
+
+ /* pid controller */
+ grpc_core::PidController pid_controller_;
+ grpc_millis last_pid_update_ = 0;
+};
+
+class StreamFlowControl {
+ public:
+ StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s);
+ ~StreamFlowControl() {
+ tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ }
+
+ FlowControlAction UpdateAction(FlowControlAction action);
+ FlowControlAction MakeAction() { return UpdateAction(tfc_->MakeAction()); }
+
+ // we have sent data on the wire, we must track this in our bookkeeping for
+ // the remote peer's flow control.
+ void SentData(int64_t outgoing_frame_size) {
+ FlowControlTrace tracer(" data sent", tfc_, this);
+ tfc_->StreamSentData(outgoing_frame_size);
+ remote_window_delta_ -= outgoing_frame_size;
+ }
+
+ // we have received data from the wire
+ grpc_error* RecvData(int64_t incoming_frame_size);
+
+ // returns an announce if we should send a stream update to our peer, else
+ // returns zero
+ uint32_t MaybeSendUpdate();
+
+ // we have received a WINDOW_UPDATE frame for a stream
+ void RecvUpdate(uint32_t size) {
+ FlowControlTrace trace("s updt recv", tfc_, this);
+ remote_window_delta_ += size;
+ }
+
+ // the application is asking for a certain amount of bytes
+ void IncomingByteStreamUpdate(size_t max_size_hint, size_t have_already);
+
+ int64_t remote_window_delta() const { return remote_window_delta_; }
+ int64_t local_window_delta() const { return local_window_delta_; }
+ int64_t announced_window_delta() const { return announced_window_delta_; }
+
+ const grpc_chttp2_stream* stream() const { return s_; }
+
+ void TestOnlyForceHugeWindow() {
+ announced_window_delta_ = 1024 * 1024 * 1024;
+ local_window_delta_ = 1024 * 1024 * 1024;
+ remote_window_delta_ = 1024 * 1024 * 1024;
+ }
+
+ private:
+ friend class ::grpc::testing::TrickledCHTTP2;
+ TransportFlowControl* const tfc_;
+ const grpc_chttp2_stream* const s_;
+
+ void UpdateAnnouncedWindowDelta(TransportFlowControl* tfc, int64_t change) {
+ tfc->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ announced_window_delta_ += change;
+ tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ }
+
+  /** window available for us to send to peer, over or under the initial
+   * window size of the transport... ie:
+   *    remote_window = remote_window_delta + transport.initial_window_size */
+ int64_t remote_window_delta_ = 0;
+
+ /** window available for peer to send to us (as a delta on
+ * transport.initial_window_size)
+ * local_window = local_window_delta + transport.initial_window_size */
+ int64_t local_window_delta_ = 0;
+
+ /** window available for peer to send to us over this stream that we have
+ * announced to the peer */
+ int64_t announced_window_delta_ = 0;
+};
+
+} // namespace chttp2
+} // namespace grpc_core
+
+#endif
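FlowControlAction's setters return *this, so call sites can compose a decision in one expression before handing it to grpc_chttp2_act_on_flowctl_action(). A minimal usage sketch follows; the urgency and window values are made up, and exec_ctx, t and s stand for the usual transport arguments available at a real call site.

// Sketch only: assumes flow_control.h above plus a valid exec_ctx/t/s.
using grpc_core::chttp2::FlowControlAction;

FlowControlAction action =
    FlowControlAction()
        .set_send_transport_update(
            FlowControlAction::Urgency::UPDATE_IMMEDIATELY)
        .set_send_initial_window_update(FlowControlAction::Urgency::QUEUE_UPDATE,
                                        /*update=*/65535);
if (GRPC_TRACER_ON(grpc_flowctl_trace)) action.Trace(t);
grpc_chttp2_act_on_flowctl_action(exec_ctx, action, t, s);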
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.cc b/src/core/ext/transport/chttp2/transport/frame_settings.cc
index 2995bf7310..db0245bb57 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc
@@ -202,13 +202,13 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
}
if (id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[id] != parser->value) {
- t->flow_control.initial_window_update +=
+ t->initial_window_update +=
(int64_t)parser->value - parser->incoming_settings[id];
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_flowctl_trace)) {
gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change",
t, t->is_client ? "cli" : "svr",
- (int)t->flow_control.initial_window_update);
+ (int)t->initial_window_update);
}
}
parser->incoming_settings[id] = parser->value;
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.cc b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
index c9ab8d1b50..15eaf59285 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
@@ -96,8 +96,7 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
if (t->incoming_stream_id != 0) {
if (s != NULL) {
- grpc_chttp2_flowctl_recv_stream_update(
- &t->flow_control, &s->flow_control, received_update);
+ s->flow_control->RecvUpdate(received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
grpc_chttp2_initiate_write(
@@ -106,10 +105,9 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
}
} else {
- bool was_zero = t->flow_control.remote_window <= 0;
- grpc_chttp2_flowctl_recv_transport_update(&t->flow_control,
- received_update);
- bool is_zero = t->flow_control.remote_window <= 0;
+ bool was_zero = t->flow_control->remote_window() <= 0;
+ t->flow_control->RecvUpdate(received_update);
+ bool is_zero = t->flow_control->remote_window() <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(
exec_ctx, t,
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index 3d1df19bc3..7c17229122 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -33,6 +33,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/http2_errors.h"
@@ -650,9 +651,14 @@ static const uint8_t inverse_base64[256] = {
/* emission helpers */
static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
grpc_mdelem md, int add_to_table) {
- if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(md)) {
+ if (GRPC_TRACER_ON(grpc_http_trace)) {
char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char *v = NULL;
+ if (grpc_is_binary_header(GRPC_MDKEY(md))) {
+ v = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX);
+ } else {
+ v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ }
gpr_log(
GPR_DEBUG,
"Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index c75f813393..9e0796e820 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -22,6 +22,7 @@
#include <assert.h>
#include <stdbool.h>
+#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/frame_data.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
@@ -38,9 +39,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/manual_constructor.h"
-#include "src/core/lib/transport/bdp_estimator.h"
#include "src/core/lib/transport/connectivity_state.h"
-#include "src/core/lib/transport/pid_controller.h"
#include "src/core/lib/transport/transport_impl.h"
#ifdef __cplusplus
@@ -238,48 +237,6 @@ typedef enum {
GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED,
} grpc_chttp2_keepalive_state;
-typedef struct {
- /** initial window change. This is tracked as we parse settings frames from
- * the remote peer. If there is a positive delta, then we will make all
- * streams readable since they may have become unstalled */
- int64_t initial_window_update;
-
- /** Our bookkeeping for the remote peer's available window */
- int64_t remote_window;
-
- /** calculating what we should give for local window:
- we track the total amount of flow control over initial window size
- across all streams: this is data that we want to receive right now (it
- has an outstanding read)
- and the total amount of flow control under initial window size across all
- streams: this is data we've read early
- we want to adjust incoming_window such that:
- incoming_window = total_over - max(bdp - total_under, 0) */
- int64_t announced_stream_total_over_incoming_window;
- int64_t announced_stream_total_under_incoming_window;
-
- /** This is out window according to what we have sent to our remote peer. The
- * difference between this and target window is what we use to decide when
- * to send WINDOW_UPDATE frames. */
- int64_t announced_window;
-
- int32_t target_initial_window_size;
-
- /** should we probe bdp? */
- bool enable_bdp_probe;
-
- /* bdp estimation */
- grpc_core::ManualConstructor<grpc_core::BdpEstimator> bdp_estimator;
-
- /* pid controller */
- bool pid_controller_initialized;
- grpc_core::ManualConstructor<grpc_core::PidController> pid_controller;
- grpc_millis last_pid_update;
-
- // pointer back to transport for tracing
- const grpc_chttp2_transport *t;
-} grpc_chttp2_transport_flowctl;
-
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
@@ -395,7 +352,12 @@ struct grpc_chttp2_transport {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
- grpc_chttp2_transport_flowctl flow_control;
+ grpc_core::ManualConstructor<grpc_core::chttp2::TransportFlowControl>
+ flow_control;
+ /** initial window change. This is tracked as we parse settings frames from
+ * the remote peer. If there is a positive delta, then we will make all
+ * streams readable since they may have become unstalled */
+ int64_t initial_window_update = 0;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
@@ -477,25 +439,6 @@ typedef enum {
GPRC_METADATA_PUBLISHED_AT_CLOSE
} grpc_published_metadata_method;
-typedef struct {
- /** window available for us to send to peer, over or under the initial window
- * size of the transport... ie:
- * remote_window = remote_window_delta + transport.initial_window_size */
- int64_t remote_window_delta;
-
- /** window available for peer to send to us (as a delta on
- * transport.initial_window_size)
- * local_window = local_window_delta + transport.initial_window_size */
- int64_t local_window_delta;
-
- /** window available for peer to send to us over this stream that we have
- * announced to the peer */
- int64_t announced_window_delta;
-
- // read only pointer back to stream for data
- const grpc_chttp2_stream *s;
-} grpc_chttp2_stream_flowctl;
-
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
@@ -589,7 +532,8 @@ struct grpc_chttp2_stream {
bool sent_initial_metadata;
bool sent_trailing_metadata;
- grpc_chttp2_stream_flowctl flow_control;
+ grpc_core::ManualConstructor<grpc_core::chttp2::StreamFlowControl>
+ flow_control;
grpc_slice_buffer flow_controlled_buffer;
@@ -700,73 +644,10 @@ bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
/********* Flow Control ***************/
-// we have sent data on the wire
-void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- int64_t size);
-
-// we have received data from the wire
-grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- int64_t incoming_frame_size);
-
-// returns an announce if we should send a transport update to our peer,
-// else returns zero
-uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl *tfc, bool writing_anyway);
-
-// returns an announce if we should send a stream update to our peer, else
-// returns zero
-uint32_t grpc_chttp2_flowctl_maybe_send_stream_update(
- grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc);
-
-// we have received a WINDOW_UPDATE frame for a transport
-void grpc_chttp2_flowctl_recv_transport_update(
- grpc_chttp2_transport_flowctl *tfc, uint32_t size);
-
-// we have received a WINDOW_UPDATE frame for a stream
-void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- uint32_t size);
-
-// the application is asking for a certain amount of bytes
-void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- size_t max_size_hint,
- size_t have_already);
-
-void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc);
-
-typedef enum {
- // Nothing to be done.
- GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED = 0,
- // Initiate a write to update the initial window immediately.
- GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY,
- // Push the flow control update into a send buffer, to be sent
- // out the next time a write is initiated.
- GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE,
-} grpc_chttp2_flowctl_urgency;
-
-typedef struct {
- grpc_chttp2_flowctl_urgency send_stream_update;
- grpc_chttp2_flowctl_urgency send_transport_update;
- grpc_chttp2_flowctl_urgency send_setting_update;
- uint32_t initial_window_size;
- uint32_t max_frame_size;
-} grpc_chttp2_flowctl_action;
-
-// Reads the flow control data and returns and actionable struct that will tell
-// chttp2 exactly what it needs to do
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc);
-
// Takes in a flow control action and performs all the needed operations.
-void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_flowctl_action action,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+void grpc_chttp2_act_on_flowctl_action(
+ grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s);
/********* End of Flow Control ***************/
@@ -800,16 +681,6 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
-#ifndef NDEBUG
-#define GRPC_FLOW_CONTROL_IF_TRACING(stmt) \
- if (!(GRPC_TRACER_ON(grpc_flowctl_trace))) \
- ; \
- else \
- stmt
-#else
-#define GRPC_FLOW_CONTROL_IF_TRACING(stmt)
-#endif
-
#define GRPC_CHTTP2_IF_TRACING(stmt) \
if (!(GRPC_TRACER_ON(grpc_http_trace))) \
; \
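internal.h now stores the flow-control state in grpc_core::ManualConstructor fields, so the transport placement-constructs the objects explicitly instead of memset-initializing plain structs. A minimal lifecycle sketch, assuming ManualConstructor's Init/get/Destroy interface from src/core/lib/support/manual_constructor.h; the surrounding init/teardown code and the source of enable_bdp_probe are omitted here.

// Transport init: construct TransportFlowControl in its pre-allocated storage.
t->flow_control.Init(exec_ctx, t, enable_bdp_probe);

// Stream init: each StreamFlowControl keeps a pointer back to the transport's.
s->flow_control.Init(t->flow_control.get(), s);

// Stream teardown: ~StreamFlowControl() returns the stream's announced
// window delta to the transport's bookkeeping.
s->flow_control.Destroy();

// Transport teardown.
t->flow_control.Destroy();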
diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc
index 78886b497a..efa5791d3f 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.cc
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -355,14 +355,15 @@ static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_NONE;
- err = grpc_chttp2_flowctl_recv_data(&t->flow_control,
- s == NULL ? NULL : &s->flow_control,
- t->incoming_frame_size);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
- s == NULL ? NULL : &s->flow_control),
- t, s);
+ grpc_core::chttp2::FlowControlAction action;
+ if (s == nullptr) {
+ err = t->flow_control->RecvData(t->incoming_frame_size);
+ action = t->flow_control->MakeAction();
+ } else {
+ err = s->flow_control->RecvData(t->incoming_frame_size);
+ action = s->flow_control->MakeAction();
+ }
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, action, t, s);
if (err != GRPC_ERROR_NONE) {
goto error_handler;
}
@@ -434,19 +435,21 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_millis *cached_timeout =
static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout));
grpc_millis timeout;
- if (cached_timeout == NULL) {
- /* not already parsed: parse it now, and store the result away */
- cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
- if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
+ if (cached_timeout != NULL) {
+ timeout = *cached_timeout;
+ } else {
+ if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
gpr_free(val);
- *cached_timeout = GRPC_MILLIS_INF_FUTURE;
+ timeout = GRPC_MILLIS_INF_FUTURE;
+ }
+ if (GRPC_MDELEM_IS_INTERNED(md)) {
+ /* store the result */
+ cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
+ *cached_timeout = timeout;
+ grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
- timeout = *cached_timeout;
- grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
- } else {
- timeout = *cached_timeout;
}
if (timeout != GRPC_MILLIS_INF_FUTURE) {
grpc_chttp2_incoming_metadata_buffer_set_deadline(
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index c6fecf2ee9..ff76a5fcdb 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -146,13 +146,13 @@ static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
- t->flow_control.remote_window,
+ t->flow_control->remote_window(),
(uint32_t)GPR_MAX(
0,
- s->flow_control.remote_window_delta +
+ s->flow_control->remote_window_delta() +
(int64_t)t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
- s->flow_control.remote_window_delta);
+ s->flow_control->remote_window_delta());
}
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
@@ -216,8 +216,7 @@ class WriteContext {
void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
uint32_t transport_announce =
- grpc_chttp2_flowctl_maybe_send_transport_update(&t_->flow_control,
- t_->outbuf.count > 0);
+ t_->flow_control->MaybeSendUpdate(t_->outbuf.count > 0);
if (transport_announce) {
grpc_transport_one_way_stats throwaway_stats;
grpc_slice_buffer_add(
@@ -312,7 +311,7 @@ class DataSendContext {
uint32_t stream_remote_window() const {
return (uint32_t)GPR_MAX(
- 0, s_->flow_control.remote_window_delta +
+ 0, s_->flow_control->remote_window_delta() +
(int64_t)t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
}
@@ -320,7 +319,7 @@ class DataSendContext {
uint32_t max_outgoing() const {
return (uint32_t)GPR_MIN(
t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- GPR_MIN(stream_remote_window(), t_->flow_control.remote_window));
+ GPR_MIN(stream_remote_window(), t_->flow_control->remote_window()));
}
bool AnyOutgoing() const { return max_outgoing() != 0; }
@@ -352,8 +351,7 @@ class DataSendContext {
grpc_metadata_batch_is_empty(s_->send_trailing_metadata);
grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes,
is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
- grpc_chttp2_flowctl_sent_data(&t_->flow_control, &s_->flow_control,
- send_bytes);
+ s_->flow_control->SentData(send_bytes);
if (s_->compressed_data_buffer.length == 0) {
s_->sending_bytes += s_->uncompressed_data_size;
}
@@ -400,8 +398,8 @@ class StreamWriteContext {
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
t_->is_client ? "CLIENT" : "SERVER", s->id,
s->sent_initial_metadata, s->send_initial_metadata != NULL,
- (int)(s->flow_control.local_window_delta -
- s->flow_control.announced_window_delta)));
+ (int)(s->flow_control->local_window_delta() -
+ s->flow_control->announced_window_delta())));
}
void FlushInitialMetadata(grpc_exec_ctx *exec_ctx) {
@@ -447,8 +445,7 @@ class StreamWriteContext {
void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
/* send any window updates */
- uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
- &t_->flow_control, &s_->flow_control);
+ const uint32_t stream_announce = s_->flow_control->MaybeSendUpdate();
if (stream_announce == 0) return;
grpc_slice_buffer_add(
@@ -469,10 +466,10 @@ class StreamWriteContext {
DataSendContext data_send_context(write_context_, t_, s_);
if (!data_send_context.AnyOutgoing()) {
- if (t_->flow_control.remote_window == 0) {
+ if (t_->flow_control->remote_window() <= 0) {
report_stall(t_, s_, "transport");
grpc_chttp2_list_add_stalled_by_transport(t_, s_);
- } else if (data_send_context.stream_remote_window() == 0) {
+ } else if (data_send_context.stream_remote_window() <= 0) {
report_stall(t_, s_, "stream");
grpc_chttp2_list_add_stalled_by_stream(t_, s_);
}
@@ -588,7 +585,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
ctx.FlushQueuedBuffers(exec_ctx);
ctx.EnactHpackSettings(exec_ctx);
- if (t->flow_control.remote_window > 0) {
+ if (t->flow_control->remote_window() > 0) {
ctx.UpdateStreamsNoLongerStalled();
}
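For reference, stream_remote_window() in the writing path above is the stream's delta plus the peer's advertised INITIAL_WINDOW_SIZE, floored at zero. A worked example with made-up numbers:

int64_t remote_window_delta = -10000;  // 10000 bytes sent, not yet refunded
int64_t peer_initial_window = 65535;   // GRPC_PEER_SETTINGS value
uint32_t stream_remote_window =
    (uint32_t)GPR_MAX(0, remote_window_delta + peer_initial_window);  // 55535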
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index ff1367fb28..97e4f7d72b 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -692,7 +692,7 @@ static void create_grpc_frame(grpc_exec_ctx *exec_ctx,
uint8_t *p = (uint8_t *)write_buffer;
/* Append 5 byte header */
/* Compressed flag */
- *p++ = (flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0;
+ *p++ = (uint8_t)((flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0);
/* Message length */
*p++ = (uint8_t)(length >> 24);
*p++ = (uint8_t)(length >> 16);
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 67a8358927..1551f5e988 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -623,7 +623,7 @@ static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
} else {
- if (other && !other->closed) {
+ if (!other || !other->closed) {
fill_in_metadata(exec_ctx, s,
s->send_trailing_md_op->payload->send_trailing_metadata
.send_trailing_metadata,
@@ -925,7 +925,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
} else {
- if (!other->closed) {
+ if (!other || !other->closed) {
fill_in_metadata(
exec_ctx, s,
op->payload->send_initial_metadata.send_initial_metadata,
@@ -976,7 +976,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
(other->recv_trailing_md_op != NULL))) ||
(op->send_trailing_metadata && !op->send_message) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||
- (op->recv_message && (other && other->send_message_op != NULL)) ||
+ (op->recv_message && other && (other->send_message_op != NULL)) ||
(s->to_read_trailing_md_filled || s->trailing_md_recvd)) {
if (!s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_NONE);