path: root/src/core/lib/surface
author    Vijay Pai <vpai@google.com>  2016-03-28 11:09:21 -0700
committer Vijay Pai <vpai@google.com>  2016-03-28 11:09:21 -0700
commit d0facacece41cb4ae887cb565d8d8447249d4adb (patch)
tree   f90fa102c47dd9972f15258a1af3ae0e6e6c3e60 /src/core/lib/surface
parent 761f7bc3e4cb87af5521eae8803d27dbee64c958 (diff)
parent e5cc05b5c6cee7026a0d28d39925621451506820 (diff)
Merge branch 'master' into make_unix_sockets_great_again
Diffstat (limited to 'src/core/lib/surface')
-rw-r--r--  src/core/lib/surface/alarm.c | 84
-rw-r--r--  src/core/lib/surface/api_trace.c | 36
-rw-r--r--  src/core/lib/surface/api_trace.h | 65
-rw-r--r--  src/core/lib/surface/byte_buffer.c | 97
-rw-r--r--  src/core/lib/surface/byte_buffer_reader.c | 123
-rw-r--r--  src/core/lib/surface/call.c | 1491
-rw-r--r--  src/core/lib/surface/call.h | 116
-rw-r--r--  src/core/lib/surface/call_details.c | 50
-rw-r--r--  src/core/lib/surface/call_log_batch.c | 118
-rw-r--r--  src/core/lib/surface/call_test_only.h | 64
-rw-r--r--  src/core/lib/surface/channel.c | 324
-rw-r--r--  src/core/lib/surface/channel.h | 75
-rw-r--r--  src/core/lib/surface/channel_connectivity.c | 207
-rw-r--r--  src/core/lib/surface/channel_create.c | 223
-rw-r--r--  src/core/lib/surface/channel_init.c | 146
-rw-r--r--  src/core/lib/surface/channel_init.h | 86
-rw-r--r--  src/core/lib/surface/channel_ping.c | 79
-rw-r--r--  src/core/lib/surface/channel_stack_type.c | 54
-rw-r--r--  src/core/lib/surface/channel_stack_type.h | 58
-rw-r--r--  src/core/lib/surface/completion_queue.c | 512
-rw-r--r--  src/core/lib/surface/completion_queue.h | 91
-rw-r--r--  src/core/lib/surface/event_string.c | 81
-rw-r--r--  src/core/lib/surface/event_string.h | 42
-rw-r--r--  src/core/lib/surface/init.c | 239
-rw-r--r--  src/core/lib/surface/init.h | 41
-rw-r--r--  src/core/lib/surface/init_secure.c | 89
-rw-r--r--  src/core/lib/surface/init_unsecure.c | 38
-rw-r--r--  src/core/lib/surface/lame_client.c | 155
-rw-r--r--  src/core/lib/surface/lame_client.h | 41
-rw-r--r--  src/core/lib/surface/metadata_array.c | 49
-rw-r--r--  src/core/lib/surface/secure_channel_create.c | 319
-rw-r--r--  src/core/lib/surface/server.c | 1319
-rw-r--r--  src/core/lib/surface/server.h | 62
-rw-r--r--  src/core/lib/surface/server_chttp2.c | 146
-rw-r--r--  src/core/lib/surface/surface_trace.h | 48
-rw-r--r--  src/core/lib/surface/validate_metadata.c | 73
-rw-r--r--  src/core/lib/surface/version.c | 39
37 files changed, 6880 insertions, 0 deletions
diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c
new file mode 100644
index 0000000000..368683378e
--- /dev/null
+++ b/src/core/lib/surface/alarm.c
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+struct grpc_alarm {
+ grpc_timer alarm;
+ grpc_cq_completion completion;
+ /** completion queue where events about this alarm will be posted */
+ grpc_completion_queue *cq;
+ /** user supplied tag */
+ void *tag;
+};
+
+static void do_nothing_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_cq_completion *c) {}
+
+static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_alarm *alarm = arg;
+ grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, success,
+ do_nothing_end_completion, NULL, &alarm->completion);
+}
+
+grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
+ void *tag) {
+ grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_CQ_INTERNAL_REF(cq, "alarm");
+ alarm->cq = cq;
+ alarm->tag = tag;
+
+ grpc_cq_begin_op(cq, tag);
+ grpc_timer_init(&exec_ctx, &alarm->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ alarm_cb, alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_exec_ctx_finish(&exec_ctx);
+ return alarm;
+}
+
+void grpc_alarm_cancel(grpc_alarm *alarm) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_timer_cancel(&exec_ctx, &alarm->alarm);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_alarm_destroy(grpc_alarm *alarm) {
+ grpc_alarm_cancel(alarm);
+ GRPC_CQ_INTERNAL_UNREF(alarm->cq, "alarm");
+ gpr_free(alarm);
+}
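
For context, a minimal caller-side sketch of this alarm API. It assumes the public completion-queue and timing functions from <grpc/grpc.h> and <grpc/support/time.h> (grpc_completion_queue_create/next/shutdown/destroy, gpr_time_add, gpr_now), which are not part of this diff: create an alarm against a cq, wait for its completion event, then tear everything down.

/* Illustrative usage sketch, not part of this commit. */
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

static void alarm_example(void) {
  grpc_completion_queue *cq;
  grpc_alarm *alarm;
  grpc_event ev;
  gpr_timespec deadline;
  int tag = 42; /* any pointer-sized value serves as the tag */

  grpc_init();
  cq = grpc_completion_queue_create(NULL);

  /* Deadline ~1s from now; grpc_alarm_create converts it to
     GPR_CLOCK_MONOTONIC internally. */
  deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_seconds(1, GPR_TIMESPAN));
  alarm = grpc_alarm_create(cq, deadline, &tag);

  /* The event's success flag is 1 if the deadline expired,
     0 if grpc_alarm_cancel ran first. */
  ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(ev.tag == &tag);

  grpc_alarm_destroy(alarm); /* cancels if still pending, drops the cq ref */
  grpc_completion_queue_shutdown(cq);
  for (;;) {
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}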
diff --git a/src/core/lib/surface/api_trace.c b/src/core/lib/surface/api_trace.c
new file mode 100644
index 0000000000..3702c024db
--- /dev/null
+++ b/src/core/lib/surface/api_trace.c
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/api_trace.h"
+
+int grpc_api_trace = 0;
diff --git a/src/core/lib/surface/api_trace.h b/src/core/lib/surface/api_trace.h
new file mode 100644
index 0000000000..b50011c9e5
--- /dev/null
+++ b/src/core/lib/surface/api_trace.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_API_TRACE_H
+#define GRPC_CORE_LIB_SURFACE_API_TRACE_H
+
+#include <grpc/support/log.h>
+#include "src/core/lib/debug/trace.h"
+
+extern int grpc_api_trace;
+
+/* Provide unwrapping macros because we're in C89 and variadic macros weren't
+ introduced until C99... */
+#define GRPC_API_TRACE_UNWRAP0()
+#define GRPC_API_TRACE_UNWRAP1(a) , a
+#define GRPC_API_TRACE_UNWRAP2(a, b) , a, b
+#define GRPC_API_TRACE_UNWRAP3(a, b, c) , a, b, c
+#define GRPC_API_TRACE_UNWRAP4(a, b, c, d) , a, b, c, d
+#define GRPC_API_TRACE_UNWRAP5(a, b, c, d, e) , a, b, c, d, e
+#define GRPC_API_TRACE_UNWRAP6(a, b, c, d, e, f) , a, b, c, d, e, f
+#define GRPC_API_TRACE_UNWRAP7(a, b, c, d, e, f, g) , a, b, c, d, e, f, g
+#define GRPC_API_TRACE_UNWRAP8(a, b, c, d, e, f, g, h) , a, b, c, d, e, f, g, h
+#define GRPC_API_TRACE_UNWRAP9(a, b, c, d, e, f, g, h, i) \
+ , a, b, c, d, e, f, g, h, i
+#define GRPC_API_TRACE_UNWRAP10(a, b, c, d, e, f, g, h, i, j) \
+ , a, b, c, d, e, f, g, h, i, j
+
+/* Due to the limitations of C89's preprocessor, the arity of the var-arg list
+ 'nargs' must be specified. */
+#define GRPC_API_TRACE(fmt, nargs, args) \
+ if (grpc_api_trace) { \
+ gpr_log(GPR_INFO, fmt GRPC_API_TRACE_UNWRAP##nargs args); \
+ }
+
+#endif /* GRPC_CORE_LIB_SURFACE_API_TRACE_H */
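
For readability, here is how this macro is invoked elsewhere in this same diff (grpc_call_cancel in call.c); the rough expansion shown in the comment assumes grpc_api_trace has been set non-zero.

#include "src/core/lib/surface/api_trace.h"

/* Illustrative only: the arity argument (2) selects GRPC_API_TRACE_UNWRAP2,
   which splices the parenthesized argument tuple into the gpr_log call. */
static void trace_example(void *call, void *reserved) {
  GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
  /* When grpc_api_trace is non-zero this expands to roughly:
     gpr_log(GPR_INFO, "grpc_call_cancel(call=%p, reserved=%p)",
             call, reserved); */
}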
diff --git a/src/core/lib/surface/byte_buffer.c b/src/core/lib/surface/byte_buffer.c
new file mode 100644
index 0000000000..03071ef92c
--- /dev/null
+++ b/src/core/lib/surface/byte_buffer.c
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
+ size_t nslices) {
+ return grpc_raw_compressed_byte_buffer_create(slices, nslices,
+ GRPC_COMPRESS_NONE);
+}
+
+grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
+ gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression) {
+ size_t i;
+ grpc_byte_buffer *bb = malloc(sizeof(grpc_byte_buffer));
+ bb->type = GRPC_BB_RAW;
+ bb->data.raw.compression = compression;
+ gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
+ for (i = 0; i < nslices; i++) {
+ gpr_slice_ref(slices[i]);
+ gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slices[i]);
+ }
+ return bb;
+}
+
+grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
+ grpc_byte_buffer_reader *reader) {
+ grpc_byte_buffer *bb = malloc(sizeof(grpc_byte_buffer));
+ gpr_slice slice;
+ bb->type = GRPC_BB_RAW;
+ bb->data.raw.compression = GRPC_COMPRESS_NONE;
+ gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
+
+ while (grpc_byte_buffer_reader_next(reader, &slice)) {
+ gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slice);
+ }
+ return bb;
+}
+
+grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
+ switch (bb->type) {
+ case GRPC_BB_RAW:
+ return grpc_raw_byte_buffer_create(bb->data.raw.slice_buffer.slices,
+ bb->data.raw.slice_buffer.count);
+ }
+ GPR_UNREACHABLE_CODE(return NULL);
+}
+
+void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
+ if (!bb) return;
+ switch (bb->type) {
+ case GRPC_BB_RAW:
+ gpr_slice_buffer_destroy(&bb->data.raw.slice_buffer);
+ break;
+ }
+ free(bb);
+}
+
+size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
+ switch (bb->type) {
+ case GRPC_BB_RAW:
+ return bb->data.raw.slice_buffer.length;
+ }
+ GPR_UNREACHABLE_CODE(return 0);
+}
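
A minimal usage sketch for this byte-buffer API, assuming gpr_slice_from_copied_string and GPR_ASSERT from the public gpr headers (not part of this diff): wrap a slice in a raw buffer, check its length, copy it, and destroy both.

/* Illustrative usage sketch, not part of this commit. */
#include <grpc/byte_buffer.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>

static void byte_buffer_example(void) {
  gpr_slice slice;
  grpc_byte_buffer *bb;
  grpc_byte_buffer *copy;

  slice = gpr_slice_from_copied_string("hello world");
  bb = grpc_raw_byte_buffer_create(&slice, 1);
  gpr_slice_unref(slice); /* the buffer took its own reference to the slice */

  GPR_ASSERT(grpc_byte_buffer_length(bb) == 11);

  copy = grpc_byte_buffer_copy(bb); /* shares references to the same slices */
  grpc_byte_buffer_destroy(bb);
  grpc_byte_buffer_destroy(copy);
}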
diff --git a/src/core/lib/surface/byte_buffer_reader.c b/src/core/lib/surface/byte_buffer_reader.c
new file mode 100644
index 0000000000..7248f5fe71
--- /dev/null
+++ b/src/core/lib/surface/byte_buffer_reader.c
@@ -0,0 +1,123 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/byte_buffer_reader.h>
+#include <string.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/lib/compression/message_compress.h"
+
+static int is_compressed(grpc_byte_buffer *buffer) {
+ switch (buffer->type) {
+ case GRPC_BB_RAW:
+ if (buffer->data.raw.compression == GRPC_COMPRESS_NONE) {
+ return 0 /* GPR_FALSE */;
+ }
+ break;
+ }
+ return 1 /* GPR_TRUE */;
+}
+
+void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+ grpc_byte_buffer *buffer) {
+ gpr_slice_buffer decompressed_slices_buffer;
+ reader->buffer_in = buffer;
+ switch (reader->buffer_in->type) {
+ case GRPC_BB_RAW:
+ gpr_slice_buffer_init(&decompressed_slices_buffer);
+ if (is_compressed(reader->buffer_in)) {
+ grpc_msg_decompress(reader->buffer_in->data.raw.compression,
+ &reader->buffer_in->data.raw.slice_buffer,
+ &decompressed_slices_buffer);
+ reader->buffer_out =
+ grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
+ decompressed_slices_buffer.count);
+ gpr_slice_buffer_destroy(&decompressed_slices_buffer);
+ } else { /* not compressed, use the input buffer as output */
+ reader->buffer_out = reader->buffer_in;
+ }
+ reader->current.index = 0;
+ break;
+ }
+}
+
+void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
+ switch (reader->buffer_in->type) {
+ case GRPC_BB_RAW:
+ /* keeping the same if-else structure as in the init function */
+ if (is_compressed(reader->buffer_in)) {
+ grpc_byte_buffer_destroy(reader->buffer_out);
+ }
+ break;
+ }
+}
+
+int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
+ gpr_slice *slice) {
+ switch (reader->buffer_in->type) {
+ case GRPC_BB_RAW: {
+ gpr_slice_buffer *slice_buffer;
+ slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
+ if (reader->current.index < slice_buffer->count) {
+ *slice = gpr_slice_ref(slice_buffer->slices[reader->current.index]);
+ reader->current.index += 1;
+ return 1;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+gpr_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) {
+ gpr_slice in_slice;
+ size_t bytes_read = 0;
+ const size_t input_size = grpc_byte_buffer_length(reader->buffer_out);
+ gpr_slice out_slice = gpr_slice_malloc(input_size);
+ uint8_t *const outbuf = GPR_SLICE_START_PTR(out_slice); /* just an alias */
+
+ while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) {
+ const size_t slice_length = GPR_SLICE_LENGTH(in_slice);
+ memcpy(&(outbuf[bytes_read]), GPR_SLICE_START_PTR(in_slice), slice_length);
+ bytes_read += slice_length;
+ gpr_slice_unref(in_slice);
+ GPR_ASSERT(bytes_read <= input_size);
+ }
+ return out_slice;
+}
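
A minimal reader sketch, assuming an uncompressed GRPC_BB_RAW input buffer and the gpr slice helpers from the public headers (not part of this diff): initialize a reader over a buffer, read everything into one contiguous slice, and clean up.

/* Illustrative usage sketch, not part of this commit. */
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>

static void reader_example(grpc_byte_buffer *bb /* assumed uncompressed */) {
  grpc_byte_buffer_reader reader;
  gpr_slice all;

  grpc_byte_buffer_reader_init(&reader, bb);

  /* readall concatenates every remaining slice into one slice; for an
     uncompressed GRPC_BB_RAW buffer its length equals
     grpc_byte_buffer_length(bb). */
  all = grpc_byte_buffer_reader_readall(&reader);
  GPR_ASSERT(GPR_SLICE_LENGTH(all) == grpc_byte_buffer_length(bb));

  gpr_slice_unref(all);
  grpc_byte_buffer_reader_destroy(&reader);
}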
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
new file mode 100644
index 0000000000..d63a4a7401
--- /dev/null
+++ b/src/core/lib/surface/call.c
@@ -0,0 +1,1491 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/compression/algorithm_metadata.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+/** The maximum number of concurrent batches possible.
+ Based upon the maximum number of individually queueable ops in the batch
+ api:
+ - initial metadata send
+ - message send
+ - status/close send (depending on client/server)
+ - initial metadata recv
+ - message recv
+ - status/close recv (depending on client/server) */
+#define MAX_CONCURRENT_BATCHES 6
+
+typedef struct {
+ grpc_ioreq_completion_func on_complete;
+ void *user_data;
+ int success;
+} completed_request;
+
+#define MAX_SEND_EXTRA_METADATA_COUNT 3
+
+/* Status data for a request can come from several sources; this
+ enumerates them all, and acts as a priority sorting for which
+ status to return to the application - earlier entries override
+ later ones */
+typedef enum {
+ /* Status came from the application layer overriding whatever
+ the wire says */
+ STATUS_FROM_API_OVERRIDE = 0,
+ /* Status was created by some internal channel stack operation */
+ STATUS_FROM_CORE,
+ /* Status came from 'the wire' - or somewhere below the surface
+ layer */
+ STATUS_FROM_WIRE,
+ /* Status came from the server sending status */
+ STATUS_FROM_SERVER_STATUS,
+ STATUS_SOURCE_COUNT
+} status_source;
+
+typedef struct {
+ uint8_t is_set;
+ grpc_status_code code;
+ grpc_mdstr *details;
+} received_status;
+
+/* How far through the GRPC stream have we read? */
+typedef enum {
+ /* We are still waiting for initial metadata to complete */
+ READ_STATE_INITIAL = 0,
+ /* We have gotten initial metadata, and are reading either
+ messages or trailing metadata */
+ READ_STATE_GOT_INITIAL_METADATA,
+ /* The stream is closed for reading */
+ READ_STATE_READ_CLOSED,
+ /* The stream is closed for reading & writing */
+ READ_STATE_STREAM_CLOSED
+} read_state;
+
+typedef enum {
+ WRITE_STATE_INITIAL = 0,
+ WRITE_STATE_STARTED,
+ WRITE_STATE_WRITE_CLOSED
+} write_state;
+
+typedef struct batch_control {
+ grpc_call *call;
+ grpc_cq_completion cq_completion;
+ grpc_closure finish_batch;
+ void *notify_tag;
+ gpr_refcount steps_to_complete;
+
+ uint8_t send_initial_metadata;
+ uint8_t send_message;
+ uint8_t send_final_op;
+ uint8_t recv_initial_metadata;
+ uint8_t recv_message;
+ uint8_t recv_final_op;
+ uint8_t is_notify_tag_closure;
+ uint8_t success;
+} batch_control;
+
+struct grpc_call {
+ grpc_completion_queue *cq;
+ grpc_channel *channel;
+ grpc_call *parent;
+ grpc_call *first_child;
+ /* TODO(ctiller): share with cq if possible? */
+ gpr_mu mu;
+
+ /* client or server call */
+ uint8_t is_client;
+ /* is the alarm set */
+ uint8_t have_alarm;
+ /** has grpc_call_destroy been called */
+ uint8_t destroy_called;
+ /** flag indicating that cancellation is inherited */
+ uint8_t cancellation_is_inherited;
+ /** bitmask of live batches */
+ uint8_t used_batches;
+ /** which ops are in-flight */
+ uint8_t sent_initial_metadata;
+ uint8_t sending_message;
+ uint8_t sent_final_op;
+ uint8_t received_initial_metadata;
+ uint8_t receiving_message;
+ uint8_t received_final_op;
+
+ /* have we received initial metadata */
+ bool has_initial_md_been_received;
+
+ batch_control active_batches[MAX_CONCURRENT_BATCHES];
+
+ /* first idx: is_receiving, second idx: is_trailing */
+ grpc_metadata_batch metadata_batch[2][2];
+
+ /* Buffered read metadata waiting to be returned to the application.
+ Element 0 is initial metadata, element 1 is trailing metadata. */
+ grpc_metadata_array *buffered_metadata[2];
+
+ /* Received call statuses from various sources */
+ received_status status[STATUS_SOURCE_COUNT];
+
+ /* Compression algorithm for the call */
+ grpc_compression_algorithm compression_algorithm;
+ /* Supported encodings (compression algorithms), a bitset */
+ uint32_t encodings_accepted_by_peer;
+
+ /* Contexts for various subsystems (security, tracing, ...). */
+ grpc_call_context_element context[GRPC_CONTEXT_COUNT];
+
+ /* Deadline alarm - if have_alarm is non-zero */
+ grpc_timer alarm;
+
+ /* for the client, extra metadata is initial metadata; for the
+ server, it's trailing metadata */
+ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT];
+ int send_extra_metadata_count;
+ gpr_timespec send_deadline;
+
+ /** siblings: children of the same parent form a list, and this list is
+ protected under
+ parent->mu */
+ grpc_call *sibling_next;
+ grpc_call *sibling_prev;
+
+ grpc_slice_buffer_stream sending_stream;
+ grpc_byte_stream *receiving_stream;
+ grpc_byte_buffer **receiving_buffer;
+ gpr_slice receiving_slice;
+ grpc_closure receiving_slice_ready;
+ grpc_closure receiving_stream_ready;
+ grpc_closure receiving_initial_metadata_ready;
+ uint32_t test_only_last_message_flags;
+
+ union {
+ struct {
+ grpc_status_code *status;
+ char **status_details;
+ size_t *status_details_capacity;
+ } client;
+ struct {
+ int *cancelled;
+ } server;
+ } final_op;
+
+ struct {
+ void *bctlp;
+ bool success;
+ } saved_receiving_stream_ready_ctx;
+};
+
+#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
+#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call *)(call_stack)) - 1)
+#define CALL_ELEM_FROM_CALL(call, idx) \
+ grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx)
+#define CALL_FROM_TOP_ELEM(top_elem) \
+ CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
+
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline);
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op);
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
+ const char *description);
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
+ bool success);
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success);
+
+grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
+ uint32_t propagation_mask,
+ grpc_completion_queue *cq,
+ const void *server_transport_data,
+ grpc_mdelem **add_initial_metadata,
+ size_t add_initial_metadata_count,
+ gpr_timespec send_deadline) {
+ size_t i, j;
+ grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call *call;
+ GPR_TIMER_BEGIN("grpc_call_create", 0);
+ call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ memset(call, 0, sizeof(grpc_call));
+ gpr_mu_init(&call->mu);
+ call->channel = channel;
+ call->cq = cq;
+ call->parent = parent_call;
+ call->is_client = server_transport_data == NULL;
+ if (call->is_client) {
+ GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
+ for (i = 0; i < add_initial_metadata_count; i++) {
+ call->send_extra_metadata[i].md = add_initial_metadata[i];
+ }
+ call->send_extra_metadata_count = (int)add_initial_metadata_count;
+ } else {
+ GPR_ASSERT(add_initial_metadata_count == 0);
+ call->send_extra_metadata_count = 0;
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ }
+ }
+ call->send_deadline = send_deadline;
+ GRPC_CHANNEL_INTERNAL_REF(channel, "call");
+ /* initial refcount dropped by grpc_call_destroy */
+ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
+ call->context, server_transport_data,
+ CALL_STACK_FROM_CALL(call));
+ if (cq != NULL) {
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
+ }
+ if (parent_call != NULL) {
+ GRPC_CALL_INTERNAL_REF(parent_call, "child");
+ GPR_ASSERT(call->is_client);
+ GPR_ASSERT(!parent_call->is_client);
+
+ gpr_mu_lock(&parent_call->mu);
+
+ if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
+ send_deadline = gpr_time_min(
+ gpr_convert_clock_type(send_deadline,
+ parent_call->send_deadline.clock_type),
+ parent_call->send_deadline);
+ }
+ /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
+ * GRPC_PROPAGATE_STATS_CONTEXT */
+ /* TODO(ctiller): This should change to use the appropriate census start_op
+ * call. */
+ if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
+ GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ parent_call->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
+ } else {
+ GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ }
+ if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
+ call->cancellation_is_inherited = 1;
+ }
+
+ if (parent_call->first_child == NULL) {
+ parent_call->first_child = call;
+ call->sibling_next = call->sibling_prev = call;
+ } else {
+ call->sibling_next = parent_call->first_child;
+ call->sibling_prev = parent_call->first_child->sibling_prev;
+ call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
+ call;
+ }
+
+ gpr_mu_unlock(&parent_call->mu);
+ }
+ if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
+ 0) {
+ set_deadline_alarm(&exec_ctx, call, send_deadline);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_create", 0);
+ return call;
+}
+
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_completion_queue *cq) {
+ GPR_ASSERT(cq);
+ call->cq = cq;
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
+}
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
+#else
+#define REF_REASON ""
+#define REF_ARG
+#endif
+void grpc_call_internal_ref(grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
+}
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
+}
+
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, bool success) {
+ size_t i;
+ int ii;
+ grpc_call *c = call;
+ GPR_TIMER_BEGIN("destroy_call", 0);
+ for (i = 0; i < 2; i++) {
+ grpc_metadata_batch_destroy(
+ &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
+ }
+ if (c->receiving_stream != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
+ }
+ grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
+ gpr_mu_destroy(&c->mu);
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ if (c->status[i].details) {
+ GRPC_MDSTR_UNREF(c->status[i].details);
+ }
+ }
+ for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
+ GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
+ }
+ for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
+ if (c->context[i].destroy) {
+ c->context[i].destroy(c->context[i].value);
+ }
+ }
+ if (c->cq) {
+ GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
+ }
+ gpr_free(c);
+ GPR_TIMER_END("destroy_call", 0);
+}
+
+static void set_status_code(grpc_call *call, status_source source,
+ uint32_t status) {
+ if (call->status[source].is_set) return;
+
+ call->status[source].is_set = 1;
+ call->status[source].code = (grpc_status_code)status;
+
+ /* TODO(ctiller): what to do about the flush that was previously here */
+}
+
+static void set_compression_algorithm(grpc_call *call,
+ grpc_compression_algorithm algo) {
+ call->compression_algorithm = algo;
+}
+
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call) {
+ grpc_compression_algorithm algorithm;
+ gpr_mu_lock(&call->mu);
+ algorithm = call->compression_algorithm;
+ gpr_mu_unlock(&call->mu);
+ return algorithm;
+}
+
+uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
+ uint32_t flags;
+ gpr_mu_lock(&call->mu);
+ flags = call->test_only_last_message_flags;
+ gpr_mu_unlock(&call->mu);
+ return flags;
+}
+
+static void destroy_encodings_accepted_by_peer(void *p) { return; }
+
+static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
+ size_t i;
+ grpc_compression_algorithm algorithm;
+ gpr_slice_buffer accept_encoding_parts;
+ gpr_slice accept_encoding_slice;
+ void *accepted_user_data;
+
+ accepted_user_data =
+ grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
+ if (accepted_user_data != NULL) {
+ call->encodings_accepted_by_peer =
+ (uint32_t)(((uintptr_t)accepted_user_data) - 1);
+ return;
+ }
+
+ accept_encoding_slice = mdel->value->slice;
+ gpr_slice_buffer_init(&accept_encoding_parts);
+ gpr_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
+
+ /* No need to zero call->encodings_accepted_by_peer: grpc_call_create already
+ * zeroes the whole grpc_call */
+ /* Always support no compression */
+ GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
+ for (i = 0; i < accept_encoding_parts.count; i++) {
+ const gpr_slice *accept_encoding_entry_slice =
+ &accept_encoding_parts.slices[i];
+ if (grpc_compression_algorithm_parse(
+ (const char *)GPR_SLICE_START_PTR(*accept_encoding_entry_slice),
+ GPR_SLICE_LENGTH(*accept_encoding_entry_slice), &algorithm)) {
+ GPR_BITSET(&call->encodings_accepted_by_peer, algorithm);
+ } else {
+ char *accept_encoding_entry_str =
+ gpr_dump_slice(*accept_encoding_entry_slice, GPR_DUMP_ASCII);
+ gpr_log(GPR_ERROR,
+ "Invalid entry in accept encoding metadata: '%s'. Ignoring.",
+ accept_encoding_entry_str);
+ gpr_free(accept_encoding_entry_str);
+ }
+ }
+
+ gpr_slice_buffer_destroy(&accept_encoding_parts);
+
+ grpc_mdelem_set_user_data(
+ mdel, destroy_encodings_accepted_by_peer,
+ (void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
+}
+
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
+ uint32_t encodings_accepted_by_peer;
+ gpr_mu_lock(&call->mu);
+ encodings_accepted_by_peer = call->encodings_accepted_by_peer;
+ gpr_mu_unlock(&call->mu);
+ return encodings_accepted_by_peer;
+}
+
+static void set_status_details(grpc_call *call, status_source source,
+ grpc_mdstr *status) {
+ if (call->status[source].details != NULL) {
+ GRPC_MDSTR_UNREF(call->status[source].details);
+ }
+ call->status[source].details = status;
+}
+
+static void get_final_status(grpc_call *call,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data) {
+ int i;
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ if (call->status[i].is_set) {
+ set_value(call->status[i].code, set_value_user_data);
+ return;
+ }
+ }
+ if (call->is_client) {
+ set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
+ } else {
+ set_value(GRPC_STATUS_OK, set_value_user_data);
+ }
+}
+
+static void get_final_details(grpc_call *call, char **out_details,
+ size_t *out_details_capacity) {
+ int i;
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ if (call->status[i].is_set) {
+ if (call->status[i].details) {
+ gpr_slice details = call->status[i].details->slice;
+ size_t len = GPR_SLICE_LENGTH(details);
+ if (len + 1 > *out_details_capacity) {
+ *out_details_capacity =
+ GPR_MAX(len + 1, *out_details_capacity * 3 / 2);
+ *out_details = gpr_realloc(*out_details, *out_details_capacity);
+ }
+ memcpy(*out_details, GPR_SLICE_START_PTR(details), len);
+ (*out_details)[len] = 0;
+ } else {
+ goto no_details;
+ }
+ return;
+ }
+ }
+
+no_details:
+ if (0 == *out_details_capacity) {
+ *out_details_capacity = 8;
+ *out_details = gpr_malloc(*out_details_capacity);
+ }
+ **out_details = 0;
+}
+
+static grpc_linked_mdelem *linked_from_md(grpc_metadata *md) {
+ return (grpc_linked_mdelem *)&md->internal_data;
+}
+
+static int prepare_application_metadata(grpc_call *call, int count,
+ grpc_metadata *metadata,
+ int is_trailing,
+ int prepend_extra_metadata) {
+ int i;
+ grpc_metadata_batch *batch =
+ &call->metadata_batch[0 /* is_receiving */][is_trailing];
+ if (prepend_extra_metadata) {
+ if (call->send_extra_metadata_count == 0) {
+ prepend_extra_metadata = 0;
+ } else {
+ for (i = 0; i < call->send_extra_metadata_count; i++) {
+ GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
+ }
+ for (i = 1; i < call->send_extra_metadata_count; i++) {
+ call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
+ }
+ for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
+ call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
+ }
+ }
+ }
+ for (i = 0; i < count; i++) {
+ grpc_metadata *md = &metadata[i];
+ grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
+ GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
+ l->md = grpc_mdelem_from_string_and_buffer(
+ md->key, (const uint8_t *)md->value, md->value_length);
+ if (!grpc_header_key_is_legal(grpc_mdstr_as_c_string(l->md->key),
+ GRPC_MDSTR_LENGTH(l->md->key))) {
+ gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
+ grpc_mdstr_as_c_string(l->md->key));
+ return 0;
+ } else if (!grpc_is_binary_header(grpc_mdstr_as_c_string(l->md->key),
+ GRPC_MDSTR_LENGTH(l->md->key)) &&
+ !grpc_header_nonbin_value_is_legal(
+ grpc_mdstr_as_c_string(l->md->value),
+ GRPC_MDSTR_LENGTH(l->md->value))) {
+ gpr_log(GPR_ERROR, "attempt to send invalid metadata value");
+ return 0;
+ }
+ }
+ for (i = 1; i < count; i++) {
+ linked_from_md(&metadata[i])->prev = linked_from_md(&metadata[i - 1]);
+ }
+ for (i = 0; i < count - 1; i++) {
+ linked_from_md(&metadata[i])->next = linked_from_md(&metadata[i + 1]);
+ }
+ switch (prepend_extra_metadata * 2 + (count != 0)) {
+ case 0:
+ /* no prepend, no metadata => nothing to do */
+ batch->list.head = batch->list.tail = NULL;
+ break;
+ case 1:
+ /* metadata, but no prepend */
+ batch->list.head = linked_from_md(&metadata[0]);
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
+ break;
+ case 2:
+ /* prepend, but no md */
+ batch->list.head = &call->send_extra_metadata[0];
+ batch->list.tail =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
+ break;
+ case 3:
+ /* prepend AND md */
+ batch->list.head = &call->send_extra_metadata[0];
+ call->send_extra_metadata[call->send_extra_metadata_count - 1].next =
+ linked_from_md(&metadata[0]);
+ linked_from_md(&metadata[0])->prev =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
+ break;
+ default:
+ GPR_UNREACHABLE_CODE(return 0);
+ }
+
+ return 1;
+}
+
+void grpc_call_destroy(grpc_call *c) {
+ int cancel;
+ grpc_call *parent = c->parent;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_call_destroy", 0);
+ GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
+
+ if (parent) {
+ gpr_mu_lock(&parent->mu);
+ if (c == parent->first_child) {
+ parent->first_child = c->sibling_next;
+ if (c == parent->first_child) {
+ parent->first_child = NULL;
+ }
+ c->sibling_prev->sibling_next = c->sibling_next;
+ c->sibling_next->sibling_prev = c->sibling_prev;
+ }
+ gpr_mu_unlock(&parent->mu);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
+ }
+
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(!c->destroy_called);
+ c->destroy_called = 1;
+ if (c->have_alarm) {
+ grpc_timer_cancel(&exec_ctx, &c->alarm);
+ }
+ cancel = !c->received_final_op;
+ gpr_mu_unlock(&c->mu);
+ if (cancel) grpc_call_cancel(c, NULL);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_destroy", 0);
+}
+
+grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
+ GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
+ GPR_ASSERT(!reserved);
+ return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
+ NULL);
+}
+
+grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
+ grpc_status_code status,
+ const char *description,
+ void *reserved) {
+ grpc_call_error r;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE(
+ "grpc_call_cancel_with_status("
+ "c=%p, status=%d, description=%s, reserved=%p)",
+ 4, (c, (int)status, description, reserved));
+ GPR_ASSERT(reserved == NULL);
+ gpr_mu_lock(&c->mu);
+ r = cancel_with_status(&exec_ctx, c, status, description);
+ gpr_mu_unlock(&c->mu);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return r;
+}
+
+typedef struct cancel_closure {
+ grpc_closure closure;
+ grpc_call *call;
+ grpc_status_code status;
+} cancel_closure;
+
+static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
+ cancel_closure *cc = ccp;
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, cc->call, "cancel");
+ gpr_free(cc);
+}
+
+static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
+ grpc_transport_stream_op op;
+ cancel_closure *cc = ccp;
+ memset(&op, 0, sizeof(op));
+ op.cancel_with_status = cc->status;
+ /* reuse closure to catch completion */
+ grpc_closure_init(&cc->closure, done_cancel, cc);
+ op.on_complete = &cc->closure;
+ execute_op(exec_ctx, cc->call, &op);
+}
+
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
+ const char *description) {
+ grpc_mdstr *details =
+ description ? grpc_mdstr_from_string(description) : NULL;
+ cancel_closure *cc = gpr_malloc(sizeof(*cc));
+
+ GPR_ASSERT(status != GRPC_STATUS_OK);
+
+ set_status_code(c, STATUS_FROM_API_OVERRIDE, (uint32_t)status);
+ set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
+
+ grpc_closure_init(&cc->closure, send_cancel, cc);
+ cc->call = c;
+ cc->status = status;
+ GRPC_CALL_INTERNAL_REF(c, "cancel");
+ grpc_exec_ctx_enqueue(exec_ctx, &cc->closure, true, NULL);
+
+ return GRPC_CALL_OK;
+}
+
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op) {
+ grpc_call_element *elem;
+
+ GPR_TIMER_BEGIN("execute_op", 0);
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ op->context = call->context;
+ elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ GPR_TIMER_END("execute_op", 0);
+}
+
+char *grpc_call_get_peer(grpc_call *call) {
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ char *result;
+ GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
+ result = elem->filter->get_peer(&exec_ctx, elem);
+ if (result == NULL) {
+ result = grpc_channel_get_target(call->channel);
+ }
+ if (result == NULL) {
+ result = gpr_strdup("unknown");
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ return result;
+}
+
+grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
+ return CALL_FROM_TOP_ELEM(elem);
+}
+
+static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ grpc_call *call = arg;
+ gpr_mu_lock(&call->mu);
+ call->have_alarm = 0;
+ if (success) {
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_DEADLINE_EXCEEDED,
+ "Deadline Exceeded");
+ }
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "alarm");
+}
+
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline) {
+ if (call->have_alarm) {
+ gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
+ assert(0);
+ return;
+ }
+ GRPC_CALL_INTERNAL_REF(call, "alarm");
+ call->have_alarm = 1;
+ call->send_deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+ grpc_timer_init(exec_ctx, &call->alarm, call->send_deadline, call_alarm, call,
+ gpr_now(GPR_CLOCK_MONOTONIC));
+}
+
+/* we offset status by a small amount when storing it into transport metadata
+ as metadata cannot store a 0 value (which is used as OK for grpc_status_codes
+ */
+#define STATUS_OFFSET 1
+static void destroy_status(void *ignored) {}
+
+static uint32_t decode_status(grpc_mdelem *md) {
+ uint32_t status;
+ void *user_data;
+ if (md == GRPC_MDELEM_GRPC_STATUS_0) return 0;
+ if (md == GRPC_MDELEM_GRPC_STATUS_1) return 1;
+ if (md == GRPC_MDELEM_GRPC_STATUS_2) return 2;
+ user_data = grpc_mdelem_get_user_data(md, destroy_status);
+ if (user_data != NULL) {
+ status = ((uint32_t)(intptr_t)user_data) - STATUS_OFFSET;
+ } else {
+ if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
+ GPR_SLICE_LENGTH(md->value->slice),
+ &status)) {
+ status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
+ }
+ grpc_mdelem_set_user_data(md, destroy_status,
+ (void *)(intptr_t)(status + STATUS_OFFSET));
+ }
+ return status;
+}
+
+static uint32_t decode_compression(grpc_mdelem *md) {
+ grpc_compression_algorithm algorithm =
+ grpc_compression_algorithm_from_mdstr(md->value);
+ if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) {
+ const char *md_c_str = grpc_mdstr_as_c_string(md->value);
+ gpr_log(GPR_ERROR, "Invalid compression algorithm: '%s'", md_c_str);
+ }
+ return algorithm;
+}
+
+static grpc_mdelem *recv_common_filter(grpc_call *call, grpc_mdelem *elem) {
+ if (elem->key == GRPC_MDSTR_GRPC_STATUS) {
+ GPR_TIMER_BEGIN("status", 0);
+ set_status_code(call, STATUS_FROM_WIRE, decode_status(elem));
+ GPR_TIMER_END("status", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_MESSAGE) {
+ GPR_TIMER_BEGIN("status-details", 0);
+ set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(elem->value));
+ GPR_TIMER_END("status-details", 0);
+ return NULL;
+ }
+ return elem;
+}
+
+static grpc_mdelem *publish_app_metadata(grpc_call *call, grpc_mdelem *elem,
+ int is_trailing) {
+ grpc_metadata_array *dest;
+ grpc_metadata *mdusr;
+ GPR_TIMER_BEGIN("publish_app_metadata", 0);
+ dest = call->buffered_metadata[is_trailing];
+ if (dest->count == dest->capacity) {
+ dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
+ dest->metadata =
+ gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ }
+ mdusr = &dest->metadata[dest->count++];
+ mdusr->key = grpc_mdstr_as_c_string(elem->key);
+ mdusr->value = grpc_mdstr_as_c_string(elem->value);
+ mdusr->value_length = GPR_SLICE_LENGTH(elem->value->slice);
+ GPR_TIMER_END("publish_app_metadata", 0);
+ return elem;
+}
+
+static grpc_mdelem *recv_initial_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ENCODING) {
+ GPR_TIMER_BEGIN("compression_algorithm", 0);
+ set_compression_algorithm(call, decode_compression(elem));
+ GPR_TIMER_END("compression_algorithm", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ACCEPT_ENCODING) {
+ GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
+ set_encodings_accepted_by_peer(call, elem);
+ GPR_TIMER_END("encodings_accepted_by_peer", 0);
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 0);
+ }
+}
+
+static grpc_mdelem *recv_trailing_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 1);
+ }
+}
+
+grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
+ return CALL_STACK_FROM_CALL(call);
+}
+
+/*
+ * BATCH API IMPLEMENTATION
+ */
+
+static void set_status_value_directly(grpc_status_code status, void *dest) {
+ *(grpc_status_code *)dest = status;
+}
+
+static void set_cancelled_value(grpc_status_code status, void *dest) {
+ *(int *)dest = (status != GRPC_STATUS_OK);
+}
+
+static int are_write_flags_valid(uint32_t flags) {
+ /* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
+ const uint32_t allowed_write_positions =
+ (GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
+ const uint32_t invalid_positions = ~allowed_write_positions;
+ return !(flags & invalid_positions);
+}
+
+static batch_control *allocate_batch_control(grpc_call *call) {
+ size_t i;
+ for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
+ if ((call->used_batches & (1 << i)) == 0) {
+ call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
+ return &call->active_batches[i];
+ }
+ }
+ return NULL;
+}
+
+static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_cq_completion *storage) {
+ batch_control *bctl = user_data;
+ grpc_call *call = bctl->call;
+ gpr_mu_lock(&call->mu);
+ call->used_batches = (uint8_t)(
+ call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+}
+
+static void post_batch_completion(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ if (bctl->is_notify_tag_closure) {
+ grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success, NULL);
+ gpr_mu_lock(&call->mu);
+ bctl->call->used_batches =
+ (uint8_t)(bctl->call->used_batches &
+ ~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+ } else {
+ grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->success,
+ finish_batch_completion, bctl, &bctl->cq_completion);
+ }
+}
+
+static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ for (;;) {
+ size_t remaining = call->receiving_stream->length -
+ (*call->receiving_buffer)->data.raw.slice_buffer.length;
+ if (remaining == 0) {
+ call->receiving_message = 0;
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ return;
+ }
+ if (grpc_byte_stream_next(exec_ctx, call->receiving_stream,
+ &call->receiving_slice, remaining,
+ &call->receiving_slice_ready)) {
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+ } else {
+ return;
+ }
+ }
+}
+
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ if (success) {
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+ continue_receiving_slices(exec_ctx, bctl);
+ } else {
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ grpc_byte_buffer_destroy(*call->receiving_buffer);
+ *call->receiving_buffer = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ }
+}
+
+static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl,
+ bool success) {
+ grpc_call *call = bctl->call;
+ if (call->receiving_stream == NULL) {
+ *call->receiving_buffer = NULL;
+ call->receiving_message = 0;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else if (call->receiving_stream->length >
+ grpc_channel_get_max_message_length(call->channel)) {
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_INTERNAL,
+ "Max message size exceeded");
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ *call->receiving_buffer = NULL;
+ call->receiving_message = 0;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else {
+ call->test_only_last_message_flags = call->receiving_stream->flags;
+ if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
+ (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
+ *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
+ NULL, 0, call->compression_algorithm);
+ } else {
+ *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
+ }
+ grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
+ bctl);
+ continue_receiving_slices(exec_ctx, bctl);
+ /* early out */
+ return;
+ }
+}
+
+static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ gpr_mu_lock(&bctl->call->mu);
+ if (bctl->call->has_initial_md_been_received) {
+ gpr_mu_unlock(&bctl->call->mu);
+ process_data_after_md(exec_ctx, bctlp, success);
+ } else {
+ call->saved_receiving_stream_ready_ctx.bctlp = bctlp;
+ call->saved_receiving_stream_ready_ctx.success = success;
+ gpr_mu_unlock(&bctl->call->mu);
+ }
+}
+
+static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
+ void *bctlp, bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ gpr_mu_lock(&call->mu);
+
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_initial_filter, call);
+ call->has_initial_md_been_received = true;
+
+ if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
+ 0 &&
+ !call->is_client) {
+ GPR_TIMER_BEGIN("set_deadline_alarm", 0);
+ set_deadline_alarm(exec_ctx, call, md->deadline);
+ GPR_TIMER_END("set_deadline_alarm", 0);
+ }
+
+ if (call->saved_receiving_stream_ready_ctx.bctlp != NULL) {
+ grpc_closure *saved_rsr_closure = grpc_closure_create(
+ receiving_stream_ready, call->saved_receiving_stream_ready_ctx.bctlp);
+ grpc_exec_ctx_enqueue(exec_ctx, saved_rsr_closure,
+ call->saved_receiving_stream_ready_ctx.success, NULL);
+ call->saved_receiving_stream_ready_ctx.bctlp = NULL;
+ }
+
+ gpr_mu_unlock(&call->mu);
+
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
+
+static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+ grpc_call *child_call;
+ grpc_call *next_child_call;
+
+ gpr_mu_lock(&call->mu);
+ if (bctl->send_initial_metadata) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ }
+ if (bctl->send_final_op) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
+ }
+ if (bctl->recv_final_op) {
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_trailing_filter, call);
+
+ if (call->have_alarm) {
+ grpc_timer_cancel(exec_ctx, &call->alarm);
+ }
+ /* propagate cancellation to any interested children */
+ child_call = call->first_child;
+ if (child_call != NULL) {
+ do {
+ next_child_call = child_call->sibling_next;
+ if (child_call->cancellation_is_inherited) {
+ GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
+ grpc_call_cancel(child_call, NULL);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
+ }
+ child_call = next_child_call;
+ } while (child_call != call->first_child);
+ }
+
+ if (call->is_client) {
+ get_final_status(call, set_status_value_directly,
+ call->final_op.client.status);
+ get_final_details(call, call->final_op.client.status_details,
+ call->final_op.client.status_details_capacity);
+ } else {
+ get_final_status(call, set_cancelled_value,
+ call->final_op.server.cancelled);
+ }
+
+ success = 1;
+ }
+ bctl->success = success != 0;
+ gpr_mu_unlock(&call->mu);
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
+
+static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, const grpc_op *ops,
+ size_t nops, void *notify_tag,
+ int is_notify_tag_closure) {
+ grpc_transport_stream_op stream_op;
+ size_t i;
+ const grpc_op *op;
+ batch_control *bctl;
+ int num_completion_callbacks_needed = 1;
+ grpc_call_error error = GRPC_CALL_OK;
+
+ GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
+
+ GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
+
+ memset(&stream_op, 0, sizeof(stream_op));
+
+ /* TODO(ctiller): this feels like it could be made lock-free */
+ gpr_mu_lock(&call->mu);
+ bctl = allocate_batch_control(call);
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+
+ if (nops == 0) {
+ GRPC_CALL_INTERNAL_REF(call, "completion");
+ bctl->success = 1;
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_mu_unlock(&call->mu);
+ post_batch_completion(exec_ctx, bctl);
+ error = GRPC_CALL_OK;
+ goto done;
+ }
+
+ /* rewrite batch ops into a transport op */
+ for (i = 0; i < nops; i++) {
+ op = &ops[i];
+ if (op->reserved != NULL) {
+ error = GRPC_CALL_ERROR;
+ goto done_with_error;
+ }
+ switch (op->op) {
+ case GRPC_OP_SEND_INITIAL_METADATA:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->sent_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ if (op->data.send_initial_metadata.count > INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ bctl->send_initial_metadata = 1;
+ call->sent_initial_metadata = 1;
+ if (!prepare_application_metadata(
+ call, (int)op->data.send_initial_metadata.count,
+ op->data.send_initial_metadata.metadata, 0, call->is_client)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ /* TODO(ctiller): just make these the same variable? */
+ call->metadata_batch[0][0].deadline = call->send_deadline;
+ stream_op.send_initial_metadata =
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
+ break;
+ case GRPC_OP_SEND_MESSAGE:
+ if (!are_write_flags_valid(op->flags)) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (op->data.send_message == NULL) {
+ error = GRPC_CALL_ERROR_INVALID_MESSAGE;
+ goto done_with_error;
+ }
+ if (call->sending_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ bctl->send_message = 1;
+ call->sending_message = 1;
+ grpc_slice_buffer_stream_init(
+ &call->sending_stream,
+ &op->data.send_message->data.raw.slice_buffer, op->flags);
+ stream_op.send_message = &call->sending_stream.base;
+ break;
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (!call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done_with_error;
+ }
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
+ break;
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done_with_error;
+ }
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ if (op->data.send_status_from_server.trailing_metadata_count >
+ INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ call->send_extra_metadata_count = 1;
+ call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
+ call->channel, op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status_details != NULL) {
+ call->send_extra_metadata[1].md = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_mdstr_from_string(
+ op->data.send_status_from_server.status_details));
+ call->send_extra_metadata_count++;
+ set_status_details(
+ call, STATUS_FROM_API_OVERRIDE,
+ GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
+ }
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (uint32_t)op->data.send_status_from_server.status);
+ if (!prepare_application_metadata(
+ call,
+ (int)op->data.send_status_from_server.trailing_metadata_count,
+ op->data.send_status_from_server.trailing_metadata, 1, 1)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
+ break;
+ case GRPC_OP_RECV_INITIAL_METADATA:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->received_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->received_initial_metadata = 1;
+ call->buffered_metadata[0] = op->data.recv_initial_metadata;
+ grpc_closure_init(&call->receiving_initial_metadata_ready,
+ receiving_initial_metadata_ready, bctl);
+ bctl->recv_initial_metadata = 1;
+ stream_op.recv_initial_metadata =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+ stream_op.recv_initial_metadata_ready =
+ &call->receiving_initial_metadata_ready;
+ num_completion_callbacks_needed++;
+ break;
+ case GRPC_OP_RECV_MESSAGE:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->receiving_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->receiving_message = 1;
+ bctl->recv_message = 1;
+ call->receiving_buffer = op->data.recv_message;
+ stream_op.recv_message = &call->receiving_stream;
+ grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
+ bctl);
+ stream_op.recv_message_ready = &call->receiving_stream_ready;
+ num_completion_callbacks_needed++;
+ break;
+ case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (!call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done_with_error;
+ }
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->received_final_op = 1;
+ call->buffered_metadata[1] =
+ op->data.recv_status_on_client.trailing_metadata;
+ call->final_op.client.status = op->data.recv_status_on_client.status;
+ call->final_op.client.status_details =
+ op->data.recv_status_on_client.status_details;
+ call->final_op.client.status_details_capacity =
+ op->data.recv_status_on_client.status_details_capacity;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ break;
+ case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ /* Flag validation: currently allow no flags */
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done_with_error;
+ }
+ if (call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done_with_error;
+ }
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
+ }
+ call->received_final_op = 1;
+ call->final_op.server.cancelled =
+ op->data.recv_close_on_server.cancelled;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ break;
+ }
+ }
+
+ GRPC_CALL_INTERNAL_REF(call, "completion");
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
+
+ stream_op.context = call->context;
+ grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
+ stream_op.on_complete = &bctl->finish_batch;
+ gpr_mu_unlock(&call->mu);
+
+ execute_op(exec_ctx, call, &stream_op);
+
+done:
+ GPR_TIMER_END("grpc_call_start_batch", 0);
+ return error;
+
+done_with_error:
+ /* reverse any mutations that occurred */
+ if (bctl->send_initial_metadata) {
+ call->sent_initial_metadata = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][0]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base);
+ }
+ if (bctl->send_final_op) {
+ call->sent_final_op = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][1]);
+ }
+ if (bctl->recv_initial_metadata) {
+ call->received_initial_metadata = 0;
+ }
+ if (bctl->recv_message) {
+ call->receiving_message = 0;
+ }
+ if (bctl->recv_final_op) {
+ call->received_final_op = 0;
+ }
+ gpr_mu_unlock(&call->mu);
+ goto done;
+}
+
+grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
+ size_t nops, void *tag, void *reserved) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call_error err;
+
+ GRPC_API_TRACE(
+ "grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
+ 5, (call, ops, (unsigned long)nops, tag, reserved));
+
+ if (reserved != NULL) {
+ err = GRPC_CALL_ERROR;
+ } else {
+ err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+ return err;
+}
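
[Editor's note] For reference, a minimal client-side sketch of driving this entry point, assuming an already created channel and completion queue cq; the method name and tag value are placeholders, and error handling and cleanup are mostly elided:

    grpc_metadata_array initial_md, trailing_md;
    grpc_status_code status;
    char *status_details = NULL;
    size_t status_details_capacity = 0;
    grpc_op ops[4];

    grpc_metadata_array_init(&initial_md);
    grpc_metadata_array_init(&trailing_md);
    memset(ops, 0, sizeof(ops));

    grpc_call *call = grpc_channel_create_call(
        channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, "/pkg.Service/Method", NULL,
        gpr_inf_future(GPR_CLOCK_REALTIME), NULL);

    ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
    ops[0].data.send_initial_metadata.count = 0;
    ops[1].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    ops[2].op = GRPC_OP_RECV_INITIAL_METADATA;
    ops[2].data.recv_initial_metadata = &initial_md;
    ops[3].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
    ops[3].data.recv_status_on_client.trailing_metadata = &trailing_md;
    ops[3].data.recv_status_on_client.status = &status;
    ops[3].data.recv_status_on_client.status_details = &status_details;
    ops[3].data.recv_status_on_client.status_details_capacity =
        &status_details_capacity;

    /* The whole batch completes as one GRPC_OP_COMPLETE event for (void *)1. */
    GPR_ASSERT(GRPC_CALL_OK ==
               grpc_call_start_batch(call, ops, GPR_ARRAY_SIZE(ops),
                                     (void *)1, NULL));
    /* After the event: gpr_free(status_details), destroy the metadata arrays,
       and destroy the call. */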
+
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure) {
+ return call_start_batch(exec_ctx, call, ops, nops, closure, 1);
+}
+
+void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
+ void *value, void (*destroy)(void *value)) {
+ if (call->context[elem].destroy) {
+ call->context[elem].destroy(call->context[elem].value);
+ }
+ call->context[elem].value = value;
+ call->context[elem].destroy = destroy;
+}
+
+void *grpc_call_context_get(grpc_call *call, grpc_context_index elem) {
+ return call->context[elem].value;
+}
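
[Editor's note] As a small sketch of the context hooks, a wrapping layer might attach per-call state that the call then owns; the my_trace_ctx type is hypothetical, and GRPC_CONTEXT_TRACING is used only as a plausible slot from channel/context.h:

    typedef struct { int sampled; } my_trace_ctx; /* hypothetical type */

    static void my_trace_ctx_destroy(void *p) { gpr_free(p); }

    /* The call takes ownership: destroy runs when the slot is overwritten
       or when the call itself is destroyed. */
    my_trace_ctx *ctx = gpr_malloc(sizeof(*ctx));
    ctx->sampled = 1;
    grpc_call_context_set(call, GRPC_CONTEXT_TRACING, ctx, my_trace_ctx_destroy);

    my_trace_ctx *seen = grpc_call_context_get(call, GRPC_CONTEXT_TRACING);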
+
+uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; }
+
+grpc_compression_algorithm grpc_call_compression_for_level(
+ grpc_call *call, grpc_compression_level level) {
+ gpr_mu_lock(&call->mu);
+ const uint32_t accepted_encodings = call->encodings_accepted_by_peer;
+ gpr_mu_unlock(&call->mu);
+ return grpc_compression_algorithm_for_level(level, accepted_encodings);
+}
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
new file mode 100644
index 0000000000..e2e75865be
--- /dev/null
+++ b/src/core/lib/surface/call.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_CALL_H
+#define GRPC_CORE_LIB_SURFACE_CALL_H
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/channel/context.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/surface_trace.h"
+
+#include <grpc/grpc.h>
+#include <grpc/impl/codegen/compression_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, int success,
+ void *user_data);
+
+grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
+ uint32_t propagation_mask,
+ grpc_completion_queue *cq,
+ const void *server_transport_data,
+ grpc_mdelem **add_initial_metadata,
+ size_t add_initial_metadata_count,
+ gpr_timespec send_deadline);
+
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_completion_queue *cq);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_call_internal_ref(grpc_call *call, const char *reason);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ const char *reason);
+#define GRPC_CALL_INTERNAL_REF(call, reason) \
+ grpc_call_internal_ref(call, reason)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call, reason)
+#else
+void grpc_call_internal_ref(grpc_call *call);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
+#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call)
+#endif
+
+grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
+
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure);
+
+/* Given the top call_element, get the call object. */
+grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
+
+void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+ grpc_call *call, const grpc_op *ops, size_t nops,
+ void *tag);
+
+/* Set a context pointer.
+   No thread-safety guarantees are made with respect to this value. */
+void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
+ void *value, void (*destroy)(void *value));
+/* Get a context pointer. */
+void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
+
+#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
+ if (grpc_api_trace) grpc_call_log_batch(sev, call, ops, nops, tag)
+
+uint8_t grpc_call_is_client(grpc_call *call);
+
+/* Return an appropriate compression algorithm for the requested compression \a
+ * level in the context of \a call. */
+grpc_compression_algorithm grpc_call_compression_for_level(
+ grpc_call *call, grpc_compression_level level);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_SURFACE_CALL_H */
diff --git a/src/core/lib/surface/call_details.c b/src/core/lib/surface/call_details.c
new file mode 100644
index 0000000000..08f606d84a
--- /dev/null
+++ b/src/core/lib/surface/call_details.c
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+#include <string.h>
+
+#include "src/core/lib/surface/api_trace.h"
+
+void grpc_call_details_init(grpc_call_details* cd) {
+ GRPC_API_TRACE("grpc_call_details_init(cd=%p)", 1, (cd));
+ memset(cd, 0, sizeof(*cd));
+}
+
+void grpc_call_details_destroy(grpc_call_details* cd) {
+ GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd));
+ gpr_free(cd->method);
+ gpr_free(cd->host);
+}
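
[Editor's note] A typical server-side lifecycle for these two helpers, sketched on the assumption of an existing server and a completion queue cq registered with it; the tag value is a placeholder:

    grpc_call *call = NULL;
    grpc_call_details details;
    grpc_metadata_array request_metadata;

    grpc_call_details_init(&details);
    grpc_metadata_array_init(&request_metadata);

    /* Ask for the next incoming call; completion is reported via (void *)101. */
    GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
                                   server, &call, &details, &request_metadata,
                                   cq, cq, (void *)101));

    /* Once the tag completes, details.method and details.host are readable. */

    grpc_metadata_array_destroy(&request_metadata);
    grpc_call_details_destroy(&details);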
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c
new file mode 100644
index 0000000000..bc5a2ffb65
--- /dev/null
+++ b/src/core/lib/surface/call_log_batch.c
@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/call.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/support/string.h"
+
+static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
+ size_t i;
+ for (i = 0; i < count; i++) {
+ gpr_strvec_add(b, gpr_strdup("\nkey="));
+ gpr_strvec_add(b, gpr_strdup(md[i].key));
+
+ gpr_strvec_add(b, gpr_strdup(" value="));
+ gpr_strvec_add(b, gpr_dump(md[i].value, md[i].value_length,
+ GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ }
+}
+
+char *grpc_op_string(const grpc_op *op) {
+ char *tmp;
+ char *out;
+
+ gpr_strvec b;
+ gpr_strvec_init(&b);
+
+ switch (op->op) {
+ case GRPC_OP_SEND_INITIAL_METADATA:
+ gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA"));
+ add_metadata(&b, op->data.send_initial_metadata.metadata,
+ op->data.send_initial_metadata.count);
+ break;
+ case GRPC_OP_SEND_MESSAGE:
+ gpr_asprintf(&tmp, "SEND_MESSAGE ptr=%p", op->data.send_message);
+ gpr_strvec_add(&b, tmp);
+ break;
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ gpr_strvec_add(&b, gpr_strdup("SEND_CLOSE_FROM_CLIENT"));
+ break;
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ gpr_asprintf(&tmp, "SEND_STATUS_FROM_SERVER status=%d details=%s",
+ op->data.send_status_from_server.status,
+ op->data.send_status_from_server.status_details);
+ gpr_strvec_add(&b, tmp);
+ add_metadata(&b, op->data.send_status_from_server.trailing_metadata,
+ op->data.send_status_from_server.trailing_metadata_count);
+ break;
+ case GRPC_OP_RECV_INITIAL_METADATA:
+ gpr_asprintf(&tmp, "RECV_INITIAL_METADATA ptr=%p",
+ op->data.recv_initial_metadata);
+ gpr_strvec_add(&b, tmp);
+ break;
+ case GRPC_OP_RECV_MESSAGE:
+ gpr_asprintf(&tmp, "RECV_MESSAGE ptr=%p", op->data.recv_message);
+ gpr_strvec_add(&b, tmp);
+ break;
+ case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ gpr_asprintf(&tmp,
+ "RECV_STATUS_ON_CLIENT metadata=%p status=%p details=%p",
+ op->data.recv_status_on_client.trailing_metadata,
+ op->data.recv_status_on_client.status,
+ op->data.recv_status_on_client.status_details);
+ gpr_strvec_add(&b, tmp);
+ break;
+ case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ gpr_asprintf(&tmp, "RECV_CLOSE_ON_SERVER cancelled=%p",
+ op->data.recv_close_on_server.cancelled);
+ gpr_strvec_add(&b, tmp);
+ }
+ out = gpr_strvec_flatten(&b, NULL);
+ gpr_strvec_destroy(&b);
+
+ return out;
+}
+
+void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+ grpc_call *call, const grpc_op *ops, size_t nops,
+ void *tag) {
+ char *tmp;
+ size_t i;
+ for (i = 0; i < nops; i++) {
+ tmp = grpc_op_string(&ops[i]);
+ gpr_log(file, line, severity, "ops[%d]: %s", (int)i, tmp);
+ gpr_free(tmp);
+ }
+}
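
[Editor's note] These strings are only produced when the api tracer is on, since GRPC_CALL_LOG_BATCH is gated on grpc_api_trace. A hedged sketch of enabling it from a test on a POSIX system; the variables must be set before grpc_init, which is where they are parsed:

    /* setenv() is POSIX (<stdlib.h>); on Windows use _putenv_s instead. */
    setenv("GRPC_TRACE", "api", 1 /* overwrite existing value */);
    setenv("GRPC_VERBOSITY", "DEBUG", 1);
    grpc_init();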
diff --git a/src/core/lib/surface/call_test_only.h b/src/core/lib/surface/call_test_only.h
new file mode 100644
index 0000000000..400214189e
--- /dev/null
+++ b/src/core/lib/surface/call_test_only.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H
+#define GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H
+
+#include <grpc/grpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Return the compression algorithm from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
+ grpc_call *call);
+
+/** Return the message flags from \a call.
+ *
+ * \warning This function should \b only be used in test code. */
+uint32_t grpc_call_test_only_get_message_flags(grpc_call *call);
+
+/** Returns a bitset for the encodings (compression algorithms) supported by \a
+ * call's peer.
+ *
+ * To be indexed by grpc_compression_algorithm enum values. */
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H */
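
[Editor's note] In an end-to-end test these accessors might be used roughly as follows; the expected algorithm is illustrative and the call is assumed to have already completed its exchange:

    #include "src/core/lib/surface/call_test_only.h"

    GPR_ASSERT(GRPC_COMPRESS_GZIP ==
               grpc_call_test_only_get_compression_algorithm(call));
    uint32_t accepted = grpc_call_test_only_get_encodings_accepted_by_peer(call);
    GPR_ASSERT(accepted & (1u << GRPC_COMPRESS_GZIP));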
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
new file mode 100644
index 0000000000..d815daa70c
--- /dev/null
+++ b/src/core/lib/surface/channel.c
@@ -0,0 +1,324 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/channel.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/client_config/resolver_registry.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/surface/init.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS-1.
+ * Avoids needing to take a metadata context lock for sending status
+ * if the status code is < NUM_CACHED_STATUS_ELEMS.
+ * Sized to allow the most commonly used codes to fit in
+ * (OK, Cancelled, Unknown). */
+#define NUM_CACHED_STATUS_ELEMS 3
+
+typedef struct registered_call {
+ grpc_mdelem *path;
+ grpc_mdelem *authority;
+ struct registered_call *next;
+} registered_call;
+
+struct grpc_channel {
+ int is_client;
+ uint32_t max_message_length;
+ grpc_mdelem *default_authority;
+
+ gpr_mu registered_call_mu;
+ registered_call *registered_calls;
+ char *target;
+};
+
+#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
+#define CHANNEL_FROM_CHANNEL_STACK(channel_stack) \
+ (((grpc_channel *)(channel_stack)) - 1)
+#define CHANNEL_FROM_TOP_ELEM(top_elem) \
+ CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem))
+
+/* the protobuf library will (by default) start warning at 100 MB */
+#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
+
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, bool success);
+
+grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_channel_stack_type channel_stack_type,
+ grpc_transport *optional_transport) {
+ bool is_client = grpc_channel_stack_type_is_client(channel_stack_type);
+
+ grpc_channel *channel = grpc_channel_init_create_stack(
+ exec_ctx, channel_stack_type, sizeof(grpc_channel), args, 1,
+ destroy_channel, NULL, optional_transport);
+
+ memset(channel, 0, sizeof(*channel));
+ channel->target = gpr_strdup(target);
+ channel->is_client = is_client;
+ gpr_mu_init(&channel->registered_call_mu);
+ channel->registered_calls = NULL;
+
+ channel->max_message_length = DEFAULT_MAX_MESSAGE_LENGTH;
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_MESSAGE_LENGTH)) {
+ if (args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be an integer",
+ GRPC_ARG_MAX_MESSAGE_LENGTH);
+ } else if (args->args[i].value.integer < 0) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be >= 0",
+ GRPC_ARG_MAX_MESSAGE_LENGTH);
+ } else {
+ channel->max_message_length = (uint32_t)args->args[i].value.integer;
+ }
+ } else if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_ARG_DEFAULT_AUTHORITY);
+ } else {
+ if (channel->default_authority) {
+ /* setting this takes precedence over anything else */
+ GRPC_MDELEM_UNREF(channel->default_authority);
+ }
+ channel->default_authority = grpc_mdelem_from_strings(
+ ":authority", args->args[i].value.string);
+ }
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
+ if (args->args[i].type != GRPC_ARG_STRING) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
+ } else {
+ if (channel->default_authority) {
+ /* other ways of setting this (notably ssl) take precedence */
+ gpr_log(GPR_ERROR,
+ "%s ignored: default host already set some other way",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
+ } else {
+ channel->default_authority = grpc_mdelem_from_strings(
+ ":authority", args->args[i].value.string);
+ }
+ }
+ }
+ }
+ }
+
+ if (channel->is_client && channel->default_authority == NULL &&
+ target != NULL) {
+ char *default_authority = grpc_get_default_authority(target);
+ if (default_authority) {
+ channel->default_authority =
+ grpc_mdelem_from_strings(":authority", default_authority);
+ }
+ gpr_free(default_authority);
+ }
+
+ return channel;
+}
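
[Editor's note] The two channel arguments parsed above are supplied by the application when it builds the channel; a sketch with illustrative values (the target and authority strings are placeholders):

    grpc_arg args_array[2];
    args_array[0].type = GRPC_ARG_INTEGER;
    args_array[0].key = GRPC_ARG_MAX_MESSAGE_LENGTH;
    args_array[0].value.integer = 16 * 1024 * 1024; /* 16 MB cap */
    args_array[1].type = GRPC_ARG_STRING;
    args_array[1].key = GRPC_ARG_DEFAULT_AUTHORITY;
    args_array[1].value.string = "my.service.example.com";
    grpc_channel_args channel_args = {GPR_ARRAY_SIZE(args_array), args_array};

    grpc_channel *channel =
        grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);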
+
+char *grpc_channel_get_target(grpc_channel *channel) {
+ GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
+ return gpr_strdup(channel->target);
+}
+
+static grpc_call *grpc_channel_create_call_internal(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
+ grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
+ grpc_mdelem *send_metadata[2];
+ size_t num_metadata = 0;
+
+ GPR_ASSERT(channel->is_client);
+
+ send_metadata[num_metadata++] = path_mdelem;
+ if (authority_mdelem != NULL) {
+ send_metadata[num_metadata++] = authority_mdelem;
+ } else if (channel->default_authority != NULL) {
+ send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
+ }
+
+ return grpc_call_create(channel, parent_call, propagation_mask, cq, NULL,
+ send_metadata, num_metadata, deadline);
+}
+
+grpc_call *grpc_channel_create_call(grpc_channel *channel,
+ grpc_call *parent_call,
+ uint32_t propagation_mask,
+ grpc_completion_queue *cq,
+ const char *method, const char *host,
+ gpr_timespec deadline, void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_channel_create_call("
+ "channel=%p, parent_call=%p, propagation_mask=%x, cq=%p, method=%s, "
+ "host=%s, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 10, (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
+ (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+ return grpc_channel_create_call_internal(
+ channel, parent_call, propagation_mask, cq,
+ grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method)),
+ host ? grpc_mdelem_from_metadata_strings(GRPC_MDSTR_AUTHORITY,
+ grpc_mdstr_from_string(host))
+ : NULL,
+ deadline);
+}
+
+void *grpc_channel_register_call(grpc_channel *channel, const char *method,
+ const char *host, void *reserved) {
+ registered_call *rc = gpr_malloc(sizeof(registered_call));
+ GRPC_API_TRACE(
+ "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
+ 4, (channel, method, host, reserved));
+ GPR_ASSERT(!reserved);
+ rc->path = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method));
+ rc->authority = host ? grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_AUTHORITY, grpc_mdstr_from_string(host))
+ : NULL;
+ gpr_mu_lock(&channel->registered_call_mu);
+ rc->next = channel->registered_calls;
+ channel->registered_calls = rc;
+ gpr_mu_unlock(&channel->registered_call_mu);
+ return rc;
+}
+
+grpc_call *grpc_channel_create_registered_call(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *completion_queue, void *registered_call_handle,
+ gpr_timespec deadline, void *reserved) {
+ registered_call *rc = registered_call_handle;
+ GRPC_API_TRACE(
+ "grpc_channel_create_registered_call("
+ "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
+ "registered_call_handle=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
+ registered_call_handle, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+ return grpc_channel_create_call_internal(
+ channel, parent_call, propagation_mask, completion_queue,
+ GRPC_MDELEM_REF(rc->path),
+ rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
+}
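
[Editor's note] For a method that is invoked repeatedly, registration lets the :path and :authority elements be interned once and reused per call; a client-side sketch (names and the 30 second deadline are placeholders):

    /* At channel setup time: */
    void *registered = grpc_channel_register_call(
        channel, "/pkg.Service/Method", "my.service.example.com", NULL);

    /* Later, once per RPC: */
    gpr_timespec deadline = gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(30, GPR_TIMESPAN));
    grpc_call *call = grpc_channel_create_registered_call(
        channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, registered, deadline, NULL);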
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
+#else
+#define REF_REASON ""
+#define REF_ARG
+#endif
+void grpc_channel_internal_ref(grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
+}
+
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
+}
+
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ grpc_channel *channel = arg;
+ grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
+ while (channel->registered_calls) {
+ registered_call *rc = channel->registered_calls;
+ channel->registered_calls = rc->next;
+ GRPC_MDELEM_UNREF(rc->path);
+ if (rc->authority) {
+ GRPC_MDELEM_UNREF(rc->authority);
+ }
+ gpr_free(rc);
+ }
+ if (channel->default_authority != NULL) {
+ GRPC_MDELEM_UNREF(channel->default_authority);
+ }
+ gpr_mu_destroy(&channel->registered_call_mu);
+ gpr_free(channel->target);
+ gpr_free(channel);
+}
+
+void grpc_channel_destroy(grpc_channel *channel) {
+ grpc_transport_op op;
+ grpc_channel_element *elem;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
+ memset(&op, 0, sizeof(op));
+ op.disconnect = 1;
+ elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
+ elem->filter->start_transport_op(&exec_ctx, elem, &op);
+
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel");
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
+ return CHANNEL_STACK_FROM_CHANNEL(channel);
+}
+
+grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ switch (i) {
+ case 0:
+ return GRPC_MDELEM_GRPC_STATUS_0;
+ case 1:
+ return GRPC_MDELEM_GRPC_STATUS_1;
+ case 2:
+ return GRPC_MDELEM_GRPC_STATUS_2;
+ }
+ gpr_ltoa(i, tmp);
+ return grpc_mdelem_from_metadata_strings(GRPC_MDSTR_GRPC_STATUS,
+ grpc_mdstr_from_string(tmp));
+}
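
[Editor's note] Callers see the same ownership contract whether the element came from the static cache (codes 0 to 2) or was freshly built; a sketch, with the status code chosen arbitrarily:

    grpc_mdelem *status_md = grpc_channel_get_reffed_status_elem(channel, 5);
    /* The reference is either handed off (e.g. into a metadata batch) or
       released explicitly: */
    GRPC_MDELEM_UNREF(status_md);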
+
+uint32_t grpc_channel_get_max_message_length(grpc_channel *channel) {
+ return channel->max_message_length;
+}
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
new file mode 100644
index 0000000000..09de0fccc9
--- /dev/null
+++ b/src/core/lib/surface/channel.h
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_H
+#define GRPC_CORE_LIB_SURFACE_CHANNEL_H
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/client_config/subchannel_factory.h"
+#include "src/core/lib/surface/channel_stack_type.h"
+
+grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *args,
+ grpc_channel_stack_type channel_stack_type,
+ grpc_transport *optional_transport);
+
+/** Get a (borrowed) pointer to this channel's underlying channel stack */
+grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
+
+/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
+ status_code.
+
+ The returned elem is owned by the caller. */
+grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel,
+ int status_code);
+uint32_t grpc_channel_get_max_message_length(grpc_channel *channel);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ const char *reason);
+#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
+ grpc_channel_internal_ref(channel, reason)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel, reason)
+#else
+void grpc_channel_internal_ref(grpc_channel *channel);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel);
+#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
+ grpc_channel_internal_ref(channel)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel)
+#endif
+
+#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
diff --git a/src/core/lib/surface/channel_connectivity.c b/src/core/lib/surface/channel_connectivity.c
new file mode 100644
index 0000000000..2f5d763e70
--- /dev/null
+++ b/src/core/lib/surface/channel_connectivity.c
@@ -0,0 +1,207 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/channel.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/channel/client_channel.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+grpc_connectivity_state grpc_channel_check_connectivity_state(
+ grpc_channel *channel, int try_to_connect) {
+ /* forward through to the underlying client channel */
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_connectivity_state state;
+ GRPC_API_TRACE(
+ "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
+ (channel, try_to_connect));
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ state = grpc_client_channel_check_connectivity_state(
+ &exec_ctx, client_channel_elem, try_to_connect);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return state;
+ }
+ gpr_log(GPR_ERROR,
+ "grpc_channel_check_connectivity_state called on something that is "
+ "not a (u)client channel, but '%s'",
+ client_channel_elem->filter->name);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return GRPC_CHANNEL_FATAL_FAILURE;
+}
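
[Editor's note] Application code can poll this directly, asking the channel to start connecting on the first query; a short sketch:

    grpc_connectivity_state state =
        grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
    if (state == GRPC_CHANNEL_FATAL_FAILURE) {
      /* The channel has shut down; no further state transitions will occur. */
    }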
+
+typedef enum {
+ WAITING,
+ CALLING_BACK,
+ CALLING_BACK_AND_FINISHED,
+ CALLED_BACK
+} callback_phase;
+
+typedef struct {
+ gpr_mu mu;
+ callback_phase phase;
+ int success;
+ grpc_closure on_complete;
+ grpc_timer alarm;
+ grpc_connectivity_state state;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion_storage;
+ grpc_channel *channel;
+ void *tag;
+} state_watcher;
+
+static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
+ grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_get_channel_stack(w->channel));
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
+ "watch_channel_connectivity");
+ } else {
+ abort();
+ }
+ gpr_mu_destroy(&w->mu);
+ gpr_free(w);
+}
+
+static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
+ grpc_cq_completion *ignored) {
+ int delete = 0;
+ state_watcher *w = pw;
+ gpr_mu_lock(&w->mu);
+ switch (w->phase) {
+ case WAITING:
+ case CALLED_BACK:
+ GPR_UNREACHABLE_CODE(return );
+ case CALLING_BACK:
+ w->phase = CALLED_BACK;
+ break;
+ case CALLING_BACK_AND_FINISHED:
+ delete = 1;
+ break;
+ }
+ gpr_mu_unlock(&w->mu);
+
+ if (delete) {
+ delete_state_watcher(exec_ctx, w);
+ }
+}
+
+static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
+ int due_to_completion) {
+ int delete = 0;
+
+ if (due_to_completion) {
+ grpc_timer_cancel(exec_ctx, &w->alarm);
+ }
+
+ gpr_mu_lock(&w->mu);
+ if (due_to_completion) {
+ w->success = 1;
+ }
+ switch (w->phase) {
+ case WAITING:
+ w->phase = CALLING_BACK;
+ grpc_cq_end_op(exec_ctx, w->cq, w->tag, w->success, finished_completion,
+ w, &w->completion_storage);
+ break;
+ case CALLING_BACK:
+ w->phase = CALLING_BACK_AND_FINISHED;
+ break;
+ case CALLING_BACK_AND_FINISHED:
+ GPR_UNREACHABLE_CODE(return );
+ case CALLED_BACK:
+ delete = 1;
+ break;
+ }
+ gpr_mu_unlock(&w->mu);
+
+ if (delete) {
+ delete_state_watcher(exec_ctx, w);
+ }
+}
+
+static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
+ partly_done(exec_ctx, pw, 1);
+}
+
+static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
+ partly_done(exec_ctx, pw, 0);
+}
+
+void grpc_channel_watch_connectivity_state(
+ grpc_channel *channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ state_watcher *w = gpr_malloc(sizeof(*w));
+
+ GRPC_API_TRACE(
+ "grpc_channel_watch_connectivity_state("
+ "channel=%p, last_observed_state=%d, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "cq=%p, tag=%p)",
+ 7, (channel, (int)last_observed_state, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, cq, tag));
+
+ grpc_cq_begin_op(cq, tag);
+
+ gpr_mu_init(&w->mu);
+ grpc_closure_init(&w->on_complete, watch_complete, w);
+ w->phase = WAITING;
+ w->state = last_observed_state;
+ w->success = 0;
+ w->cq = cq;
+ w->tag = tag;
+ w->channel = channel;
+
+ grpc_timer_init(&exec_ctx, &w->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ timeout_complete, w, gpr_now(GPR_CLOCK_MONOTONIC));
+
+ if (client_channel_elem->filter == &grpc_client_channel_filter) {
+ GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
+ grpc_client_channel_watch_connectivity_state(&exec_ctx, client_channel_elem,
+ grpc_cq_pollset(cq), &w->state,
+ &w->on_complete);
+ } else {
+ abort();
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
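
[Editor's note] Pairing the two entry points, an application can block until the state moves past what it last observed or a deadline expires; the tag value and five second deadline below are placeholders:

    grpc_connectivity_state last =
        grpc_channel_check_connectivity_state(channel, 1);
    gpr_timespec deadline = gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
    grpc_channel_watch_connectivity_state(channel, last, deadline, cq, (void *)7);

    grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL);
    /* ev.success != 0: the state changed before the deadline;
       ev.success == 0: the alarm fired first. */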
diff --git a/src/core/lib/surface/channel_create.c b/src/core/lib/surface/channel_create.c
new file mode 100644
index 0000000000..e8777ce816
--- /dev/null
+++ b/src/core/lib/surface/channel_create.c
@@ -0,0 +1,223 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/lib/census/grpc_filter.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/client_channel.h"
+#include "src/core/lib/channel/compress_filter.h"
+#include "src/core/lib/channel/http_client_filter.h"
+#include "src/core/lib/client_config/resolver_registry.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/transport/chttp2_transport.h"
+
+typedef struct {
+ grpc_connector base;
+ gpr_refcount refs;
+
+ grpc_closure *notify;
+ grpc_connect_in_args args;
+ grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
+
+ grpc_endpoint *tcp;
+
+ grpc_closure connected;
+} connector;
+
+static void connector_ref(grpc_connector *con) {
+ connector *c = (connector *)con;
+ gpr_ref(&c->refs);
+}
+
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ if (gpr_unref(&c->refs)) {
+ /* c->initial_string_buffer does not need to be destroyed */
+ gpr_free(c);
+ }
+}
+
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ connector_unref(exec_ctx, arg);
+}
+
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->tcp;
+ if (tcp != NULL) {
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ connector_ref(arg);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
+ }
+ c->result->transport =
+ grpc_create_chttp2_transport(exec_ctx, c->args.channel_args, tcp, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
+ GPR_ASSERT(c->result->transport);
+ c->result->channel_args = c->args.channel_args;
+ } else {
+ memset(c->result, 0, sizeof(*c->result));
+ }
+ notify = c->notify;
+ c->notify = NULL;
+ notify->cb(exec_ctx, notify->cb_arg, 1);
+}
+
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}
+
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
+ const grpc_connect_in_args *args,
+ grpc_connect_out_args *result,
+ grpc_closure *notify) {
+ connector *c = (connector *)con;
+ GPR_ASSERT(c->notify == NULL);
+ GPR_ASSERT(notify->cb);
+ c->notify = notify;
+ c->args = *args;
+ c->result = result;
+ c->tcp = NULL;
+ grpc_closure_init(&c->connected, connected, c);
+ grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
+ args->interested_parties, args->addr, args->addr_len,
+ args->deadline);
+}
+
+static const grpc_connector_vtable connector_vtable = {
+ connector_ref, connector_unref, connector_shutdown, connector_connect};
+
+typedef struct {
+ grpc_subchannel_factory base;
+ gpr_refcount refs;
+ grpc_channel_args *merge_args;
+ grpc_channel *master;
+} subchannel_factory;
+
+static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ gpr_ref(&f->refs);
+}
+
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ if (gpr_unref(&f->refs)) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
+ grpc_channel_args_destroy(f->merge_args);
+ gpr_free(f);
+ }
+}
+
+static grpc_subchannel *subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ connector *c = gpr_malloc(sizeof(*c));
+ grpc_channel_args *final_args =
+ grpc_channel_args_merge(args->args, f->merge_args);
+ grpc_subchannel *s;
+ memset(c, 0, sizeof(*c));
+ c->base.vtable = &connector_vtable;
+ gpr_ref_init(&c->refs, 1);
+ args->args = final_args;
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_connector_unref(exec_ctx, &c->base);
+ grpc_channel_args_destroy(final_args);
+ return s;
+}
+
+static const grpc_subchannel_factory_vtable subchannel_factory_vtable = {
+ subchannel_factory_ref, subchannel_factory_unref,
+ subchannel_factory_create_subchannel};
+
+/* Create a client channel:
+ Asynchronously: - resolve target
+ - connect to it (trying alternatives as presented)
+ - perform handshakes */
+grpc_channel *grpc_insecure_channel_create(const char *target,
+ const grpc_channel_args *args,
+ void *reserved) {
+ grpc_channel *channel = NULL;
+ grpc_resolver *resolver;
+ subchannel_factory *f;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE(
+ "grpc_insecure_channel_create(target=%p, args=%p, reserved=%p)", 3,
+ (target, args, reserved));
+ GPR_ASSERT(!reserved);
+
+ channel =
+ grpc_channel_create(&exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+
+ f = gpr_malloc(sizeof(*f));
+ f->base.vtable = &subchannel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
+ f->merge_args = grpc_channel_args_copy(args);
+ f->master = channel;
+ GRPC_CHANNEL_INTERNAL_REF(f->master, "subchannel_factory");
+ resolver = grpc_resolver_create(target, &f->base);
+ if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, f->master, "subchannel_factory");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return NULL;
+ }
+
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ return channel;
+}
diff --git a/src/core/lib/surface/channel_init.c b/src/core/lib/surface/channel_init.c
new file mode 100644
index 0000000000..fc69f61f77
--- /dev/null
+++ b/src/core/lib/surface/channel_init.c
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/channel_init.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/useful.h>
+
+typedef struct stage_slot {
+ grpc_channel_init_stage fn;
+ void *arg;
+ int priority;
+ size_t insertion_order;
+} stage_slot;
+
+typedef struct stage_slots {
+ stage_slot *slots;
+ size_t num_slots;
+ size_t cap_slots;
+} stage_slots;
+
+static stage_slots g_slots[GRPC_NUM_CHANNEL_STACK_TYPES];
+static bool g_finalized;
+
+void grpc_channel_init_init(void) {
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ g_slots[i].slots = NULL;
+ g_slots[i].num_slots = 0;
+ g_slots[i].cap_slots = 0;
+ }
+ g_finalized = false;
+}
+
+void grpc_channel_init_register_stage(grpc_channel_stack_type type,
+ int priority,
+ grpc_channel_init_stage stage,
+ void *stage_arg) {
+ GPR_ASSERT(!g_finalized);
+ if (g_slots[type].cap_slots == g_slots[type].num_slots) {
+ g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
+ g_slots[type].slots =
+ gpr_realloc(g_slots[type].slots,
+ g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
+ }
+ stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
+ s->insertion_order = g_slots[type].num_slots;
+ s->priority = priority;
+ s->fn = stage;
+ s->arg = stage_arg;
+}
+
+static int compare_slots(const void *a, const void *b) {
+ const stage_slot *sa = a;
+ const stage_slot *sb = b;
+
+ int c = GPR_ICMP(sa->priority, sb->priority);
+ if (c != 0) return c;
+ return GPR_ICMP(sa->insertion_order, sb->insertion_order);
+}
+
+void grpc_channel_init_finalize(void) {
+ GPR_ASSERT(!g_finalized);
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ qsort(g_slots[i].slots, g_slots[i].num_slots, sizeof(*g_slots[i].slots),
+ compare_slots);
+ }
+ g_finalized = true;
+}
+
+void grpc_channel_init_shutdown(void) {
+ for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
+ gpr_free(g_slots[i].slots);
+ g_slots[i].slots = (void *)(uintptr_t)0xdeadbeef;
+ }
+}
+
+static const char *name_for_type(grpc_channel_stack_type type) {
+ switch (type) {
+ case GRPC_CLIENT_CHANNEL:
+ return "CLIENT_CHANNEL";
+ case GRPC_CLIENT_SUBCHANNEL:
+ return "CLIENT_SUBCHANNEL";
+ case GRPC_SERVER_CHANNEL:
+ return "SERVER_CHANNEL";
+ case GRPC_CLIENT_LAME_CHANNEL:
+ return "CLIENT_LAME_CHANNEL";
+ case GRPC_CLIENT_DIRECT_CHANNEL:
+ return "CLIENT_DIRECT_CHANNEL";
+ case GRPC_NUM_CHANNEL_STACK_TYPES:
+ break;
+ }
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
+void *grpc_channel_init_create_stack(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_type type, size_t prefix_bytes,
+ const grpc_channel_args *args, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, grpc_transport *transport) {
+ GPR_ASSERT(g_finalized);
+
+ grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder_set_name(builder, name_for_type(type));
+ grpc_channel_stack_builder_set_channel_arguments(builder, args);
+ grpc_channel_stack_builder_set_transport(builder, transport);
+
+ for (size_t i = 0; i < g_slots[type].num_slots; i++) {
+ const stage_slot *slot = &g_slots[type].slots[i];
+ if (!slot->fn(builder, slot->arg)) {
+ grpc_channel_stack_builder_destroy(builder);
+ return NULL;
+ }
+ }
+
+ return grpc_channel_stack_builder_finish(exec_ctx, builder, prefix_bytes,
+ initial_refs, destroy, destroy_arg);
+}
diff --git a/src/core/lib/surface/channel_init.h b/src/core/lib/surface/channel_init.h
new file mode 100644
index 0000000000..a4d8271ca6
--- /dev/null
+++ b/src/core/lib/surface/channel_init.h
@@ -0,0 +1,86 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H
+#define GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H
+
+#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/surface/channel_stack_type.h"
+#include "src/core/lib/transport/transport.h"
+
+/// This module provides a way for plugins (and the grpc core library itself)
+/// to register mutators for channel stacks.
+/// It also provides a universal entry path to run those mutators to build
+/// a channel stack for various subsystems.
+
+/// One stage of mutation: call functions against \a builder to influence the
+/// channel stack that is ultimately constructed
+typedef bool (*grpc_channel_init_stage)(grpc_channel_stack_builder *builder,
+ void *arg);
+
+/// Global initialization of the system
+void grpc_channel_init_init(void);
+
+/// Register one stage of mutators.
+/// Stages are run in priority order (lowest to highest), and then in
+/// registration order (in the case of a tie).
+/// Stages are registered against one of the pre-determined channel stack
+/// types.
+void grpc_channel_init_register_stage(grpc_channel_stack_type type,
+ int priority,
+ grpc_channel_init_stage stage_fn,
+ void *stage_arg);
+
+/// Finalize registration. No more calls to grpc_channel_init_register_stage are
+/// allowed.
+void grpc_channel_init_finalize(void);
+/// Shutdown the channel init system
+void grpc_channel_init_shutdown(void);
+
+/// Construct a channel stack of some sort: see channel_stack.h for details
+/// \a type is the type of channel stack to create
+/// \a prefix_bytes is the number of bytes before the channel stack to allocate
+/// \a args are configuration arguments for the channel stack
+/// \a initial_refs is the initial refcount to give the channel stack
+/// \a destroy and \a destroy_arg specify how to destroy the channel stack
+///    if \a destroy_arg is NULL, the value returned by this function is
+///    substituted for it
+/// \a optional_transport is either NULL or a constructed transport object
+/// Returns a pointer to the base of the memory allocated (the actual channel
+/// stack object will be prefix_bytes past that pointer)
+void *grpc_channel_init_create_stack(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_type type, size_t prefix_bytes,
+ const grpc_channel_args *args, int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg, grpc_transport *optional_transport);
+
+#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H */
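A minimal usage sketch for the registration API declared above, assuming a hypothetical filter named my_filter defined elsewhere; the built-in registrations in init.c later in this diff follow the same pattern.

#include <limits.h>
#include "src/core/lib/surface/channel_init.h"

extern const grpc_channel_filter my_filter; /* hypothetical filter */

/* Stage callback: append the filter passed through stage_arg. */
static bool append_my_filter(grpc_channel_stack_builder *builder, void *arg) {
  return grpc_channel_stack_builder_append_filter(
      builder, (const grpc_channel_filter *)arg, NULL, NULL);
}

/* Run from a plugin init hook, before grpc_channel_init_finalize(). */
void my_plugin_register(void) {
  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
                                   append_my_filter, (void *)&my_filter);
}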
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
new file mode 100644
index 0000000000..dd862cdadd
--- /dev/null
+++ b/src/core/lib/surface/channel_ping.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/channel.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+typedef struct {
+ grpc_closure closure;
+ void *tag;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion_storage;
+} ping_result;
+
+static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_cq_completion *storage) {
+ gpr_free(arg);
+}
+
+static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ ping_result *pr = arg;
+ grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, success, ping_destroy, pr,
+ &pr->completion_storage);
+}
+
+void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
+ void *tag, void *reserved) {
+ grpc_transport_op op;
+ ping_result *pr = gpr_malloc(sizeof(*pr));
+ grpc_channel_element *top_elem =
+ grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(reserved == NULL);
+ memset(&op, 0, sizeof(op));
+ pr->tag = tag;
+ pr->cq = cq;
+ grpc_closure_init(&pr->closure, ping_done, pr);
+ op.send_ping = &pr->closure;
+ op.bind_pollset = grpc_cq_pollset(cq);
+ grpc_cq_begin_op(cq, tag);
+ top_elem->filter->start_transport_op(&exec_ctx, top_elem, &op);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
diff --git a/src/core/lib/surface/channel_stack_type.c b/src/core/lib/surface/channel_stack_type.c
new file mode 100644
index 0000000000..c35d603ca3
--- /dev/null
+++ b/src/core/lib/surface/channel_stack_type.c
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/channel_stack_type.h"
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type) {
+ switch (type) {
+ case GRPC_CLIENT_CHANNEL:
+ return true;
+ case GRPC_CLIENT_SUBCHANNEL:
+ return true;
+ case GRPC_CLIENT_LAME_CHANNEL:
+ return true;
+ case GRPC_CLIENT_DIRECT_CHANNEL:
+ return true;
+ case GRPC_SERVER_CHANNEL:
+ return false;
+ case GRPC_NUM_CHANNEL_STACK_TYPES:
+ break;
+ }
+ GPR_UNREACHABLE_CODE(return true;);
+}
diff --git a/src/core/lib/surface/channel_stack_type.h b/src/core/lib/surface/channel_stack_type.h
new file mode 100644
index 0000000000..16608fa386
--- /dev/null
+++ b/src/core/lib/surface/channel_stack_type.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H
+#define GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H
+
+#include <stdbool.h>
+
+typedef enum {
+ // normal top-half client channel with load-balancing, connection management
+ GRPC_CLIENT_CHANNEL,
+ // bottom-half of a client channel: everything that happens post-load
+ // balancing (bound to a specific transport)
+ GRPC_CLIENT_SUBCHANNEL,
+ // a permanently broken client channel
+ GRPC_CLIENT_LAME_CHANNEL,
+  // a directly connected client channel (no load balancing; talks straight
+  // to a transport)
+ GRPC_CLIENT_DIRECT_CHANNEL,
+ // server side channel
+ GRPC_SERVER_CHANNEL,
+ // must be last
+ GRPC_NUM_CHANNEL_STACK_TYPES
+} grpc_channel_stack_type;
+
+bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type);
+
+#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H */
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
new file mode 100644
index 0000000000..a0d7002053
--- /dev/null
+++ b/src/core/lib/surface/completion_queue.c
@@ -0,0 +1,512 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/completion_queue.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/event_string.h"
+#include "src/core/lib/surface/surface_trace.h"
+
+typedef struct {
+ grpc_pollset_worker **worker;
+ void *tag;
+} plucker;
+
+/* Completion queue structure */
+struct grpc_completion_queue {
+ /** owned by pollset */
+ gpr_mu *mu;
+ /** completed events */
+ grpc_cq_completion completed_head;
+ grpc_cq_completion *completed_tail;
+ /** Number of pending events (+1 if we're not shutdown) */
+ gpr_refcount pending_events;
+ /** Once owning_refs drops to zero, we will destroy the cq */
+ gpr_refcount owning_refs;
+ /** 0 initially, 1 once we've begun shutting down */
+ int shutdown;
+ int shutdown_called;
+ int is_server_cq;
+ int num_pluckers;
+ plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
+ grpc_closure pollset_shutdown_done;
+
+#ifndef NDEBUG
+ void **outstanding_tags;
+ size_t outstanding_tag_count;
+ size_t outstanding_tag_capacity;
+#endif
+
+ grpc_completion_queue *next_free;
+};
+
+#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
+
+static gpr_mu g_freelist_mu;
+static grpc_completion_queue *g_freelist;
+
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
+ bool success);
+
+void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
+
+void grpc_cq_global_shutdown(void) {
+ gpr_mu_destroy(&g_freelist_mu);
+ while (g_freelist) {
+ grpc_completion_queue *next = g_freelist->next_free;
+ grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
+#ifndef NDEBUG
+ gpr_free(g_freelist->outstanding_tags);
+#endif
+ gpr_free(g_freelist);
+ g_freelist = next;
+ }
+}
+
+struct grpc_cq_alarm {
+ grpc_timer alarm;
+ grpc_cq_completion completion;
+ /** completion queue where events about this alarm will be posted */
+ grpc_completion_queue *cq;
+ /** user supplied tag */
+ void *tag;
+};
+
+grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
+ grpc_completion_queue *cc;
+ GPR_ASSERT(!reserved);
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_create", 0);
+
+ GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
+
+ gpr_mu_lock(&g_freelist_mu);
+ if (g_freelist == NULL) {
+ gpr_mu_unlock(&g_freelist_mu);
+
+ cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
+ grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
+#ifndef NDEBUG
+ cc->outstanding_tags = NULL;
+ cc->outstanding_tag_capacity = 0;
+#endif
+ } else {
+ cc = g_freelist;
+ g_freelist = g_freelist->next_free;
+ gpr_mu_unlock(&g_freelist_mu);
+ /* pollset already initialized */
+ }
+
+ /* Initial ref is dropped by grpc_completion_queue_shutdown */
+ gpr_ref_init(&cc->pending_events, 1);
+ /* One for destroy(), one for pollset_shutdown */
+ gpr_ref_init(&cc->owning_refs, 2);
+ cc->completed_tail = &cc->completed_head;
+ cc->completed_head.next = (uintptr_t)cc->completed_tail;
+ cc->shutdown = 0;
+ cc->shutdown_called = 0;
+ cc->is_server_cq = 0;
+ cc->num_pluckers = 0;
+#ifndef NDEBUG
+ cc->outstanding_tag_count = 0;
+#endif
+ grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
+
+ GPR_TIMER_END("grpc_completion_queue_create", 0);
+
+ return cc;
+}
+
+#ifdef GRPC_CQ_REF_COUNT_DEBUG
+void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
+ const char *file, int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s", cc,
+ (int)cc->owning_refs.count, (int)cc->owning_refs.count + 1, reason);
+#else
+void grpc_cq_internal_ref(grpc_completion_queue *cc) {
+#endif
+ gpr_ref(&cc->owning_refs);
+}
+
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ grpc_completion_queue *cc = arg;
+ GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
+}
+
+#ifdef GRPC_CQ_REF_COUNT_DEBUG
+void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason,
+ const char *file, int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s", cc,
+ (int)cc->owning_refs.count, (int)cc->owning_refs.count - 1, reason);
+#else
+void grpc_cq_internal_unref(grpc_completion_queue *cc) {
+#endif
+ if (gpr_unref(&cc->owning_refs)) {
+ GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
+ grpc_pollset_reset(POLLSET_FROM_CQ(cc));
+ gpr_mu_lock(&g_freelist_mu);
+ cc->next_free = g_freelist;
+ g_freelist = cc;
+ gpr_mu_unlock(&g_freelist_mu);
+ }
+}
+
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
+#ifndef NDEBUG
+ gpr_mu_lock(cc->mu);
+ GPR_ASSERT(!cc->shutdown_called);
+ if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
+ cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
+ cc->outstanding_tags =
+ gpr_realloc(cc->outstanding_tags, sizeof(*cc->outstanding_tags) *
+ cc->outstanding_tag_capacity);
+ }
+ cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
+ gpr_mu_unlock(cc->mu);
+#endif
+ gpr_ref(&cc->pending_events);
+}
+
+/* Queue a GRPC_OP_COMPLETED operation: signal the end of an operation.
+   If this is the last waiting-to-be-queued event, the completion queue
+   enters shutdown mode. */
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
+ void *done_arg, grpc_cq_completion *storage) {
+ int shutdown;
+ int i;
+ grpc_pollset_worker *pluck_worker;
+#ifndef NDEBUG
+ int found = 0;
+#endif
+
+ GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
+
+ storage->tag = tag;
+ storage->done = done;
+ storage->done_arg = done_arg;
+ storage->next =
+ ((uintptr_t)&cc->completed_head) | ((uintptr_t)(success != 0));
+
+ gpr_mu_lock(cc->mu);
+#ifndef NDEBUG
+ for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
+ if (cc->outstanding_tags[i] == tag) {
+ cc->outstanding_tag_count--;
+ GPR_SWAP(void *, cc->outstanding_tags[i],
+ cc->outstanding_tags[cc->outstanding_tag_count]);
+ found = 1;
+ break;
+ }
+ }
+ GPR_ASSERT(found);
+#endif
+ shutdown = gpr_unref(&cc->pending_events);
+ if (!shutdown) {
+ cc->completed_tail->next =
+ ((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
+ cc->completed_tail = storage;
+ pluck_worker = NULL;
+ for (i = 0; i < cc->num_pluckers; i++) {
+ if (cc->pluckers[i].tag == tag) {
+ pluck_worker = *cc->pluckers[i].worker;
+ break;
+ }
+ }
+ grpc_pollset_kick(POLLSET_FROM_CQ(cc), pluck_worker);
+ gpr_mu_unlock(cc->mu);
+ } else {
+ cc->completed_tail->next =
+ ((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
+ cc->completed_tail = storage;
+ GPR_ASSERT(!cc->shutdown);
+ GPR_ASSERT(cc->shutdown_called);
+ cc->shutdown = 1;
+ grpc_pollset_shutdown(exec_ctx, POLLSET_FROM_CQ(cc),
+ &cc->pollset_shutdown_done);
+ gpr_mu_unlock(cc->mu);
+ }
+
+ GPR_TIMER_END("grpc_cq_end_op", 0);
+}
+
+grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
+ gpr_timespec deadline, void *reserved) {
+ grpc_event ret;
+ grpc_pollset_worker *worker = NULL;
+ int first_loop = 1;
+ gpr_timespec now;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
+
+ GRPC_API_TRACE(
+ "grpc_completion_queue_next("
+ "cc=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 5, (cc, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+
+ GRPC_CQ_INTERNAL_REF(cc, "next");
+ gpr_mu_lock(cc->mu);
+ for (;;) {
+ if (cc->completed_tail != &cc->completed_head) {
+ grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
+ cc->completed_head.next = c->next & ~(uintptr_t)1;
+ if (c == cc->completed_tail) {
+ cc->completed_tail = &cc->completed_head;
+ }
+ gpr_mu_unlock(cc->mu);
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ break;
+ }
+ if (cc->shutdown) {
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_SHUTDOWN;
+ break;
+ }
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_TIMEOUT;
+ break;
+ }
+ first_loop = 0;
+ /* Check alarms - these are a global resource so we just ping
+ each time through on every pollset.
+ May update deadline to ensure timely wakeups.
+ TODO(ctiller): can this work be localized? */
+ gpr_timespec iteration_deadline = deadline;
+ if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+ GPR_TIMER_MARK("alarm_triggered", 0);
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(cc->mu);
+ continue;
+ } else {
+ grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+ iteration_deadline);
+ }
+ }
+ GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
+ GRPC_CQ_INTERNAL_UNREF(cc, "next");
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_next", 0);
+
+ return ret;
+}
+
+static int add_plucker(grpc_completion_queue *cc, void *tag,
+ grpc_pollset_worker **worker) {
+ if (cc->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
+ return 0;
+ }
+ cc->pluckers[cc->num_pluckers].tag = tag;
+ cc->pluckers[cc->num_pluckers].worker = worker;
+ cc->num_pluckers++;
+ return 1;
+}
+
+static void del_plucker(grpc_completion_queue *cc, void *tag,
+ grpc_pollset_worker **worker) {
+ int i;
+ for (i = 0; i < cc->num_pluckers; i++) {
+ if (cc->pluckers[i].tag == tag && cc->pluckers[i].worker == worker) {
+ cc->num_pluckers--;
+ GPR_SWAP(plucker, cc->pluckers[i], cc->pluckers[cc->num_pluckers]);
+ return;
+ }
+ }
+ GPR_UNREACHABLE_CODE(return );
+}
+
+grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
+ gpr_timespec deadline, void *reserved) {
+ grpc_event ret;
+ grpc_cq_completion *c;
+ grpc_cq_completion *prev;
+ grpc_pollset_worker *worker = NULL;
+ gpr_timespec now;
+ int first_loop = 1;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
+
+ GRPC_API_TRACE(
+ "grpc_completion_queue_pluck("
+ "cc=%p, tag=%p, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+ "reserved=%p)",
+ 6, (cc, tag, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
+ GPR_ASSERT(!reserved);
+
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+
+ GRPC_CQ_INTERNAL_REF(cc, "pluck");
+ gpr_mu_lock(cc->mu);
+ for (;;) {
+ prev = &cc->completed_head;
+ while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ &cc->completed_head) {
+ if (c->tag == tag) {
+ prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
+ if (c == cc->completed_tail) {
+ cc->completed_tail = prev;
+ }
+ gpr_mu_unlock(cc->mu);
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ goto done;
+ }
+ prev = c;
+ }
+ if (cc->shutdown) {
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_SHUTDOWN;
+ break;
+ }
+ if (!add_plucker(cc, tag, &worker)) {
+ gpr_log(GPR_DEBUG,
+ "Too many outstanding grpc_completion_queue_pluck calls: maximum "
+ "is %d",
+ GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+      /* TODO(ctiller): should we use a different result here? */
+ ret.type = GRPC_QUEUE_TIMEOUT;
+ break;
+ }
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ del_plucker(cc, tag, &worker);
+ gpr_mu_unlock(cc->mu);
+ memset(&ret, 0, sizeof(ret));
+ ret.type = GRPC_QUEUE_TIMEOUT;
+ break;
+ }
+ first_loop = 0;
+ /* Check alarms - these are a global resource so we just ping
+ each time through on every pollset.
+ May update deadline to ensure timely wakeups.
+ TODO(ctiller): can this work be localized? */
+ gpr_timespec iteration_deadline = deadline;
+ if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
+ GPR_TIMER_MARK("alarm_triggered", 0);
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(cc->mu);
+ } else {
+ grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
+ iteration_deadline);
+ }
+ del_plucker(cc, tag, &worker);
+ }
+done:
+ GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
+ GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ GPR_TIMER_END("grpc_completion_queue_pluck", 0);
+
+ return ret;
+}
+
+/* Shutdown simply drops a ref that we reserved at creation time; if we drop
+ to zero here, then enter shutdown mode and wake up any waiters */
+void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
+ GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
+ gpr_mu_lock(cc->mu);
+ if (cc->shutdown_called) {
+ gpr_mu_unlock(cc->mu);
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
+ return;
+ }
+ cc->shutdown_called = 1;
+ if (gpr_unref(&cc->pending_events)) {
+ GPR_ASSERT(!cc->shutdown);
+ cc->shutdown = 1;
+ grpc_pollset_shutdown(&exec_ctx, POLLSET_FROM_CQ(cc),
+ &cc->pollset_shutdown_done);
+ }
+ gpr_mu_unlock(cc->mu);
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
+}
+
+void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
+ GRPC_API_TRACE("grpc_completion_queue_destroy(cc=%p)", 1, (cc));
+ GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
+ grpc_completion_queue_shutdown(cc);
+ GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
+ GPR_TIMER_END("grpc_completion_queue_destroy", 0);
+}
+
+grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
+ return POLLSET_FROM_CQ(cc);
+}
+
+void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }
+
+int grpc_cq_is_server_cq(grpc_completion_queue *cc) { return cc->is_server_cq; }
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
new file mode 100644
index 0000000000..35591cb6f4
--- /dev/null
+++ b/src/core/lib/surface/completion_queue.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H
+#define GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H
+
+/* Internal API for completion queues */
+
+#include <grpc/grpc.h>
+#include "src/core/lib/iomgr/pollset.h"
+
+typedef struct grpc_cq_completion {
+ /** user supplied tag */
+ void *tag;
+ /** done callback - called when this queue element is no longer
+ needed by the completion queue */
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ struct grpc_cq_completion *c);
+ void *done_arg;
+ /** next pointer; low bit is used to indicate success or not */
+ uintptr_t next;
+} grpc_cq_completion;
+
+#ifdef GRPC_CQ_REF_COUNT_DEBUG
+void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
+ const char *file, int line);
+void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason,
+ const char *file, int line);
+#define GRPC_CQ_INTERNAL_REF(cc, reason) \
+ grpc_cq_internal_ref(cc, reason, __FILE__, __LINE__)
+#define GRPC_CQ_INTERNAL_UNREF(cc, reason) \
+ grpc_cq_internal_unref(cc, reason, __FILE__, __LINE__)
+#else
+void grpc_cq_internal_ref(grpc_completion_queue *cc);
+void grpc_cq_internal_unref(grpc_completion_queue *cc);
+#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc)
+#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc)
+#endif
+
+/* Flag that an operation is beginning: the completion queue will not finish
+   shutdown until a corresponding grpc_cq_end_* call is made.
+ \a tag is currently used only in debug builds. */
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag);
+
+/* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to
+ grpc_cq_begin_op */
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
+ void *done_arg, grpc_cq_completion *storage);
+
+grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
+
+void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
+int grpc_cq_is_server_cq(grpc_completion_queue *cc);
+
+void grpc_cq_global_init(void);
+void grpc_cq_global_shutdown(void);
+
+#endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H */
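A minimal sketch of the begin_op/end_op contract described above, mirroring the pattern channel_ping.c uses; my_state, my_done, and post_completion are assumed names, not part of the library.

#include <grpc/support/alloc.h>
#include "src/core/lib/surface/completion_queue.h"

typedef struct {
  grpc_cq_completion completion_storage; /* owned until `done` runs */
} my_state; /* hypothetical caller state */

static void my_done(grpc_exec_ctx *exec_ctx, void *arg,
                    grpc_cq_completion *storage) {
  gpr_free(arg); /* the queue no longer needs this completion */
}

static void post_completion(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
                            void *tag, int success) {
  my_state *st = gpr_malloc(sizeof(*st));
  grpc_cq_begin_op(cq, tag); /* pairs with the grpc_cq_end_op below */
  /* ... perform the actual work ... */
  grpc_cq_end_op(exec_ctx, cq, tag, success, my_done, st,
                 &st->completion_storage);
}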
diff --git a/src/core/lib/surface/event_string.c b/src/core/lib/surface/event_string.c
new file mode 100644
index 0000000000..360c718a17
--- /dev/null
+++ b/src/core/lib/surface/event_string.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/event_string.h"
+
+#include <stdio.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/support/string.h"
+
+static void addhdr(gpr_strvec *buf, grpc_event *ev) {
+ char *tmp;
+ gpr_asprintf(&tmp, "tag:%p", ev->tag);
+ gpr_strvec_add(buf, tmp);
+}
+
+static const char *errstr(int success) { return success ? "OK" : "ERROR"; }
+
+static void adderr(gpr_strvec *buf, int success) {
+ char *tmp;
+ gpr_asprintf(&tmp, " %s", errstr(success));
+ gpr_strvec_add(buf, tmp);
+}
+
+char *grpc_event_string(grpc_event *ev) {
+ char *out;
+ gpr_strvec buf;
+
+ if (ev == NULL) return gpr_strdup("null");
+
+ gpr_strvec_init(&buf);
+
+ switch (ev->type) {
+ case GRPC_QUEUE_TIMEOUT:
+ gpr_strvec_add(&buf, gpr_strdup("QUEUE_TIMEOUT"));
+ break;
+ case GRPC_QUEUE_SHUTDOWN:
+ gpr_strvec_add(&buf, gpr_strdup("QUEUE_SHUTDOWN"));
+ break;
+ case GRPC_OP_COMPLETE:
+ gpr_strvec_add(&buf, gpr_strdup("OP_COMPLETE: "));
+ addhdr(&buf, ev);
+ adderr(&buf, ev->success);
+ break;
+ }
+
+ out = gpr_strvec_flatten(&buf, NULL);
+ gpr_strvec_destroy(&buf);
+ return out;
+}
diff --git a/src/core/lib/surface/event_string.h b/src/core/lib/surface/event_string.h
new file mode 100644
index 0000000000..577e9c718f
--- /dev/null
+++ b/src/core/lib/surface/event_string.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_EVENT_STRING_H
+#define GRPC_CORE_LIB_SURFACE_EVENT_STRING_H
+
+#include <grpc/grpc.h>
+
+/* Returns a string describing an event. Must be later freed with gpr_free() */
+char *grpc_event_string(grpc_event *ev);
+
+#endif /* GRPC_CORE_LIB_SURFACE_EVENT_STRING_H */
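A minimal sketch, with log_event as an assumed caller name, of the ownership contract above: the returned string is heap-allocated and must be released with gpr_free().

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/surface/event_string.h"

static void log_event(grpc_event *ev) {
  char *s = grpc_event_string(ev); /* heap-allocated description */
  gpr_log(GPR_DEBUG, "completion queue event: %s", s);
  gpr_free(s); /* required by the contract above */
}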
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
new file mode 100644
index 0000000000..b5546fb44a
--- /dev/null
+++ b/src/core/lib/surface/init.c
@@ -0,0 +1,239 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <limits.h>
+#include <memory.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/time.h>
+/* TODO(ctiller): find another way? - better not to include census here */
+#include "src/core/lib/census/grpc_plugin.h"
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/channel/client_channel.h"
+#include "src/core/lib/channel/compress_filter.h"
+#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/channel/http_client_filter.h"
+#include "src/core/lib/channel/http_server_filter.h"
+#include "src/core/lib/client_config/lb_policies/pick_first.h"
+#include "src/core/lib/client_config/lb_policies/round_robin.h"
+#include "src/core/lib/client_config/lb_policy_registry.h"
+#include "src/core/lib/client_config/resolver_registry.h"
+#include "src/core/lib/client_config/resolvers/dns_resolver.h"
+#include "src/core/lib/client_config/resolvers/sockaddr_resolver.h"
+#include "src/core/lib/client_config/subchannel.h"
+#include "src/core/lib/client_config/subchannel_index.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/surface/init.h"
+#include "src/core/lib/surface/lame_client.h"
+#include "src/core/lib/surface/server.h"
+#include "src/core/lib/surface/surface_trace.h"
+#include "src/core/lib/transport/chttp2_transport.h"
+#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/transport_impl.h"
+
+#ifndef GRPC_DEFAULT_NAME_PREFIX
+#define GRPC_DEFAULT_NAME_PREFIX "dns:///"
+#endif
+
+#define MAX_PLUGINS 128
+
+static gpr_once g_basic_init = GPR_ONCE_INIT;
+static gpr_mu g_init_mu;
+static int g_initializations;
+
+static void do_basic_init(void) {
+ gpr_mu_init(&g_init_mu);
+ /* TODO(ctiller): ideally remove this strict linkage */
+ grpc_register_plugin(census_grpc_plugin_init, census_grpc_plugin_destroy);
+ g_initializations = 0;
+}
+
+static bool append_filter(grpc_channel_stack_builder *builder, void *arg) {
+ return grpc_channel_stack_builder_append_filter(
+ builder, (const grpc_channel_filter *)arg, NULL, NULL);
+}
+
+static bool prepend_filter(grpc_channel_stack_builder *builder, void *arg) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, (const grpc_channel_filter *)arg, NULL, NULL);
+}
+
+static bool maybe_add_http_filter(grpc_channel_stack_builder *builder,
+ void *arg) {
+ grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ if (t && strstr(t->vtable->name, "http")) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ }
+ return true;
+}
+
+static void register_builtin_channel_init() {
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_compress_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_http_filter,
+ (void *)&grpc_http_server_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ grpc_add_connected_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, append_filter,
+ (void *)&grpc_client_channel_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_LAME_CHANNEL, INT_MAX,
+ append_filter, (void *)&grpc_lame_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
+ (void *)&grpc_server_top_filter);
+}
+
+typedef struct grpc_plugin {
+ void (*init)();
+ void (*destroy)();
+} grpc_plugin;
+
+static grpc_plugin g_all_of_the_plugins[MAX_PLUGINS];
+static int g_number_of_plugins = 0;
+
+void grpc_register_plugin(void (*init)(void), void (*destroy)(void)) {
+ GRPC_API_TRACE("grpc_register_plugin(init=%p, destroy=%p)", 2,
+ ((void *)(intptr_t)init, (void *)(intptr_t)destroy));
+ GPR_ASSERT(g_number_of_plugins != MAX_PLUGINS);
+ g_all_of_the_plugins[g_number_of_plugins].init = init;
+ g_all_of_the_plugins[g_number_of_plugins].destroy = destroy;
+ g_number_of_plugins++;
+}
+
+void grpc_init(void) {
+ int i;
+ gpr_once_init(&g_basic_init, do_basic_init);
+
+ gpr_mu_lock(&g_init_mu);
+ if (++g_initializations == 1) {
+ gpr_time_init();
+ grpc_mdctx_global_init();
+ grpc_channel_init_init();
+ grpc_lb_policy_registry_init(grpc_pick_first_lb_factory_create());
+ grpc_register_lb_policy(grpc_pick_first_lb_factory_create());
+ grpc_register_lb_policy(grpc_round_robin_lb_factory_create());
+ grpc_resolver_registry_init(GRPC_DEFAULT_NAME_PREFIX);
+ grpc_register_resolver_type(grpc_dns_resolver_factory_create());
+ grpc_register_resolver_type(grpc_ipv4_resolver_factory_create());
+ grpc_register_resolver_type(grpc_ipv6_resolver_factory_create());
+#ifdef GPR_HAVE_UNIX_SOCKET
+ grpc_register_resolver_type(grpc_unix_resolver_factory_create());
+#endif
+ grpc_register_tracer("api", &grpc_api_trace);
+ grpc_register_tracer("channel", &grpc_trace_channel);
+ grpc_register_tracer("http", &grpc_http_trace);
+ grpc_register_tracer("flowctl", &grpc_flowctl_trace);
+ grpc_register_tracer("connectivity_state", &grpc_connectivity_state_trace);
+ grpc_register_tracer("channel_stack_builder",
+ &grpc_trace_channel_stack_builder);
+ grpc_security_pre_init();
+ grpc_iomgr_init();
+ grpc_executor_init();
+ grpc_tracer_init("GRPC_TRACE");
+ gpr_timers_global_init();
+ grpc_cq_global_init();
+ grpc_subchannel_index_init();
+ for (i = 0; i < g_number_of_plugins; i++) {
+ if (g_all_of_the_plugins[i].init != NULL) {
+ g_all_of_the_plugins[i].init();
+ }
+ }
+ /* register channel finalization AFTER all plugins, to ensure that it's run
+ * at the appropriate time */
+ grpc_register_security_filters();
+ register_builtin_channel_init();
+ /* no more changes to channel init pipelines */
+ grpc_channel_init_finalize();
+ }
+ gpr_mu_unlock(&g_init_mu);
+ GRPC_API_TRACE("grpc_init(void)", 0, ());
+}
+
+void grpc_shutdown(void) {
+ int i;
+ GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
+ gpr_mu_lock(&g_init_mu);
+ if (--g_initializations == 0) {
+ grpc_executor_shutdown();
+ grpc_cq_global_shutdown();
+ grpc_iomgr_shutdown();
+ grpc_subchannel_index_shutdown();
+ gpr_timers_global_destroy();
+ grpc_tracer_shutdown();
+ grpc_resolver_registry_shutdown();
+ grpc_lb_policy_registry_shutdown();
+ for (i = 0; i < g_number_of_plugins; i++) {
+ if (g_all_of_the_plugins[i].destroy != NULL) {
+ g_all_of_the_plugins[i].destroy();
+ }
+ }
+ grpc_channel_init_shutdown();
+ grpc_mdctx_global_shutdown();
+ }
+ gpr_mu_unlock(&g_init_mu);
+}
+
+int grpc_is_initialized(void) {
+ int r;
+ gpr_once_init(&g_basic_init, do_basic_init);
+ gpr_mu_lock(&g_init_mu);
+ r = g_initializations > 0;
+ gpr_mu_unlock(&g_init_mu);
+ return r;
+}
diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h
new file mode 100644
index 0000000000..10e2a5896e
--- /dev/null
+++ b/src/core/lib/surface/init.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_INIT_H
+#define GRPC_CORE_LIB_SURFACE_INIT_H
+
+void grpc_register_security_filters(void);
+void grpc_security_pre_init(void);
+int grpc_is_initialized(void);
+
+#endif /* GRPC_CORE_LIB_SURFACE_INIT_H */
diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c
new file mode 100644
index 0000000000..d3c2f645a7
--- /dev/null
+++ b/src/core/lib/surface/init_secure.c
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/init.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/security/auth_filters.h"
+#include "src/core/lib/security/credentials.h"
+#include "src/core/lib/security/secure_endpoint.h"
+#include "src/core/lib/security/security_connector.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/tsi/transport_security_interface.h"
+
+void grpc_security_pre_init(void) {
+ grpc_register_tracer("secure_endpoint", &grpc_trace_secure_endpoint);
+ grpc_register_tracer("transport_security", &tsi_tracing_enabled);
+}
+
+static bool maybe_prepend_client_auth_filter(
+ grpc_channel_stack_builder *builder, void *arg) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(GRPC_SECURITY_CONNECTOR_ARG, args->args[i].key)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, &grpc_client_auth_filter, NULL, NULL);
+ }
+ }
+ }
+ return true;
+}
+
+static bool maybe_prepend_server_auth_filter(
+ grpc_channel_stack_builder *builder, void *arg) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 == strcmp(GRPC_SERVER_CREDENTIALS_ARG, args->args[i].key)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, &grpc_server_auth_filter, NULL, NULL);
+ }
+ }
+ }
+ return true;
+}
+
+void grpc_register_security_filters(void) {
+ grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
+ maybe_prepend_client_auth_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_prepend_client_auth_filter, NULL);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_prepend_server_auth_filter, NULL);
+}
diff --git a/src/core/lib/surface/init_unsecure.c b/src/core/lib/surface/init_unsecure.c
new file mode 100644
index 0000000000..243c005d86
--- /dev/null
+++ b/src/core/lib/surface/init_unsecure.c
@@ -0,0 +1,38 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/init.h"
+
+void grpc_security_pre_init(void) {}
+
+void grpc_register_security_filters(void) {}
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
new file mode 100644
index 0000000000..95ec4b06c3
--- /dev/null
+++ b/src/core/lib/surface/lame_client.c
@@ -0,0 +1,155 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/lame_client.h"
+
+#include <grpc/grpc.h>
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel.h"
+
+typedef struct {
+ grpc_linked_mdelem status;
+ grpc_linked_mdelem details;
+} call_data;
+
+typedef struct {
+ grpc_status_code error_code;
+ const char *error_message;
+} channel_data;
+
+static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(chand->error_code, tmp);
+ calld->status.md = grpc_mdelem_from_strings("grpc-status", tmp);
+ calld->details.md =
+ grpc_mdelem_from_strings("grpc-message", chand->error_message);
+ calld->status.prev = calld->details.next = NULL;
+ calld->status.next = &calld->details;
+ calld->details.prev = &calld->status;
+ mdb->list.head = &calld->status;
+ mdb->list.tail = &calld->details;
+ mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+}
+
+static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ if (op->recv_initial_metadata != NULL) {
+ fill_metadata(elem, op->recv_initial_metadata);
+ } else if (op->recv_trailing_metadata != NULL) {
+ fill_metadata(elem, op->recv_trailing_metadata);
+ }
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+}
+
+static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ return NULL;
+}
+
+static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_transport_op *op) {
+ if (op->on_connectivity_state_change) {
+ GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE);
+ *op->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
+ op->on_connectivity_state_change->cb(
+ exec_ctx, op->on_connectivity_state_change->cb_arg, 1);
+ }
+ if (op->on_consumed != NULL) {
+ op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 1);
+ }
+}
+
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {}
+
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
+
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(args->is_last);
+}
+
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {}
+
+const grpc_channel_filter grpc_lame_filter = {
+ lame_start_transport_stream_op,
+ lame_start_transport_op,
+ sizeof(call_data),
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset,
+ destroy_call_elem,
+ sizeof(channel_data),
+ init_channel_elem,
+ destroy_channel_elem,
+ lame_get_peer,
+ "lame-client",
+};
+
+#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
+
+grpc_channel *grpc_lame_client_channel_create(const char *target,
+ grpc_status_code error_code,
+ const char *error_message) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_channel_element *elem;
+ channel_data *chand;
+ grpc_channel *channel = grpc_channel_create(&exec_ctx, target, NULL,
+ GRPC_CLIENT_LAME_CHANNEL, NULL);
+ elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ GRPC_API_TRACE(
+ "grpc_lame_client_channel_create(target=%s, error_code=%d, "
+ "error_message=%s)",
+ 3, (target, (int)error_code, error_message));
+ GPR_ASSERT(elem->filter == &grpc_lame_filter);
+ chand = (channel_data *)elem->channel_data;
+ chand->error_code = error_code;
+ chand->error_message = error_message;
+ grpc_exec_ctx_finish(&exec_ctx);
+ return channel;
+}
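A usage sketch, not part of the diff: grpc_lame_client_channel_create is the fallback the secure channel factory below returns when credentials are unusable, and it is also declared in the public grpc.h. Every call made on the resulting channel completes immediately with the configured status. Note that error_message is stored by pointer (chand->error_message above), so it must outlive the channel; a string literal is safe.

#include <grpc/grpc.h>

int main(void) {
  grpc_init();
  grpc_channel *channel = grpc_lame_client_channel_create(
      "unreachable.example.com", GRPC_STATUS_UNAVAILABLE,
      "client configuration was rejected");
  /* Calls created on this channel are failed by the lame filter, which
     synthesizes grpc-status/grpc-message trailing metadata (fill_metadata). */
  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}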
diff --git a/src/core/lib/surface/lame_client.h b/src/core/lib/surface/lame_client.h
new file mode 100644
index 0000000000..5f6ea34d4b
--- /dev/null
+++ b/src/core/lib/surface/lame_client.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H
+#define GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H
+
+#include "src/core/lib/channel/channel_stack.h"
+
+extern const grpc_channel_filter grpc_lame_filter;
+
+#endif /* GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H */
diff --git a/src/core/lib/surface/metadata_array.c b/src/core/lib/surface/metadata_array.c
new file mode 100644
index 0000000000..4436f2da87
--- /dev/null
+++ b/src/core/lib/surface/metadata_array.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+#include <string.h>
+
+#include "src/core/lib/surface/api_trace.h"
+
+void grpc_metadata_array_init(grpc_metadata_array* array) {
+ GRPC_API_TRACE("grpc_metadata_array_init(array=%p)", 1, (array));
+ memset(array, 0, sizeof(*array));
+}
+
+void grpc_metadata_array_destroy(grpc_metadata_array* array) {
+ GRPC_API_TRACE("grpc_metadata_array_destroy(array=%p)", 1, (array));
+ gpr_free(array->metadata);
+}
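For illustration, the typical receive-side pattern for these two helpers (the batch-op usage mentioned in the comment is from the public grpc_op API, not this file):

#include <grpc/grpc.h>

void recv_trailing_metadata_example(void) {
  grpc_metadata_array trailing_metadata;
  grpc_metadata_array_init(&trailing_metadata);
  /* ... hand &trailing_metadata to a GRPC_OP_RECV_STATUS_ON_CLIENT batch op
     and wait for the batch to complete ... */
  /* destroy() frees only the metadata backing store; the struct itself is
     caller-owned, here on the stack. */
  grpc_metadata_array_destroy(&trailing_metadata);
}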
diff --git a/src/core/lib/surface/secure_channel_create.c b/src/core/lib/surface/secure_channel_create.c
new file mode 100644
index 0000000000..dcb367023e
--- /dev/null
+++ b/src/core/lib/surface/secure_channel_create.c
@@ -0,0 +1,319 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/client_channel.h"
+#include "src/core/lib/client_config/resolver_registry.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/security/auth_filters.h"
+#include "src/core/lib/security/credentials.h"
+#include "src/core/lib/security/security_context.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/transport/chttp2_transport.h"
+#include "src/core/lib/tsi/transport_security_interface.h"
+
+typedef struct {
+ grpc_connector base;
+ gpr_refcount refs;
+
+ grpc_channel_security_connector *security_connector;
+
+ grpc_closure *notify;
+ grpc_connect_in_args args;
+ grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
+
+ gpr_mu mu;
+ grpc_endpoint *connecting_endpoint;
+ grpc_endpoint *newly_connecting_endpoint;
+
+ grpc_closure connected_closure;
+} connector;
+
+static void connector_ref(grpc_connector *con) {
+ connector *c = (connector *)con;
+ gpr_ref(&c->refs);
+}
+
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ if (gpr_unref(&c->refs)) {
+ /* c->initial_string_buffer does not need to be destroyed */
+ gpr_free(c);
+ }
+}
+
+static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_security_status status,
+ grpc_endpoint *secure_endpoint,
+ grpc_auth_context *auth_context) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_channel_args *args_copy = NULL;
+ gpr_mu_lock(&c->mu);
+ if (c->connecting_endpoint == NULL) {
+ memset(c->result, 0, sizeof(*c->result));
+ gpr_mu_unlock(&c->mu);
+ } else if (status != GRPC_SECURITY_OK) {
+ gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status);
+ memset(c->result, 0, sizeof(*c->result));
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
+ } else {
+ grpc_arg auth_context_arg;
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
+ c->result->transport = grpc_create_chttp2_transport(
+ exec_ctx, c->args.channel_args, secure_endpoint, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
+ auth_context_arg = grpc_auth_context_to_arg(auth_context);
+ args_copy = grpc_channel_args_copy_and_add(c->args.channel_args,
+ &auth_context_arg, 1);
+ c->result->channel_args = args_copy;
+ }
+ notify = c->notify;
+ c->notify = NULL;
+ /* look at c->args which are connector args. */
+ notify->cb(exec_ctx, notify->cb_arg, 1);
+ if (args_copy != NULL) grpc_channel_args_destroy(args_copy);
+}
+
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ bool success) {
+ connector *c = arg;
+ grpc_channel_security_connector_do_handshake(exec_ctx, c->security_connector,
+ c->connecting_endpoint,
+ on_secure_handshake_done, c);
+}
+
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
+ connector *c = arg;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->newly_connecting_endpoint;
+ if (tcp != NULL) {
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->connecting_endpoint == NULL);
+ c->connecting_endpoint = tcp;
+ gpr_mu_unlock(&c->mu);
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
+ } else {
+ grpc_channel_security_connector_do_handshake(
+ exec_ctx, c->security_connector, tcp, on_secure_handshake_done, c);
+ }
+ } else {
+ memset(c->result, 0, sizeof(*c->result));
+ notify = c->notify;
+ c->notify = NULL;
+ notify->cb(exec_ctx, notify->cb_arg, 1);
+ }
+}
+
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
+ connector *c = (connector *)con;
+ grpc_endpoint *ep;
+ gpr_mu_lock(&c->mu);
+ ep = c->connecting_endpoint;
+ c->connecting_endpoint = NULL;
+ gpr_mu_unlock(&c->mu);
+ if (ep) {
+ grpc_endpoint_shutdown(exec_ctx, ep);
+ }
+}
+
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
+ const grpc_connect_in_args *args,
+ grpc_connect_out_args *result,
+ grpc_closure *notify) {
+ connector *c = (connector *)con;
+ GPR_ASSERT(c->notify == NULL);
+ GPR_ASSERT(notify->cb);
+ c->notify = notify;
+ c->args = *args;
+ c->result = result;
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(c->connecting_endpoint == NULL);
+ gpr_mu_unlock(&c->mu);
+ grpc_closure_init(&c->connected_closure, connected, c);
+ grpc_tcp_client_connect(
+ exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
+ args->interested_parties, args->addr, args->addr_len, args->deadline);
+}
+
+static const grpc_connector_vtable connector_vtable = {
+ connector_ref, connector_unref, connector_shutdown, connector_connect};
+
+typedef struct {
+ grpc_subchannel_factory base;
+ gpr_refcount refs;
+ grpc_channel_args *merge_args;
+ grpc_channel_security_connector *security_connector;
+ grpc_channel *master;
+} subchannel_factory;
+
+static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ gpr_ref(&f->refs);
+}
+
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ if (gpr_unref(&f->refs)) {
+ GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
+ "subchannel_factory");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
+ grpc_channel_args_destroy(f->merge_args);
+ gpr_free(f);
+ }
+}
+
+static grpc_subchannel *subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
+ subchannel_factory *f = (subchannel_factory *)scf;
+ connector *c = gpr_malloc(sizeof(*c));
+ grpc_channel_args *final_args =
+ grpc_channel_args_merge(args->args, f->merge_args);
+ grpc_subchannel *s;
+ memset(c, 0, sizeof(*c));
+ c->base.vtable = &connector_vtable;
+ c->security_connector = f->security_connector;
+ gpr_mu_init(&c->mu);
+ gpr_ref_init(&c->refs, 1);
+ args->args = final_args;
+ s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_connector_unref(exec_ctx, &c->base);
+ grpc_channel_args_destroy(final_args);
+ return s;
+}
+
+static const grpc_subchannel_factory_vtable subchannel_factory_vtable = {
+ subchannel_factory_ref, subchannel_factory_unref,
+ subchannel_factory_create_subchannel};
+
+/* Create a secure client channel:
+ Asynchronously: - resolve target
+ - connect to it (trying alternatives as presented)
+ - perform handshakes */
+grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
+ const char *target,
+ const grpc_channel_args *args,
+ void *reserved) {
+ grpc_channel *channel;
+ grpc_arg connector_arg;
+ grpc_channel_args *args_copy;
+ grpc_channel_args *new_args_from_connector;
+ grpc_channel_security_connector *security_connector;
+ grpc_resolver *resolver;
+ subchannel_factory *f;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE(
+ "grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
+ "reserved=%p)",
+ 4, (creds, target, args, reserved));
+ GPR_ASSERT(reserved == NULL);
+
+ if (grpc_find_security_connector_in_args(args) != NULL) {
+ gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
+ grpc_exec_ctx_finish(&exec_ctx);
+ return grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INVALID_ARGUMENT,
+ "Security connector exists in channel args.");
+ }
+
+ if (grpc_channel_credentials_create_security_connector(
+ creds, target, args, &security_connector, &new_args_from_connector) !=
+ GRPC_SECURITY_OK) {
+ grpc_exec_ctx_finish(&exec_ctx);
+ return grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INVALID_ARGUMENT,
+ "Failed to create security connector.");
+ }
+
+ connector_arg = grpc_security_connector_to_arg(&security_connector->base);
+ args_copy = grpc_channel_args_copy_and_add(
+ new_args_from_connector != NULL ? new_args_from_connector : args,
+ &connector_arg, 1);
+
+ channel = grpc_channel_create(&exec_ctx, target, args_copy,
+ GRPC_CLIENT_CHANNEL, NULL);
+
+ f = gpr_malloc(sizeof(*f));
+ f->base.vtable = &subchannel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
+ GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "subchannel_factory");
+ f->security_connector = security_connector;
+ f->merge_args = grpc_channel_args_copy(args_copy);
+ f->master = channel;
+ GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
+ resolver = grpc_resolver_create(target, &f->base);
+ if (resolver) {
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ }
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ GRPC_SECURITY_CONNECTOR_UNREF(&security_connector->base, "channel_create");
+ grpc_channel_args_destroy(args_copy);
+ if (new_args_from_connector != NULL) {
+ grpc_channel_args_destroy(new_args_from_connector);
+ }
+
+ if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "subchannel_factory");
+ channel = NULL;
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ return channel;
+}
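A caller-side sketch of the entry point above, assuming SSL credentials with default roots (grpc_ssl_credentials_create and grpc_channel_credentials_release come from the public security API, not this file):

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

grpc_channel *make_secure_channel(const char *target) {
  grpc_channel_credentials *creds =
      grpc_ssl_credentials_create(NULL /* default roots */, NULL, NULL);
  /* On connector-creation failure grpc_secure_channel_create returns a lame
     channel rather than NULL, so the caller's error handling stays uniform. */
  grpc_channel *channel =
      grpc_secure_channel_create(creds, target, NULL, NULL);
  grpc_channel_credentials_release(creds);
  return channel;
}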
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
new file mode 100644
index 0000000000..080734e9d5
--- /dev/null
+++ b/src/core/lib/surface/server.c
@@ -0,0 +1,1319 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/server.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/support/stack_lockfree.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/surface/init.h"
+#include "src/core/lib/transport/metadata.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+typedef struct listener {
+ void *arg;
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *closure);
+ struct listener *next;
+ grpc_closure destroy_done;
+} listener;
+
+typedef struct call_data call_data;
+typedef struct channel_data channel_data;
+typedef struct registered_method registered_method;
+
+typedef struct {
+ call_data *next;
+ call_data *prev;
+} call_link;
+
+typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
+
+typedef struct requested_call {
+ requested_call_type type;
+ void *tag;
+ grpc_server *server;
+ grpc_completion_queue *cq_bound_to_call;
+ grpc_completion_queue *cq_for_notification;
+ grpc_call **call;
+ grpc_cq_completion completion;
+ grpc_metadata_array *initial_metadata;
+ union {
+ struct {
+ grpc_call_details *details;
+ } batch;
+ struct {
+ registered_method *registered_method;
+ gpr_timespec *deadline;
+ grpc_byte_buffer **optional_payload;
+ } registered;
+ } data;
+ grpc_closure publish;
+} requested_call;
+
+typedef struct channel_registered_method {
+ registered_method *server_registered_method;
+ grpc_mdstr *method;
+ grpc_mdstr *host;
+} channel_registered_method;
+
+struct channel_data {
+ grpc_server *server;
+ grpc_connectivity_state connectivity_state;
+ grpc_channel *channel;
+ /* linked list of all channels on a server */
+ channel_data *next;
+ channel_data *prev;
+ channel_registered_method *registered_methods;
+ uint32_t registered_method_slots;
+ uint32_t registered_method_max_probes;
+ grpc_closure finish_destroy_channel_closure;
+ grpc_closure channel_connectivity_changed;
+};
+
+typedef struct shutdown_tag {
+ void *tag;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion;
+} shutdown_tag;
+
+typedef enum {
+ /* waiting for metadata */
+ NOT_STARTED,
+  /* initial metadata read, not flow controlled in yet */
+ PENDING,
+ /* flow controlled in, on completion queue */
+ ACTIVATED,
+ /* cancelled before being queued */
+ ZOMBIED
+} call_state;
+
+typedef struct request_matcher request_matcher;
+
+struct call_data {
+ grpc_call *call;
+
+ /** protects state */
+ gpr_mu mu_state;
+ /** the current state of a call - see call_state */
+ call_state state;
+
+ grpc_mdstr *path;
+ grpc_mdstr *host;
+ gpr_timespec deadline;
+
+ grpc_completion_queue *cq_new;
+
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_array initial_metadata;
+
+ grpc_closure got_initial_metadata;
+ grpc_closure server_on_recv_initial_metadata;
+ grpc_closure kill_zombie_closure;
+ grpc_closure *on_done_recv_initial_metadata;
+
+ call_data *pending_next;
+};
+
+struct request_matcher {
+ call_data *pending_head;
+ call_data *pending_tail;
+ gpr_stack_lockfree *requests;
+};
+
+struct registered_method {
+ char *method;
+ char *host;
+ request_matcher request_matcher;
+ registered_method *next;
+};
+
+typedef struct {
+ grpc_channel **channels;
+ size_t num_channels;
+} channel_broadcaster;
+
+struct grpc_server {
+ grpc_channel_args *channel_args;
+
+ grpc_completion_queue **cqs;
+ grpc_pollset **pollsets;
+ size_t cq_count;
+
+  /* The following two mutexes control access to server state:
+ mu_global controls access to non-call-related state (e.g., channel state)
+ mu_call controls access to call-related state (e.g., the call lists)
+
+ If they are ever required to be nested, you must lock mu_global
+ before mu_call. This is currently used in shutdown processing
+ (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
+ gpr_mu mu_global; /* mutex for server and channel state */
+ gpr_mu mu_call; /* mutex for call-specific state */
+
+ registered_method *registered_methods;
+ request_matcher unregistered_request_matcher;
+ /** free list of available requested_calls indices */
+ gpr_stack_lockfree *request_freelist;
+ /** requested call backing data */
+ requested_call *requested_calls;
+ size_t max_requested_calls;
+
+ gpr_atm shutdown_flag;
+ uint8_t shutdown_published;
+ size_t num_shutdown_tags;
+ shutdown_tag *shutdown_tags;
+
+ channel_data root_channel_data;
+
+ listener *listeners;
+ int listeners_destroyed;
+ gpr_refcount internal_refcount;
+
+ /** when did we print the last shutdown progress message */
+ gpr_timespec last_shutdown_message_time;
+};
+
+#define SERVER_FROM_CALL_ELEM(elem) \
+ (((channel_data *)(elem)->channel_data)->server)
+
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc);
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc);
+/* Before calling maybe_finish_shutdown, we must hold mu_global and not
+ hold mu_call */
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server);
+
+/*
+ * channel broadcaster
+ */
+
+/* assumes server locked */
+static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
+ channel_data *c;
+ size_t count = 0;
+ for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
+ count++;
+ }
+ cb->num_channels = count;
+ cb->channels = gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ count = 0;
+ for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
+ cb->channels[count++] = c->channel;
+ GRPC_CHANNEL_INTERNAL_REF(c->channel, "broadcast");
+ }
+}
+
+struct shutdown_cleanup_args {
+ grpc_closure closure;
+ gpr_slice slice;
+};
+
+static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_status_ignored) {
+ struct shutdown_cleanup_args *a = arg;
+ gpr_slice_unref(a->slice);
+ gpr_free(a);
+}
+
+static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ int send_goaway, int send_disconnect) {
+ grpc_transport_op op;
+ struct shutdown_cleanup_args *sc;
+ grpc_channel_element *elem;
+
+ memset(&op, 0, sizeof(op));
+ op.send_goaway = send_goaway;
+ sc = gpr_malloc(sizeof(*sc));
+ sc->slice = gpr_slice_from_copied_string("Server shutdown");
+ op.goaway_message = &sc->slice;
+ op.goaway_status = GRPC_STATUS_OK;
+ op.disconnect = send_disconnect;
+ grpc_closure_init(&sc->closure, shutdown_cleanup, sc);
+ op.on_consumed = &sc->closure;
+
+ elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
+}
+
+static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx,
+ channel_broadcaster *cb,
+ int send_goaway,
+ int force_disconnect) {
+ size_t i;
+
+ for (i = 0; i < cb->num_channels; i++) {
+ send_shutdown(exec_ctx, cb->channels[i], send_goaway, force_disconnect);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast");
+ }
+ gpr_free(cb->channels);
+}
+
+/*
+ * request_matcher
+ */
+
+static void request_matcher_init(request_matcher *rm, size_t entries) {
+ memset(rm, 0, sizeof(*rm));
+ rm->requests = gpr_stack_lockfree_create(entries);
+}
+
+static void request_matcher_destroy(request_matcher *rm) {
+ GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests) == -1);
+ gpr_stack_lockfree_destroy(rm->requests);
+}
+
+static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, bool success) {
+ grpc_call_destroy(grpc_call_from_top_element(elem));
+}
+
+static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
+ request_matcher *rm) {
+ while (rm->pending_head) {
+ call_data *calld = rm->pending_head;
+ rm->pending_head = calld->pending_next;
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
+ }
+}
+
+static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
+ request_matcher *rm) {
+ int request_id;
+ while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
+ fail_call(exec_ctx, server, &server->requested_calls[request_id]);
+ }
+}
+
+/*
+ * server proper
+ */
+
+static void server_ref(grpc_server *server) {
+ gpr_ref(&server->internal_refcount);
+}
+
+static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
+ registered_method *rm;
+ size_t i;
+ grpc_channel_args_destroy(server->channel_args);
+ gpr_mu_destroy(&server->mu_global);
+ gpr_mu_destroy(&server->mu_call);
+ while ((rm = server->registered_methods) != NULL) {
+ server->registered_methods = rm->next;
+ request_matcher_destroy(&rm->request_matcher);
+ gpr_free(rm->method);
+ gpr_free(rm->host);
+ gpr_free(rm);
+ }
+ for (i = 0; i < server->cq_count; i++) {
+ GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
+ }
+ request_matcher_destroy(&server->unregistered_request_matcher);
+ gpr_stack_lockfree_destroy(server->request_freelist);
+ gpr_free(server->cqs);
+ gpr_free(server->pollsets);
+ gpr_free(server->shutdown_tags);
+ gpr_free(server->requested_calls);
+ gpr_free(server);
+}
+
+static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) {
+ if (gpr_unref(&server->internal_refcount)) {
+ server_delete(exec_ctx, server);
+ }
+}
+
+static int is_channel_orphaned(channel_data *chand) {
+ return chand->next == chand;
+}
+
+static void orphan_channel(channel_data *chand) {
+ chand->next->prev = chand->prev;
+ chand->prev->next = chand->next;
+ chand->next = chand->prev = chand;
+}
+
+static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
+ bool success) {
+ channel_data *chand = cd;
+ grpc_server *server = chand->server;
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
+ server_unref(exec_ctx, server);
+}
+
+static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand) {
+ if (is_channel_orphaned(chand)) return;
+ GPR_ASSERT(chand->server != NULL);
+ orphan_channel(chand);
+ server_ref(chand->server);
+ maybe_finish_shutdown(exec_ctx, chand->server);
+ chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
+ chand->finish_destroy_channel_closure.cb_arg = chand;
+
+ grpc_transport_op op;
+ memset(&op, 0, sizeof(op));
+ op.set_accept_stream = true;
+ op.on_consumed = &chand->finish_destroy_channel_closure;
+ grpc_channel_next_op(exec_ctx,
+ grpc_channel_stack_element(
+ grpc_channel_get_channel_stack(chand->channel), 0),
+ &op);
+}
+
+static void finish_start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_call_element *elem, request_matcher *rm) {
+ call_data *calld = elem->call_data;
+ int request_id;
+
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
+ return;
+ }
+
+ request_id = gpr_stack_lockfree_pop(rm->requests);
+ if (request_id == -1) {
+ gpr_mu_lock(&server->mu_call);
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = PENDING;
+ gpr_mu_unlock(&calld->mu_state);
+ if (rm->pending_head == NULL) {
+ rm->pending_tail = rm->pending_head = calld;
+ } else {
+ rm->pending_tail->pending_next = calld;
+ rm->pending_tail = calld;
+ }
+ calld->pending_next = NULL;
+ gpr_mu_unlock(&server->mu_call);
+ } else {
+ gpr_mu_lock(&calld->mu_state);
+ calld->state = ACTIVATED;
+ gpr_mu_unlock(&calld->mu_state);
+ begin_call(exec_ctx, server, calld, &server->requested_calls[request_id]);
+ }
+}
+
+static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ grpc_server *server = chand->server;
+ uint32_t i;
+ uint32_t hash;
+ channel_registered_method *rm;
+
+ if (chand->registered_methods && calld->path && calld->host) {
+ /* TODO(ctiller): unify these two searches */
+ /* check for an exact match with host */
+ hash = GRPC_MDSTR_KV_HASH(calld->host->hash, calld->path->hash);
+ for (i = 0; i <= chand->registered_method_max_probes; i++) {
+ rm = &chand->registered_methods[(hash + i) %
+ chand->registered_method_slots];
+ if (!rm) break;
+ if (rm->host != calld->host) continue;
+ if (rm->method != calld->path) continue;
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &rm->server_registered_method->request_matcher);
+ return;
+ }
+ /* check for a wildcard method definition (no host set) */
+ hash = GRPC_MDSTR_KV_HASH(0, calld->path->hash);
+ for (i = 0; i <= chand->registered_method_max_probes; i++) {
+ rm = &chand->registered_methods[(hash + i) %
+ chand->registered_method_slots];
+ if (!rm) break;
+ if (rm->host != NULL) continue;
+ if (rm->method != calld->path) continue;
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &rm->server_registered_method->request_matcher);
+ return;
+ }
+ }
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &server->unregistered_request_matcher);
+}
+
+static int num_listeners(grpc_server *server) {
+ listener *l;
+ int n = 0;
+ for (l = server->listeners; l; l = l->next) {
+ n++;
+ }
+ return n;
+}
+
+static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_cq_completion *completion) {
+ server_unref(exec_ctx, server);
+}
+
+static int num_channels(grpc_server *server) {
+ channel_data *chand;
+ int n = 0;
+ for (chand = server->root_channel_data.next;
+ chand != &server->root_channel_data; chand = chand->next) {
+ n++;
+ }
+ return n;
+}
+
+static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
+ registered_method *rm;
+ request_matcher_kill_requests(exec_ctx, server,
+ &server->unregistered_request_matcher);
+ request_matcher_zombify_all_pending_calls(
+ exec_ctx, &server->unregistered_request_matcher);
+ for (rm = server->registered_methods; rm; rm = rm->next) {
+ request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
+ }
+}
+
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
+ size_t i;
+ if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
+ return;
+ }
+
+ kill_pending_work_locked(exec_ctx, server);
+
+ if (server->root_channel_data.next != &server->root_channel_data ||
+ server->listeners_destroyed < num_listeners(server)) {
+ if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
+ server->last_shutdown_message_time),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+ server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
+ gpr_log(GPR_DEBUG,
+ "Waiting for %d channels and %d/%d listeners to be destroyed"
+ " before shutting down server",
+ num_channels(server),
+ num_listeners(server) - server->listeners_destroyed,
+ num_listeners(server));
+ }
+ return;
+ }
+ server->shutdown_published = 1;
+ for (i = 0; i < server->num_shutdown_tags; i++) {
+ server_ref(server);
+ grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq,
+ server->shutdown_tags[i].tag, 1, done_shutdown_event, server,
+ &server->shutdown_tags[i].completion);
+ }
+}
+
+static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ if (md->key == GRPC_MDSTR_PATH) {
+ calld->path = GRPC_MDSTR_REF(md->value);
+ return NULL;
+ } else if (md->key == GRPC_MDSTR_AUTHORITY) {
+ calld->host = GRPC_MDSTR_REF(md->value);
+ return NULL;
+ }
+ return md;
+}
+
+static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ bool success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ gpr_timespec op_deadline;
+
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, elem);
+ op_deadline = calld->recv_initial_metadata->deadline;
+ if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
+ calld->deadline = op_deadline;
+ }
+ if (calld->host && calld->path) {
+ /* do nothing */
+ } else {
+ success = 0;
+ }
+
+ calld->on_done_recv_initial_metadata->cb(
+ exec_ctx, calld->on_done_recv_initial_metadata->cb_arg, success);
+}
+
+static void server_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+
+ if (op->recv_initial_metadata != NULL) {
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
+ op->recv_initial_metadata_ready = &calld->server_on_recv_initial_metadata;
+ }
+}
+
+static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ server_mutate_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
+}
+
+static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ bool success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ if (success) {
+ start_new_rpc(exec_ctx, elem);
+ } else {
+ gpr_mu_lock(&calld->mu_state);
+ if (calld->state == NOT_STARTED) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
+ } else if (calld->state == PENDING) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ /* zombied call will be destroyed when it's removed from the pending
+ queue... later */
+ } else {
+ gpr_mu_unlock(&calld->mu_state);
+ }
+ }
+}
+
+static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
+ grpc_transport *transport,
+ const void *transport_server_data) {
+ channel_data *chand = cd;
+ /* create a call */
+ grpc_call *call =
+ grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data,
+ NULL, 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call_element *elem =
+ grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ call_data *calld = elem->call_data;
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_RECV_INITIAL_METADATA;
+ op.data.recv_initial_metadata = &calld->initial_metadata;
+ grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
+ grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
+ &calld->got_initial_metadata);
+}
+
+static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
+ bool iomgr_status_ignored) {
+ channel_data *chand = cd;
+ grpc_server *server = chand->server;
+ if (chand->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_transport_op op;
+ memset(&op, 0, sizeof(op));
+    op.on_connectivity_state_change = &chand->channel_connectivity_changed;
+    op.connectivity_state = &chand->connectivity_state;
+ grpc_channel_next_op(exec_ctx,
+ grpc_channel_stack_element(
+ grpc_channel_get_channel_stack(chand->channel), 0),
+ &op);
+ } else {
+ gpr_mu_lock(&server->mu_global);
+ destroy_channel(exec_ctx, chand);
+ gpr_mu_unlock(&server->mu_global);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity");
+ }
+}
+
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ memset(calld, 0, sizeof(call_data));
+ calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ calld->call = grpc_call_from_top_element(elem);
+ gpr_mu_init(&calld->mu_state);
+
+ grpc_closure_init(&calld->server_on_recv_initial_metadata,
+ server_on_recv_initial_metadata, elem);
+
+ server_ref(chand->server);
+}
+
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+
+ GPR_ASSERT(calld->state != PENDING);
+
+ if (calld->host) {
+ GRPC_MDSTR_UNREF(calld->host);
+ }
+ if (calld->path) {
+ GRPC_MDSTR_UNREF(calld->path);
+ }
+ grpc_metadata_array_destroy(&calld->initial_metadata);
+
+ gpr_mu_destroy(&calld->mu_state);
+
+ server_unref(exec_ctx, chand->server);
+}
+
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(!args->is_last);
+ chand->server = NULL;
+ chand->channel = NULL;
+ chand->next = chand->prev = chand;
+ chand->registered_methods = NULL;
+ chand->connectivity_state = GRPC_CHANNEL_IDLE;
+ grpc_closure_init(&chand->channel_connectivity_changed,
+ channel_connectivity_changed, chand);
+}
+
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ size_t i;
+ channel_data *chand = elem->channel_data;
+ if (chand->registered_methods) {
+ for (i = 0; i < chand->registered_method_slots; i++) {
+ if (chand->registered_methods[i].method) {
+ GRPC_MDSTR_UNREF(chand->registered_methods[i].method);
+ }
+ if (chand->registered_methods[i].host) {
+ GRPC_MDSTR_UNREF(chand->registered_methods[i].host);
+ }
+ }
+ gpr_free(chand->registered_methods);
+ }
+ if (chand->server) {
+ gpr_mu_lock(&chand->server->mu_global);
+ chand->next->prev = chand->prev;
+ chand->prev->next = chand->next;
+ chand->next = chand->prev = chand;
+ maybe_finish_shutdown(exec_ctx, chand->server);
+ gpr_mu_unlock(&chand->server->mu_global);
+ server_unref(exec_ctx, chand->server);
+ }
+}
+
+const grpc_channel_filter grpc_server_top_filter = {
+ server_start_transport_stream_op,
+ grpc_channel_next_op,
+ sizeof(call_data),
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset,
+ destroy_call_elem,
+ sizeof(channel_data),
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_call_next_get_peer,
+ "server",
+};
+
+void grpc_server_register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq,
+ void *reserved) {
+ size_t i, n;
+ GRPC_API_TRACE(
+ "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
+ (server, cq, reserved));
+ GPR_ASSERT(!reserved);
+ for (i = 0; i < server->cq_count; i++) {
+ if (server->cqs[i] == cq) return;
+ }
+ GRPC_CQ_INTERNAL_REF(cq, "server");
+ grpc_cq_mark_server_cq(cq);
+ n = server->cq_count++;
+ server->cqs = gpr_realloc(server->cqs,
+ server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs[n] = cq;
+}
+
+grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
+ size_t i;
+
+ GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
+
+ grpc_server *server = gpr_malloc(sizeof(grpc_server));
+
+ GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
+
+ memset(server, 0, sizeof(grpc_server));
+
+ gpr_mu_init(&server->mu_global);
+ gpr_mu_init(&server->mu_call);
+
+ /* decremented by grpc_server_destroy */
+ gpr_ref_init(&server->internal_refcount, 1);
+ server->root_channel_data.next = server->root_channel_data.prev =
+ &server->root_channel_data;
+
+ /* TODO(ctiller): expose a channel_arg for this */
+ server->max_requested_calls = 32768;
+ server->request_freelist =
+ gpr_stack_lockfree_create(server->max_requested_calls);
+ for (i = 0; i < (size_t)server->max_requested_calls; i++) {
+ gpr_stack_lockfree_push(server->request_freelist, (int)i);
+ }
+ request_matcher_init(&server->unregistered_request_matcher,
+ server->max_requested_calls);
+ server->requested_calls = gpr_malloc(server->max_requested_calls *
+ sizeof(*server->requested_calls));
+
+ server->channel_args = grpc_channel_args_copy(args);
+
+ return server;
+}
+
+static int streq(const char *a, const char *b) {
+ if (a == NULL && b == NULL) return 1;
+ if (a == NULL) return 0;
+ if (b == NULL) return 0;
+ return 0 == strcmp(a, b);
+}
+
+void *grpc_server_register_method(grpc_server *server, const char *method,
+ const char *host) {
+ registered_method *m;
+ GRPC_API_TRACE("grpc_server_register_method(server=%p, method=%s, host=%s)",
+ 3, (server, method, host));
+ if (!method) {
+ gpr_log(GPR_ERROR,
+ "grpc_server_register_method method string cannot be NULL");
+ return NULL;
+ }
+ for (m = server->registered_methods; m; m = m->next) {
+ if (streq(m->method, method) && streq(m->host, host)) {
+ gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method,
+ host ? host : "*");
+ return NULL;
+ }
+ }
+ m = gpr_malloc(sizeof(registered_method));
+ memset(m, 0, sizeof(*m));
+ request_matcher_init(&m->request_matcher, server->max_requested_calls);
+ m->method = gpr_strdup(method);
+ m->host = gpr_strdup(host);
+ m->next = server->registered_methods;
+ server->registered_methods = m;
+ return m;
+}
+
+void grpc_server_start(grpc_server *server) {
+ listener *l;
+ size_t i;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
+
+ server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+ for (i = 0; i < server->cq_count; i++) {
+ server->pollsets[i] = grpc_cq_pollset(server->cqs[i]);
+ }
+
+ for (l = server->listeners; l; l = l->next) {
+ l->start(&exec_ctx, server, l->arg, server->pollsets, server->cq_count);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
+ grpc_transport *transport,
+ const grpc_channel_args *args) {
+ size_t i;
+ size_t num_registered_methods;
+ size_t alloc;
+ registered_method *rm;
+ channel_registered_method *crm;
+ grpc_channel *channel;
+ channel_data *chand;
+ grpc_mdstr *host;
+ grpc_mdstr *method;
+ uint32_t hash;
+ size_t slots;
+ uint32_t probes;
+ uint32_t max_probes = 0;
+ grpc_transport_op op;
+
+ for (i = 0; i < s->cq_count; i++) {
+ memset(&op, 0, sizeof(op));
+ op.bind_pollset = grpc_cq_pollset(s->cqs[i]);
+ grpc_transport_perform_op(exec_ctx, transport, &op);
+ }
+
+ channel =
+ grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport);
+ chand = (channel_data *)grpc_channel_stack_element(
+ grpc_channel_get_channel_stack(channel), 0)
+ ->channel_data;
+ chand->server = s;
+ server_ref(s);
+ chand->channel = channel;
+
+ num_registered_methods = 0;
+ for (rm = s->registered_methods; rm; rm = rm->next) {
+ num_registered_methods++;
+ }
+  /* build a lookup table phrased in terms of mdstrs in this channel's
+     context to quickly find registered methods */
+ if (num_registered_methods > 0) {
+ slots = 2 * num_registered_methods;
+ alloc = sizeof(channel_registered_method) * slots;
+ chand->registered_methods = gpr_malloc(alloc);
+ memset(chand->registered_methods, 0, alloc);
+ for (rm = s->registered_methods; rm; rm = rm->next) {
+ host = rm->host ? grpc_mdstr_from_string(rm->host) : NULL;
+ method = grpc_mdstr_from_string(rm->method);
+ hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
+ for (probes = 0; chand->registered_methods[(hash + probes) % slots]
+ .server_registered_method != NULL;
+ probes++)
+ ;
+ if (probes > max_probes) max_probes = probes;
+ crm = &chand->registered_methods[(hash + probes) % slots];
+ crm->server_registered_method = rm;
+ crm->host = host;
+ crm->method = method;
+ }
+ GPR_ASSERT(slots <= UINT32_MAX);
+ chand->registered_method_slots = (uint32_t)slots;
+ chand->registered_method_max_probes = max_probes;
+ }
+
+ gpr_mu_lock(&s->mu_global);
+ chand->next = &s->root_channel_data;
+ chand->prev = chand->next->prev;
+ chand->next->prev = chand->prev->next = chand;
+ gpr_mu_unlock(&s->mu_global);
+
+ GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity");
+ memset(&op, 0, sizeof(op));
+ op.set_accept_stream = true;
+ op.set_accept_stream_fn = accept_stream;
+ op.set_accept_stream_user_data = chand;
+ op.on_connectivity_state_change = &chand->channel_connectivity_changed;
+ op.connectivity_state = &chand->connectivity_state;
+ op.disconnect = gpr_atm_acq_load(&s->shutdown_flag) != 0;
+ grpc_transport_perform_op(exec_ctx, transport, &op);
+}
+
+void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage) {
+ (void)done_arg;
+ gpr_free(storage);
+}
+
+static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
+ bool success) {
+ grpc_server *server = s;
+ gpr_mu_lock(&server->mu_global);
+ server->listeners_destroyed++;
+ maybe_finish_shutdown(exec_ctx, server);
+ gpr_mu_unlock(&server->mu_global);
+}
+
+void grpc_server_shutdown_and_notify(grpc_server *server,
+ grpc_completion_queue *cq, void *tag) {
+ listener *l;
+ shutdown_tag *sdt;
+ channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
+ (server, cq, tag));
+
+  /* lock, and gather up the shutdown work to do under the lock */
+ gpr_mu_lock(&server->mu_global);
+ grpc_cq_begin_op(cq, tag);
+ if (server->shutdown_published) {
+ grpc_cq_end_op(&exec_ctx, cq, tag, 1, done_published_shutdown, NULL,
+ gpr_malloc(sizeof(grpc_cq_completion)));
+ gpr_mu_unlock(&server->mu_global);
+ goto done;
+ }
+ server->shutdown_tags =
+ gpr_realloc(server->shutdown_tags,
+ sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
+ sdt = &server->shutdown_tags[server->num_shutdown_tags++];
+ sdt->tag = tag;
+ sdt->cq = cq;
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
+ gpr_mu_unlock(&server->mu_global);
+ goto done;
+ }
+
+ server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
+
+ channel_broadcaster_init(server, &broadcaster);
+
+ gpr_atm_rel_store(&server->shutdown_flag, 1);
+
+ /* collect all unregistered then registered calls */
+ gpr_mu_lock(&server->mu_call);
+ kill_pending_work_locked(&exec_ctx, server);
+ gpr_mu_unlock(&server->mu_call);
+
+ maybe_finish_shutdown(&exec_ctx, server);
+ gpr_mu_unlock(&server->mu_global);
+
+ /* Shutdown listeners */
+ for (l = server->listeners; l; l = l->next) {
+ grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
+ l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
+ }
+
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 1, 0);
+
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_server_cancel_all_calls(grpc_server *server) {
+ channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
+
+ gpr_mu_lock(&server->mu_global);
+ channel_broadcaster_init(server, &broadcaster);
+ gpr_mu_unlock(&server->mu_global);
+
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 0, 1);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_server_destroy(grpc_server *server) {
+ listener *l;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
+
+ gpr_mu_lock(&server->mu_global);
+ GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
+ GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
+
+ while (server->listeners) {
+ l = server->listeners;
+ server->listeners = l->next;
+ gpr_free(l);
+ }
+
+ gpr_mu_unlock(&server->mu_global);
+
+ server_unref(&exec_ctx, server);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
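/* Usage sketch (not from this file): the shutdown ordering that the asserts
   in grpc_server_destroy above require. grpc_completion_queue_* and gpr_*
   calls are from the public <grpc/grpc.h> API and are assumed here. */
static void shutdown_and_destroy_server(grpc_server *server,
                                        grpc_completion_queue *cq) {
  static int tag_storage;
  void *shutdown_tag = &tag_storage;
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  /* Wait for maybe_finish_shutdown to publish the tag on cq. */
  grpc_completion_queue_pluck(cq, shutdown_tag,
                              gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  grpc_server_destroy(server);
  /* Drain and tear down the queue once the server no longer uses it. */
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
}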
+
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done)) {
+ listener *l = gpr_malloc(sizeof(listener));
+ l->arg = arg;
+ l->start = start;
+ l->destroy = destroy;
+ l->next = server->listeners;
+ server->listeners = l;
+}
+
+static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
+ requested_call *rc) {
+ call_data *calld = NULL;
+ request_matcher *rm = NULL;
+ int request_id;
+ if (gpr_atm_acq_load(&server->shutdown_flag)) {
+ fail_call(exec_ctx, server, rc);
+ return GRPC_CALL_OK;
+ }
+ request_id = gpr_stack_lockfree_pop(server->request_freelist);
+ if (request_id == -1) {
+ /* out of request ids: just fail this one */
+ fail_call(exec_ctx, server, rc);
+ return GRPC_CALL_OK;
+ }
+ switch (rc->type) {
+ case BATCH_CALL:
+ rm = &server->unregistered_request_matcher;
+ break;
+ case REGISTERED_CALL:
+ rm = &rc->data.registered.registered_method->request_matcher;
+ break;
+ }
+ server->requested_calls[request_id] = *rc;
+ gpr_free(rc);
+ if (gpr_stack_lockfree_push(rm->requests, request_id)) {
+ /* this was the first queued request: we need to lock and start
+ matching calls */
+ gpr_mu_lock(&server->mu_call);
+ while ((calld = rm->pending_head) != NULL) {
+ request_id = gpr_stack_lockfree_pop(rm->requests);
+ if (request_id == -1) break;
+ rm->pending_head = calld->pending_next;
+ gpr_mu_unlock(&server->mu_call);
+ gpr_mu_lock(&calld->mu_state);
+ if (calld->state == ZOMBIED) {
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true,
+ NULL);
+ } else {
+ GPR_ASSERT(calld->state == PENDING);
+ calld->state = ACTIVATED;
+ gpr_mu_unlock(&calld->mu_state);
+ begin_call(exec_ctx, server, calld,
+ &server->requested_calls[request_id]);
+ }
+ gpr_mu_lock(&server->mu_call);
+ }
+ gpr_mu_unlock(&server->mu_call);
+ }
+ return GRPC_CALL_OK;
+}
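
[Editorial note] The flow in queue_call_request() is compact but dense: a request takes a slot id from a lock-free freelist, is pushed onto the method's request matcher, and only the push that finds the matcher empty takes the mutex to walk the pending-call list. The stripped-down, mutex-only sketch below uses entirely hypothetical names and drops the lock-free fast path; it only illustrates the same bookkeeping.

#include <pthread.h>

#define TOY_MAX_REQUESTS 128

typedef struct {
  pthread_mutex_t mu;
  int free_ids[TOY_MAX_REQUESTS];   /* reusable request-slot ids */
  int nfree;
  int queued_ids[TOY_MAX_REQUESTS]; /* requests waiting for an incoming call */
  int nqueued;
} toy_request_matcher;

/* Returns 0 on success and -1 when no slot is available (the real code fails
   the request in that case). *was_empty tells the caller whether this was the
   first queued request, i.e. whether pending calls now need to be matched. */
static int toy_queue_request(toy_request_matcher *m, int *out_id,
                             int *was_empty) {
  pthread_mutex_lock(&m->mu);
  if (m->nfree == 0) {
    pthread_mutex_unlock(&m->mu);
    return -1;
  }
  *out_id = m->free_ids[--m->nfree];
  *was_empty = (m->nqueued == 0);
  m->queued_ids[m->nqueued++] = *out_id;
  pthread_mutex_unlock(&m->mu);
  return 0;
}
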
+
+grpc_call_error grpc_server_request_call(
+ grpc_server *server, grpc_call **call, grpc_call_details *details,
+ grpc_metadata_array *initial_metadata,
+ grpc_completion_queue *cq_bound_to_call,
+ grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ requested_call *rc = gpr_malloc(sizeof(*rc));
+ GRPC_API_TRACE(
+ "grpc_server_request_call("
+ "server=%p, call=%p, details=%p, initial_metadata=%p, "
+ "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
+ 7, (server, call, details, initial_metadata, cq_bound_to_call,
+ cq_for_notification, tag));
+ if (!grpc_cq_is_server_cq(cq_for_notification)) {
+ gpr_free(rc);
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
+ }
+ grpc_cq_begin_op(cq_for_notification, tag);
+ details->reserved = NULL;
+ rc->type = BATCH_CALL;
+ rc->server = server;
+ rc->tag = tag;
+ rc->cq_bound_to_call = cq_bound_to_call;
+ rc->cq_for_notification = cq_for_notification;
+ rc->call = call;
+ rc->data.batch.details = details;
+ rc->initial_metadata = initial_metadata;
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
+}
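
[Editorial note] From the application side, grpc_server_request_call() is paired with a completion-queue read. A minimal sketch, assuming `server` and `cq` were created, registered and started elsewhere; the function name and tag value are illustrative.

#include <grpc/grpc.h>
#include <grpc/support/log.h>

static void serve_one_call(grpc_server *server, grpc_completion_queue *cq) {
  grpc_call *call = NULL;
  grpc_call_details details;
  grpc_metadata_array request_metadata;
  grpc_event ev;
  int tag = 1;

  grpc_call_details_init(&details);
  grpc_metadata_array_init(&request_metadata);

  /* Ask for the next incoming call; the result is delivered on cq with tag. */
  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_server_request_call(server, &call, &details,
                                      &request_metadata, cq, cq, &tag));

  /* Block until a call arrives (or the queue shuts down). */
  ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  if (ev.type == GRPC_OP_COMPLETE && ev.success) {
    /* details.method, details.host and details.deadline now describe the
       call; batches can be started on `call`. */
  }

  grpc_call_details_destroy(&details);
  grpc_metadata_array_destroy(&request_metadata);
}
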
+
+grpc_call_error grpc_server_request_registered_call(
+ grpc_server *server, void *rmp, grpc_call **call, gpr_timespec *deadline,
+ grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload,
+ grpc_completion_queue *cq_bound_to_call,
+ grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ requested_call *rc = gpr_malloc(sizeof(*rc));
+ registered_method *rm = rmp;
+ GRPC_API_TRACE(
+ "grpc_server_request_registered_call("
+ "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
+ "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
+ "tag=%p)",
+ 9, (server, rmp, call, deadline, initial_metadata, optional_payload,
+ cq_bound_to_call, cq_for_notification, tag));
+ if (!grpc_cq_is_server_cq(cq_for_notification)) {
+ gpr_free(rc);
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
+ }
+ grpc_cq_begin_op(cq_for_notification, tag);
+ rc->type = REGISTERED_CALL;
+ rc->server = server;
+ rc->tag = tag;
+ rc->cq_bound_to_call = cq_bound_to_call;
+ rc->cq_for_notification = cq_for_notification;
+ rc->call = call;
+ rc->data.registered.registered_method = rm;
+ rc->data.registered.deadline = deadline;
+ rc->initial_metadata = initial_metadata;
+ rc->data.registered.optional_payload = optional_payload;
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
+}
+
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
+ void *user_data, bool success);
+
+static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
+ gpr_slice slice = value->slice;
+ size_t len = GPR_SLICE_LENGTH(slice);
+
+ if (len + 1 > *capacity) {
+ *capacity = GPR_MAX(len + 1, *capacity * 2);
+ *dest = gpr_realloc(*dest, *capacity);
+ }
+ memcpy(*dest, grpc_mdstr_as_c_string(value), len + 1);
+}
+
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc) {
+ grpc_op ops[1];
+ grpc_op *op = ops;
+
+ memset(ops, 0, sizeof(ops));
+
+ /* Called once initial metadata has been read by the call, but BEFORE the
+ ioreq to fetch it out of the call has been executed. This means
+ metadata-related fields in calld can be relied on, but to fill in the
+ metadata array passed by the client we need to perform an ioreq op,
+ which should complete immediately. */
+
+ grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
+ grpc_closure_init(&rc->publish, publish_registered_or_batch, rc);
+ *rc->call = calld->call;
+ calld->cq_new = rc->cq_for_notification;
+ GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
+ switch (rc->type) {
+ case BATCH_CALL:
+ GPR_ASSERT(calld->host != NULL);
+ GPR_ASSERT(calld->path != NULL);
+ cpstr(&rc->data.batch.details->host,
+ &rc->data.batch.details->host_capacity, calld->host);
+ cpstr(&rc->data.batch.details->method,
+ &rc->data.batch.details->method_capacity, calld->path);
+ rc->data.batch.details->deadline = calld->deadline;
+ break;
+ case REGISTERED_CALL:
+ *rc->data.registered.deadline = calld->deadline;
+ if (rc->data.registered.optional_payload) {
+ op->op = GRPC_OP_RECV_MESSAGE;
+ op->data.recv_message = rc->data.registered.optional_payload;
+ op++;
+ }
+ break;
+ default:
+ GPR_UNREACHABLE_CODE(return );
+ }
+
+ GRPC_CALL_INTERNAL_REF(calld->call, "server");
+ grpc_call_start_batch_and_execute(exec_ctx, calld->call, ops,
+ (size_t)(op - ops), &rc->publish);
+}
+
+static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
+ grpc_cq_completion *c) {
+ requested_call *rc = req;
+ grpc_server *server = rc->server;
+
+ if (rc >= server->requested_calls &&
+ rc < server->requested_calls + server->max_requested_calls) {
+ GPR_ASSERT(rc - server->requested_calls <= INT_MAX);
+ gpr_stack_lockfree_push(server->request_freelist,
+ (int)(rc - server->requested_calls));
+ } else {
+ gpr_free(req);
+ }
+
+ server_unref(exec_ctx, server);
+}
+
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc) {
+ *rc->call = NULL;
+ rc->initial_metadata->count = 0;
+
+ server_ref(server);
+ grpc_cq_end_op(exec_ctx, rc->cq_for_notification, rc->tag, 0,
+ done_request_event, rc, &rc->completion);
+}
+
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
+ bool success) {
+ requested_call *rc = prc;
+ grpc_call *call = *rc->call;
+ grpc_call_element *elem =
+ grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ server_ref(chand->server);
+ grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, success, done_request_event,
+ rc, &rc->completion);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "server");
+}
+
+const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
+ return server->channel_args;
+}
+
+int grpc_server_has_open_connections(grpc_server *server) {
+ int r;
+ gpr_mu_lock(&server->mu_global);
+ r = server->root_channel_data.next != &server->root_channel_data;
+ gpr_mu_unlock(&server->mu_global);
+ return r;
+}
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
new file mode 100644
index 0000000000..3845eb2981
--- /dev/null
+++ b/src/core/lib/surface/server.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_SERVER_H
+#define GRPC_CORE_LIB_SURFACE_SERVER_H
+
+#include <grpc/grpc.h>
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/transport/transport.h"
+
+extern const grpc_channel_filter grpc_server_top_filter;
+
+/* Add a listener to the server: when the server starts, it will call start,
+ and when it shuts down, it will call destroy */
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t npollsets),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done));
+
+/* Setup a transport - creates a channel stack, binds the transport to the
+ server */
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_transport *transport,
+ const grpc_channel_args *args);
+
+const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server);
+
+int grpc_server_has_open_connections(grpc_server *server);
+
+#endif /* GRPC_CORE_LIB_SURFACE_SERVER_H */
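
[Editorial note] To illustrate the grpc_server_add_listener() contract declared above, here is a minimal do-nothing listener sketch. Every name below is hypothetical; the only requirement taken from this header (and from the chttp2 listener later in this diff) is that destroy must eventually schedule on_done so the server's shutdown path can finish.

#include <grpc/support/alloc.h>
#include "src/core/lib/surface/server.h"

typedef struct { int unused; } toy_listener_state;

static void toy_listener_start(grpc_exec_ctx *exec_ctx, grpc_server *server,
                               void *arg, grpc_pollset **pollsets,
                               size_t npollsets) {
  /* Start accepting connections here and hand each new transport to
     grpc_server_setup_transport(). */
}

static void toy_listener_destroy(grpc_exec_ctx *exec_ctx, grpc_server *server,
                                 void *arg, grpc_closure *on_done) {
  gpr_free(arg);
  /* Must be scheduled exactly once, after no further callbacks will fire. */
  grpc_exec_ctx_enqueue(exec_ctx, on_done, true, NULL);
}

/* Registration, before grpc_server_start():
     toy_listener_state *state = gpr_malloc(sizeof(*state));
     grpc_server_add_listener(exec_ctx, server, state, toy_listener_start,
                              toy_listener_destroy); */
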
diff --git a/src/core/lib/surface/server_chttp2.c b/src/core/lib/surface/server_chttp2.c
new file mode 100644
index 0000000000..f0c2ee5153
--- /dev/null
+++ b/src/core/lib/surface/server_chttp2.c
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+#include "src/core/lib/channel/http_server_filter.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/server.h"
+#include "src/core/lib/transport/chttp2_transport.h"
+
+static void setup_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_transport *transport) {
+ grpc_server_setup_transport(exec_ctx, server, transport,
+ grpc_server_get_channel_args(server));
+}
+
+static void new_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_endpoint *tcp,
+ grpc_tcp_server_acceptor *acceptor) {
+ /*
+ * Beware that the call to grpc_create_chttp2_transport() has to happen
+ * before grpc_tcp_server_destroy(). That ordering is guaranteed here, but
+ * similar code that creates the transport asynchronously after a handshake
+ * (as in server_secure_chttp2.c) needs to add synchronization to preserve
+ * it.
+ */
+ grpc_transport *transport = grpc_create_chttp2_transport(
+ exec_ctx, grpc_server_get_channel_args(server), tcp, 0);
+ setup_transport(exec_ctx, server, transport);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
+}
+
+/* Server callback: start listening on our ports */
+static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_pollset **pollsets, size_t pollset_count) {
+ grpc_tcp_server *tcp = tcpp;
+ grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, new_transport,
+ server);
+}
+
+/* Server callback: destroy the tcp listener (so we don't generate further
+ callbacks) */
+static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_closure *destroy_done) {
+ grpc_tcp_server *tcp = tcpp;
+ grpc_tcp_server_unref(exec_ctx, tcp);
+ grpc_exec_ctx_enqueue(exec_ctx, destroy_done, true, NULL);
+}
+
+int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
+ grpc_resolved_addresses *resolved = NULL;
+ grpc_tcp_server *tcp = NULL;
+ size_t i;
+ unsigned count = 0;
+ int port_num = -1;
+ int port_temp;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2,
+ (server, addr));
+
+ resolved = grpc_blocking_resolve_address(addr, "http");
+ if (!resolved) {
+ goto error;
+ }
+
+ tcp = grpc_tcp_server_create(NULL);
+ GPR_ASSERT(tcp);
+
+ for (i = 0; i < resolved->naddrs; i++) {
+ port_temp = grpc_tcp_server_add_port(
+ tcp, (struct sockaddr *)&resolved->addrs[i].addr,
+ resolved->addrs[i].len);
+ if (port_temp > 0) {
+ if (port_num == -1) {
+ port_num = port_temp;
+ } else {
+ GPR_ASSERT(port_num == port_temp);
+ }
+ count++;
+ }
+ }
+ if (count == 0) {
+ gpr_log(GPR_ERROR, "No address added out of total %d resolved",
+ resolved->naddrs);
+ goto error;
+ }
+ if (count != resolved->naddrs) {
+ gpr_log(GPR_ERROR, "Only %d addresses added out of total %d resolved",
+ count, resolved->naddrs);
+ }
+ grpc_resolved_addresses_destroy(resolved);
+
+ /* Register with the server only upon success */
+ grpc_server_add_listener(&exec_ctx, server, tcp, start, destroy);
+ goto done;
+
+/* Error path: cleanup and return */
+error:
+ if (resolved) {
+ grpc_resolved_addresses_destroy(resolved);
+ }
+ if (tcp) {
+ grpc_tcp_server_unref(&exec_ctx, tcp);
+ }
+ port_num = 0;
+
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return port_num;
+}
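
[Editorial note] For reference, a minimal bring-up using grpc_server_add_insecure_http2_port() might look as follows; the address, port and the elided serving/teardown steps are placeholders, not part of this change.

#include <grpc/grpc.h>
#include <grpc/support/log.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  grpc_server *server = grpc_server_create(NULL, NULL);
  grpc_server_register_completion_queue(server, cq, NULL);

  /* Returns the bound port number, or 0 if no address could be bound. */
  int port = grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
  GPR_ASSERT(port != 0);

  grpc_server_start(server);
  /* ... request and serve calls on cq, then run the shutdown sequence ... */
  grpc_shutdown();
  return 0;
}
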
diff --git a/src/core/lib/surface/surface_trace.h b/src/core/lib/surface/surface_trace.h
new file mode 100644
index 0000000000..6b3f673924
--- /dev/null
+++ b/src/core/lib/surface/surface_trace.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H
+#define GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H
+
+#include <grpc/support/log.h>
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/surface/api_trace.h"
+
+#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event) \
+ if (grpc_api_trace) { \
+ char *_ev = grpc_event_string(event); \
+ gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \
+ gpr_free(_ev); \
+ }
+
+#endif /* GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H */
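
[Editorial note] As a hypothetical illustration of where the macro above is meant to sit, a completion-queue "next" wrapper would log each event just before handing it back; the function name is invented, and the extra includes pull in gpr_free and grpc_event_string, which the macro expansion relies on.

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include "src/core/lib/surface/event_string.h"
#include "src/core/lib/surface/surface_trace.h"

static grpc_event traced_next(grpc_completion_queue *cq,
                              gpr_timespec deadline) {
  grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL);
  /* Logs "RETURN_EVENT[...]" only when API tracing is enabled. */
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ev);
  return ev;
}
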
diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c
new file mode 100644
index 0000000000..bf4126867f
--- /dev/null
+++ b/src/core/lib/surface/validate_metadata.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+
+static int conforms_to(const char *s, size_t len, const uint8_t *legal_bits) {
+ const char *p = s;
+ const char *e = s + len;
+ for (; p != e; p++) {
+ int idx = *p;
+ int byte = idx / 8;
+ int bit = idx % 8;
+ if ((legal_bits[byte] & (1 << bit)) == 0) return 0;
+ }
+ return 1;
+}
+
+int grpc_header_key_is_legal(const char *key, size_t length) {
+ static const uint8_t legal_header_bits[256 / 8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00,
+ 0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ if (length == 0) {
+ return 0;
+ }
+ return conforms_to(key, length, legal_header_bits);
+}
+
+int grpc_header_nonbin_value_is_legal(const char *value, size_t length) {
+ static const uint8_t legal_header_bits[256 / 8] = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ return conforms_to(value, length, legal_header_bits);
+}
+
+int grpc_is_binary_header(const char *key, size_t length) {
+ if (length < 5) return 0;
+ return 0 == memcmp(key + length - 4, "-bin", 4);
+}
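
[Editorial note] The bitmaps above encode exactly which bytes are allowed: lowercase letters, digits, '-', '_' and '.' for keys, and printable ASCII (space through '~') for non-binary values. A few illustrative call sites follow; the wrapper function is hypothetical and only exercises the checks implemented in this file.

#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/log.h>

static void metadata_checks(void) {
  /* Lowercase keys with digits, '-', '_' and '.' pass... */
  GPR_ASSERT(grpc_header_key_is_legal("content-type", strlen("content-type")));
  /* ...uppercase letters and spaces do not. */
  GPR_ASSERT(
      !grpc_header_key_is_legal("Content Type", strlen("Content Type")));
  /* Non-binary values may contain any printable ASCII, including spaces. */
  GPR_ASSERT(grpc_header_nonbin_value_is_legal(
      "text/html; charset=utf-8", strlen("text/html; charset=utf-8")));
  /* Keys ending in "-bin" designate binary (base64-on-the-wire) metadata. */
  GPR_ASSERT(grpc_is_binary_header("grpc-trace-bin", strlen("grpc-trace-bin")));
}
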
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
new file mode 100644
index 0000000000..7723f39401
--- /dev/null
+++ b/src/core/lib/surface/version.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* This file is autogenerated from:
+ templates/src/core/surface/version.c.template */
+
+#include <grpc/grpc.h>
+
+const char *grpc_version_string(void) { return "0.14.0-dev"; }