Diffstat (limited to 'src/core/ext/lb_policy')
-rw-r--r--  src/core/ext/lb_policy/grpclb/load_balancer_api.c                | 163
-rw-r--r--  src/core/ext/lb_policy/grpclb/load_balancer_api.h                |  85
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.c | 119
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h | 182
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c                   | 452
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c                 | 574
6 files changed, 1575 insertions(+), 0 deletions(-)
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
new file mode 100644
index 0000000000..d8af644870
--- /dev/null
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.c
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
+#include "third_party/nanopb/pb_decode.h"
+#include "third_party/nanopb/pb_encode.h"
+
+#include <grpc/support/alloc.h>
+
+typedef struct decode_serverlist_arg {
+ int first_pass;
+ int i;
+ size_t num_servers;
+ grpc_grpclb_server **servers;
+} decode_serverlist_arg;
+
+/* invoked once for every Server in ServerList */
+static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
+ void **arg) {
+ decode_serverlist_arg *dec_arg = *arg;
+ if (dec_arg->first_pass != 0) { /* first pass */
+ grpc_grpclb_server server;
+ if (!pb_decode(stream, grpc_lb_v0_Server_fields, &server)) {
+ return false;
+ }
+ dec_arg->num_servers++;
+ } else { /* second pass */
+ grpc_grpclb_server *server = gpr_malloc(sizeof(grpc_grpclb_server));
+ GPR_ASSERT(dec_arg->num_servers > 0);
+ if (dec_arg->i == 0) { /* first iteration of second pass */
+ dec_arg->servers =
+ gpr_malloc(sizeof(grpc_grpclb_server *) * dec_arg->num_servers);
+ }
+ if (!pb_decode(stream, grpc_lb_v0_Server_fields, server)) {
+ return false;
+ }
+ dec_arg->servers[dec_arg->i++] = server;
+ }
+
+ return true;
+}
+
+grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
+ grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
+
+ req->has_client_stats = 0; /* TODO(dgq): add support for stats once defined */
+ req->has_initial_request = 1;
+ req->initial_request.has_name = 1;
+  /* strncpy does not guarantee NUL-termination; enforce it explicitly */
+  strncpy(req->initial_request.name, lb_service_name,
+          GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH - 1);
+  req->initial_request.name[GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH - 1] = '\0';
+ return req;
+}
+
+gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
+ size_t encoded_length;
+ pb_ostream_t sizestream;
+ pb_ostream_t outputstream;
+ gpr_slice slice;
+ memset(&sizestream, 0, sizeof(pb_ostream_t));
+ pb_encode(&sizestream, grpc_lb_v0_LoadBalanceRequest_fields, request);
+ encoded_length = sizestream.bytes_written;
+
+ slice = gpr_slice_malloc(encoded_length);
+ outputstream =
+ pb_ostream_from_buffer(GPR_SLICE_START_PTR(slice), encoded_length);
+ GPR_ASSERT(pb_encode(&outputstream, grpc_lb_v0_LoadBalanceRequest_fields,
+ request) != 0);
+ return slice;
+}
+
+void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
+ gpr_free(request);
+}
+
+grpc_grpclb_response *grpc_grpclb_response_parse(gpr_slice encoded_response) {
+ bool status;
+ pb_istream_t stream =
+ pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_response),
+ GPR_SLICE_LENGTH(encoded_response));
+ grpc_grpclb_response *res = gpr_malloc(sizeof(grpc_grpclb_response));
+ memset(res, 0, sizeof(*res));
+ status = pb_decode(&stream, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ return res;
+}
+
+grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+ gpr_slice encoded_response) {
+ grpc_grpclb_serverlist *sl = gpr_malloc(sizeof(grpc_grpclb_serverlist));
+ bool status;
+ decode_serverlist_arg arg;
+ pb_istream_t stream =
+ pb_istream_from_buffer(GPR_SLICE_START_PTR(encoded_response),
+ GPR_SLICE_LENGTH(encoded_response));
+ pb_istream_t stream_at_start = stream;
+ grpc_grpclb_response *res = gpr_malloc(sizeof(grpc_grpclb_response));
+ memset(res, 0, sizeof(*res));
+ memset(&arg, 0, sizeof(decode_serverlist_arg));
+
+ res->server_list.servers.funcs.decode = decode_serverlist;
+ res->server_list.servers.arg = &arg;
+ arg.first_pass = 1;
+ status = pb_decode(&stream, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ GPR_ASSERT(arg.num_servers > 0);
+
+ arg.first_pass = 0;
+ status =
+ pb_decode(&stream_at_start, grpc_lb_v0_LoadBalanceResponse_fields, res);
+ GPR_ASSERT(status == true);
+ GPR_ASSERT(arg.servers != NULL);
+
+ sl->num_servers = arg.num_servers;
+ sl->servers = arg.servers;
+ if (res->server_list.has_expiration_interval) {
+ sl->expiration_interval = res->server_list.expiration_interval;
+ }
+ grpc_grpclb_response_destroy(res);
+ return sl;
+}
+
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
+ size_t i;
+ for (i = 0; i < serverlist->num_servers; i++) {
+ gpr_free(serverlist->servers[i]);
+ }
+ gpr_free(serverlist->servers);
+ gpr_free(serverlist);
+}
+
+void grpc_grpclb_response_destroy(grpc_grpclb_response *response) {
+ gpr_free(response);
+}
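
Note (not part of the commit): a minimal sketch of how a caller might drive the API defined above, assuming the encoded response comes from a balancer stream and contains at least one server (grpc_grpclb_response_parse_serverlist asserts this). The function name and the "my-service" string are illustrative only.

#include <grpc/support/log.h>
#include <grpc/support/slice.h>

#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"

/* Hypothetical driver: build and encode the initial request, then parse a
 * serialized LoadBalanceResponse received from the balancer. */
static void example_grpclb_api(gpr_slice encoded_response) {
  grpc_grpclb_request *request = grpc_grpclb_request_create("my-service");
  gpr_slice request_payload = grpc_grpclb_request_encode(request);
  /* ... send request_payload on the LoadBalancer stream ... */
  gpr_slice_unref(request_payload);
  grpc_grpclb_request_destroy(request);

  grpc_grpclb_serverlist *serverlist =
      grpc_grpclb_response_parse_serverlist(encoded_response);
  for (size_t i = 0; i < serverlist->num_servers; i++) {
    gpr_log(GPR_DEBUG, "server %d -> %s:%d", (int)i,
            serverlist->servers[i]->ip_address,
            (int)serverlist->servers[i]->port);
  }
  grpc_grpclb_destroy_serverlist(serverlist);
}
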
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
new file mode 100644
index 0000000000..d329a2ffe8
--- /dev/null
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
+#define GRPC_CORE_EXT_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
+
+#include <grpc/support/slice_buffer.h>
+
+#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h"
+#include "src/core/lib/client_config/lb_policy_factory.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH 128
+
+typedef grpc_lb_v0_LoadBalanceRequest grpc_grpclb_request;
+typedef grpc_lb_v0_LoadBalanceResponse grpc_grpclb_response;
+typedef grpc_lb_v0_Server grpc_grpclb_server;
+typedef grpc_lb_v0_Duration grpc_grpclb_duration;
+typedef struct grpc_grpclb_serverlist {
+ grpc_grpclb_server **servers;
+ size_t num_servers;
+ grpc_grpclb_duration expiration_interval;
+} grpc_grpclb_serverlist;
+
+/** Create a request for a gRPC LB service under \a lb_service_name */
+grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
+
+/** Protocol Buffers v3-encode \a request */
+gpr_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
+
+/** Destroy \a request */
+void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
+
+/** Parse (i.e., decode) the bytes in \a encoded_response as a \a
+ * grpc_grpclb_response */
+grpc_grpclb_response *grpc_grpclb_response_parse(gpr_slice encoded_response);
+
+/** Destroy \a serverlist */
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
+
+/** Parse the list of servers from an encoded \a grpc_grpclb_response */
+grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+ gpr_slice encoded_response);
+
+/** Destroy \a response */
+void grpc_grpclb_response_destroy(grpc_grpclb_response *response);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H */
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.c b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.c
new file mode 100644
index 0000000000..9719673181
--- /dev/null
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.c
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.3.5-dev */
+
+#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+
+
+const pb_field_t grpc_lb_v0_Duration_fields[3] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, grpc_lb_v0_Duration, seconds, seconds, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Duration, nanos, seconds, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_LoadBalanceRequest_fields[3] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_lb_v0_LoadBalanceRequest, initial_request, initial_request, &grpc_lb_v0_InitialLoadBalanceRequest_fields),
+ PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_LoadBalanceRequest, client_stats, initial_request, &grpc_lb_v0_ClientStats_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_InitialLoadBalanceRequest_fields[2] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_InitialLoadBalanceRequest, name, name, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_ClientStats_fields[4] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, grpc_lb_v0_ClientStats, total_requests, total_requests, 0),
+ PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ClientStats, client_rpc_errors, total_requests, 0),
+ PB_FIELD( 3, INT64 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ClientStats, dropped_requests, client_rpc_errors, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_LoadBalanceResponse_fields[3] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_lb_v0_LoadBalanceResponse, initial_response, initial_response, &grpc_lb_v0_InitialLoadBalanceResponse_fields),
+ PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_LoadBalanceResponse, server_list, initial_response, &grpc_lb_v0_ServerList_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_InitialLoadBalanceResponse_fields[4] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_InitialLoadBalanceResponse, client_config, client_config, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, grpc_lb_v0_InitialLoadBalanceResponse, load_balancer_delegate, client_config, 0),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval, load_balancer_delegate, &grpc_lb_v0_Duration_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_ServerList_fields[3] = {
+ PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, grpc_lb_v0_ServerList, servers, servers, &grpc_lb_v0_Server_fields),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v0_ServerList, expiration_interval, servers, &grpc_lb_v0_Duration_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t grpc_lb_v0_Server_fields[5] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_lb_v0_Server, ip_address, ip_address, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, port, ip_address, 0),
+ PB_FIELD( 3, BYTES , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, load_balance_token, port, 0),
+ PB_FIELD( 4, BOOL , OPTIONAL, STATIC , OTHER, grpc_lb_v0_Server, drop_request, load_balance_token, 0),
+ PB_LAST_FIELD
+};
+
+
+/* Check that field information fits in pb_field_t */
+#if !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_32BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in 8 or 16 bit
+ * field descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v0_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v0_ServerList, servers) < 65536 && pb_membersize(grpc_lb_v0_ServerList, expiration_interval) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v0_Duration_grpc_lb_v0_LoadBalanceRequest_grpc_lb_v0_InitialLoadBalanceRequest_grpc_lb_v0_ClientStats_grpc_lb_v0_LoadBalanceResponse_grpc_lb_v0_InitialLoadBalanceResponse_grpc_lb_v0_ServerList_grpc_lb_v0_Server)
+#endif
+
+#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_16BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in the default
+ * 8 bit descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v0_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v0_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v0_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v0_ServerList, servers) < 256 && pb_membersize(grpc_lb_v0_ServerList, expiration_interval) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v0_Duration_grpc_lb_v0_LoadBalanceRequest_grpc_lb_v0_InitialLoadBalanceRequest_grpc_lb_v0_ClientStats_grpc_lb_v0_LoadBalanceResponse_grpc_lb_v0_InitialLoadBalanceResponse_grpc_lb_v0_ServerList_grpc_lb_v0_Server)
+#endif
+
+
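
Note (not part of the commit): the field tables above are what nanopb's streaming encoder and decoder walk at run time; load_balancer_api.c feeds them slice-backed streams. Below is a self-contained sketch of the same round trip over a plain stack buffer, assuming the nanopb 0.3.x API vendored under third_party/nanopb; the function name and "example-service" are illustrative. All fields used here are STATIC, so no decode callbacks are needed.

#include <stdbool.h>
#include <string.h>

#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h"
#include "third_party/nanopb/pb_decode.h"
#include "third_party/nanopb/pb_encode.h"

/* Encode a LoadBalanceRequest into a stack buffer and decode it back,
 * driving nanopb with the generated grpc_lb_v0_* field tables. */
static bool example_nanopb_roundtrip(void) {
  uint8_t buffer[grpc_lb_v0_LoadBalanceRequest_size];
  grpc_lb_v0_LoadBalanceRequest msg = grpc_lb_v0_LoadBalanceRequest_init_zero;
  msg.has_initial_request = true;
  msg.initial_request.has_name = true;
  strcpy(msg.initial_request.name, "example-service");

  pb_ostream_t out = pb_ostream_from_buffer(buffer, sizeof(buffer));
  if (!pb_encode(&out, grpc_lb_v0_LoadBalanceRequest_fields, &msg)) {
    return false;
  }

  grpc_lb_v0_LoadBalanceRequest decoded =
      grpc_lb_v0_LoadBalanceRequest_init_zero;
  pb_istream_t in = pb_istream_from_buffer(buffer, out.bytes_written);
  return pb_decode(&in, grpc_lb_v0_LoadBalanceRequest_fields, &decoded) &&
         decoded.has_initial_request &&
         strcmp(decoded.initial_request.name, "example-service") == 0;
}
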
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h
new file mode 100644
index 0000000000..3599f881bb
--- /dev/null
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v0/load_balancer.pb.h
@@ -0,0 +1,182 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.3.5-dev */
+
+#ifndef PB_LOAD_BALANCER_PB_H_INCLUDED
+#define PB_LOAD_BALANCER_PB_H_INCLUDED
+#include "third_party/nanopb/pb.h"
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Struct definitions */
+typedef struct _grpc_lb_v0_ClientStats {
+ bool has_total_requests;
+ int64_t total_requests;
+ bool has_client_rpc_errors;
+ int64_t client_rpc_errors;
+ bool has_dropped_requests;
+ int64_t dropped_requests;
+} grpc_lb_v0_ClientStats;
+
+typedef struct _grpc_lb_v0_Duration {
+ bool has_seconds;
+ int64_t seconds;
+ bool has_nanos;
+ int32_t nanos;
+} grpc_lb_v0_Duration;
+
+typedef struct _grpc_lb_v0_InitialLoadBalanceRequest {
+ bool has_name;
+ char name[128];
+} grpc_lb_v0_InitialLoadBalanceRequest;
+
+typedef PB_BYTES_ARRAY_T(64) grpc_lb_v0_Server_load_balance_token_t;
+typedef struct _grpc_lb_v0_Server {
+ bool has_ip_address;
+ char ip_address[46];
+ bool has_port;
+ int32_t port;
+ bool has_load_balance_token;
+ grpc_lb_v0_Server_load_balance_token_t load_balance_token;
+ bool has_drop_request;
+ bool drop_request;
+} grpc_lb_v0_Server;
+
+typedef struct _grpc_lb_v0_InitialLoadBalanceResponse {
+ bool has_client_config;
+ char client_config[64];
+ bool has_load_balancer_delegate;
+ char load_balancer_delegate[64];
+ bool has_client_stats_report_interval;
+ grpc_lb_v0_Duration client_stats_report_interval;
+} grpc_lb_v0_InitialLoadBalanceResponse;
+
+typedef struct _grpc_lb_v0_LoadBalanceRequest {
+ bool has_initial_request;
+ grpc_lb_v0_InitialLoadBalanceRequest initial_request;
+ bool has_client_stats;
+ grpc_lb_v0_ClientStats client_stats;
+} grpc_lb_v0_LoadBalanceRequest;
+
+typedef struct _grpc_lb_v0_ServerList {
+ pb_callback_t servers;
+ bool has_expiration_interval;
+ grpc_lb_v0_Duration expiration_interval;
+} grpc_lb_v0_ServerList;
+
+typedef struct _grpc_lb_v0_LoadBalanceResponse {
+ bool has_initial_response;
+ grpc_lb_v0_InitialLoadBalanceResponse initial_response;
+ bool has_server_list;
+ grpc_lb_v0_ServerList server_list;
+} grpc_lb_v0_LoadBalanceResponse;
+
+/* Default values for struct fields */
+
+/* Initializer values for message structs */
+#define grpc_lb_v0_Duration_init_default {false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceRequest_init_default {false, grpc_lb_v0_InitialLoadBalanceRequest_init_default, false, grpc_lb_v0_ClientStats_init_default}
+#define grpc_lb_v0_InitialLoadBalanceRequest_init_default {false, ""}
+#define grpc_lb_v0_ClientStats_init_default {false, 0, false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceResponse_init_default {false, grpc_lb_v0_InitialLoadBalanceResponse_init_default, false, grpc_lb_v0_ServerList_init_default}
+#define grpc_lb_v0_InitialLoadBalanceResponse_init_default {false, "", false, "", false, grpc_lb_v0_Duration_init_default}
+#define grpc_lb_v0_ServerList_init_default {{{NULL}, NULL}, false, grpc_lb_v0_Duration_init_default}
+#define grpc_lb_v0_Server_init_default {false, "", false, 0, false, {0, {0}}, false, 0}
+#define grpc_lb_v0_Duration_init_zero {false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceRequest_init_zero {false, grpc_lb_v0_InitialLoadBalanceRequest_init_zero, false, grpc_lb_v0_ClientStats_init_zero}
+#define grpc_lb_v0_InitialLoadBalanceRequest_init_zero {false, ""}
+#define grpc_lb_v0_ClientStats_init_zero {false, 0, false, 0, false, 0}
+#define grpc_lb_v0_LoadBalanceResponse_init_zero {false, grpc_lb_v0_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v0_ServerList_init_zero}
+#define grpc_lb_v0_InitialLoadBalanceResponse_init_zero {false, "", false, "", false, grpc_lb_v0_Duration_init_zero}
+#define grpc_lb_v0_ServerList_init_zero {{{NULL}, NULL}, false, grpc_lb_v0_Duration_init_zero}
+#define grpc_lb_v0_Server_init_zero {false, "", false, 0, false, {0, {0}}, false, 0}
+
+/* Field tags (for use in manual encoding/decoding) */
+#define grpc_lb_v0_ClientStats_total_requests_tag 1
+#define grpc_lb_v0_ClientStats_client_rpc_errors_tag 2
+#define grpc_lb_v0_ClientStats_dropped_requests_tag 3
+#define grpc_lb_v0_Duration_seconds_tag 1
+#define grpc_lb_v0_Duration_nanos_tag 2
+#define grpc_lb_v0_InitialLoadBalanceRequest_name_tag 1
+#define grpc_lb_v0_Server_ip_address_tag 1
+#define grpc_lb_v0_Server_port_tag 2
+#define grpc_lb_v0_Server_load_balance_token_tag 3
+#define grpc_lb_v0_Server_drop_request_tag 4
+#define grpc_lb_v0_InitialLoadBalanceResponse_client_config_tag 1
+#define grpc_lb_v0_InitialLoadBalanceResponse_load_balancer_delegate_tag 2
+#define grpc_lb_v0_InitialLoadBalanceResponse_client_stats_report_interval_tag 3
+#define grpc_lb_v0_LoadBalanceRequest_initial_request_tag 1
+#define grpc_lb_v0_LoadBalanceRequest_client_stats_tag 2
+#define grpc_lb_v0_ServerList_servers_tag 1
+#define grpc_lb_v0_ServerList_expiration_interval_tag 3
+#define grpc_lb_v0_LoadBalanceResponse_initial_response_tag 1
+#define grpc_lb_v0_LoadBalanceResponse_server_list_tag 2
+
+/* Struct field encoding specification for nanopb */
+extern const pb_field_t grpc_lb_v0_Duration_fields[3];
+extern const pb_field_t grpc_lb_v0_LoadBalanceRequest_fields[3];
+extern const pb_field_t grpc_lb_v0_InitialLoadBalanceRequest_fields[2];
+extern const pb_field_t grpc_lb_v0_ClientStats_fields[4];
+extern const pb_field_t grpc_lb_v0_LoadBalanceResponse_fields[3];
+extern const pb_field_t grpc_lb_v0_InitialLoadBalanceResponse_fields[4];
+extern const pb_field_t grpc_lb_v0_ServerList_fields[3];
+extern const pb_field_t grpc_lb_v0_Server_fields[5];
+
+/* Maximum encoded size of messages (where known) */
+#define grpc_lb_v0_Duration_size 22
+#define grpc_lb_v0_LoadBalanceRequest_size 169
+#define grpc_lb_v0_InitialLoadBalanceRequest_size 131
+#define grpc_lb_v0_ClientStats_size 33
+#define grpc_lb_v0_LoadBalanceResponse_size (165 + grpc_lb_v0_ServerList_size)
+#define grpc_lb_v0_InitialLoadBalanceResponse_size 156
+#define grpc_lb_v0_Server_size 127
+
+/* Message IDs (where set with "msgid" option) */
+#ifdef PB_MSGID
+
+#define LOAD_BALANCER_MESSAGES \
+
+
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
new file mode 100644
index 0000000000..cb5c40501e
--- /dev/null
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -0,0 +1,452 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include "src/core/lib/client_config/lb_policy_registry.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+typedef struct pending_pick {
+ struct pending_pick *next;
+ grpc_pollset *pollset;
+ grpc_connected_subchannel **target;
+ grpc_closure *on_complete;
+} pending_pick;
+
+typedef struct {
+ /** base policy: must be first */
+ grpc_lb_policy base;
+ /** all our subchannels */
+ grpc_subchannel **subchannels;
+ size_t num_subchannels;
+
+ grpc_closure connectivity_changed;
+
+ /** the selected channel (a grpc_connected_subchannel) */
+ gpr_atm selected;
+
+ /** mutex protecting remaining members */
+ gpr_mu mu;
+ /** have we started picking? */
+ int started_picking;
+ /** are we shut down? */
+ int shutdown;
+ /** which subchannel are we watching? */
+ size_t checking_subchannel;
+ /** what is the connectivity of that channel? */
+ grpc_connectivity_state checking_connectivity;
+ /** list of picks that are waiting on connectivity */
+ pending_pick *pending_picks;
+
+ /** our connectivity state tracker */
+ grpc_connectivity_state_tracker state_tracker;
+} pick_first_lb_policy;
+
+#define GET_SELECTED(p) \
+ ((grpc_connected_subchannel *)gpr_atm_acq_load(&(p)->selected))
+
+static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ size_t i;
+ GPR_ASSERT(p->pending_picks == NULL);
+ for (i = 0; i < p->num_subchannels; i++) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
+ }
+ if (selected != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
+ }
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ gpr_free(p->subchannels);
+ gpr_mu_destroy(&p->mu);
+ gpr_free(p);
+}
+
+static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+ grpc_connected_subchannel *selected;
+ gpr_mu_lock(&p->mu);
+ selected = GET_SELECTED(p);
+ p->shutdown = 1;
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ /* cancel subscription */
+ if (selected != NULL) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
+ } else {
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
+ &p->connectivity_changed);
+ }
+ gpr_mu_unlock(&p->mu);
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ *pp->target = NULL;
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ pp = next;
+ }
+}
+
+static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
+ p->started_picking = 1;
+ p->checking_subchannel = 0;
+ p->checking_connectivity = GRPC_CHANNEL_IDLE;
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+}
+
+static void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+
+ /* Check atomically for a selected channel */
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ if (selected != NULL) {
+ *target = selected;
+ return 1;
+ }
+
+ /* No subchannel selected yet, so acquire lock and then attempt again */
+ gpr_mu_lock(&p->mu);
+ selected = GET_SELECTED(p);
+ if (selected) {
+ gpr_mu_unlock(&p->mu);
+ *target = selected;
+ return 1;
+ } else {
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->pollset = pollset;
+ pp->target = target;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ gpr_mu_unlock(&p->mu);
+ return 0;
+ }
+}
+
+static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ pick_first_lb_policy *p = arg;
+ size_t i;
+ size_t num_subchannels = p->num_subchannels;
+ grpc_subchannel **subchannels;
+
+ gpr_mu_lock(&p->mu);
+ subchannels = p->subchannels;
+ p->num_subchannels = 0;
+ p->subchannels = NULL;
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
+
+ for (i = 0; i < num_subchannels; i++) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
+ }
+
+ gpr_free(subchannels);
+}
+
+static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ pick_first_lb_policy *p = arg;
+ grpc_subchannel *selected_subchannel;
+ pending_pick *pp;
+ grpc_connected_subchannel *selected;
+
+ gpr_mu_lock(&p->mu);
+
+ selected = GET_SELECTED(p);
+
+ if (p->shutdown) {
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ return;
+ } else if (selected != NULL) {
+ if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* if the selected channel goes bad, we're done */
+ p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ p->checking_connectivity, "selected_changed");
+ if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
+ } else {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ }
+ } else {
+ loop:
+ switch (p->checking_connectivity) {
+ case GRPC_CHANNEL_READY:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
+ selected_subchannel = p->subchannels[p->checking_subchannel];
+ selected =
+ grpc_subchannel_get_connected_subchannel(selected_subchannel);
+ GPR_ASSERT(selected != NULL);
+ GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
+          /* drop the remaining subchannels: we are connected now */
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
+ gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
+ grpc_exec_ctx_enqueue(
+ exec_ctx, grpc_closure_create(destroy_subchannels, p), true, NULL);
+ /* update any calls that were waiting for a pick */
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = selected;
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, selected, p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
+ break;
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connecting_transient_failure");
+ p->checking_subchannel =
+ (p->checking_subchannel + 1) % p->num_subchannels;
+ p->checking_connectivity = grpc_subchannel_check_connectivity(
+ p->subchannels[p->checking_subchannel]);
+ if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+ } else {
+ goto loop;
+ }
+ break;
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_IDLE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_CONNECTING,
+ "connecting_changed");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
+ break;
+ case GRPC_CHANNEL_FATAL_FAILURE:
+ p->num_subchannels--;
+ GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
+ "pick_first");
+ if (p->num_subchannels == 0) {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE,
+ "no_more_channels");
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
+ "pick_first_connectivity");
+ } else {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "subchannel_failed");
+ p->checking_subchannel %= p->num_subchannels;
+ p->checking_connectivity = grpc_subchannel_check_connectivity(
+ p->subchannels[p->checking_subchannel]);
+ goto loop;
+ }
+ }
+ }
+
+ gpr_mu_unlock(&p->mu);
+}
+
+static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connectivity_state st;
+ gpr_mu_lock(&p->mu);
+ st = grpc_connectivity_state_check(&p->state_tracker);
+ gpr_mu_unlock(&p->mu);
+ return st;
+}
+
+static void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
+ gpr_mu_unlock(&p->mu);
+}
+
+static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connected_subchannel *selected = GET_SELECTED(p);
+ if (selected) {
+ grpc_connected_subchannel_ping(exec_ctx, selected, closure);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
+ }
+}
+
+static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
+ pf_destroy,
+ pf_shutdown,
+ pf_pick,
+ pf_cancel_pick,
+ pf_ping_one,
+ pf_exit_idle,
+ pf_check_connectivity,
+ pf_notify_on_state_change};
+
+static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
+
+static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
+
+static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ GPR_ASSERT(args->addresses != NULL);
+ GPR_ASSERT(args->subchannel_factory != NULL);
+
+ if (args->addresses->naddrs == 0) return NULL;
+
+ pick_first_lb_policy *p = gpr_malloc(sizeof(*p));
+ memset(p, 0, sizeof(*p));
+
+ p->subchannels =
+ gpr_malloc(sizeof(grpc_subchannel *) * args->addresses->naddrs);
+ memset(p->subchannels, 0, sizeof(*p->subchannels) * args->addresses->naddrs);
+ grpc_subchannel_args sc_args;
+ size_t subchannel_idx = 0;
+ for (size_t i = 0; i < args->addresses->naddrs; i++) {
+ memset(&sc_args, 0, sizeof(grpc_subchannel_args));
+ sc_args.addr = (struct sockaddr *)(args->addresses->addrs[i].addr);
+ sc_args.addr_len = (size_t)args->addresses->addrs[i].len;
+
+ grpc_subchannel *subchannel = grpc_subchannel_factory_create_subchannel(
+ exec_ctx, args->subchannel_factory, &sc_args);
+
+ if (subchannel != NULL) {
+ p->subchannels[subchannel_idx++] = subchannel;
+ }
+ }
+ if (subchannel_idx == 0) {
+ gpr_free(p->subchannels);
+ gpr_free(p);
+ return NULL;
+ }
+ p->num_subchannels = subchannel_idx;
+
+ grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+ gpr_mu_init(&p->mu);
+ return &p->base;
+}
+
+static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
+ pick_first_factory_ref, pick_first_factory_unref, create_pick_first,
+ "pick_first"};
+
+static grpc_lb_policy_factory pick_first_lb_policy_factory = {
+ &pick_first_factory_vtable};
+
+static grpc_lb_policy_factory *pick_first_lb_factory_create() {
+ return &pick_first_lb_policy_factory;
+}
+
+/* Plugin registration */
+
+void grpc_lb_policy_pick_first_init() {
+ grpc_register_lb_policy(pick_first_lb_factory_create());
+}
+
+void grpc_lb_policy_pick_first_shutdown() {}
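
Note (not part of the commit): pf_pick() above relies on a lock-free fast path. p->selected is published once with a release store when a subchannel reaches READY, and readers do an acquire load before falling back to the mutex-guarded slow path. The following stand-alone sketch shows that double-checked pattern in isolation; widget, make_widget and widget_cache are hypothetical names, not gRPC API.

#include <grpc/support/atm.h>
#include <grpc/support/sync.h>

typedef struct {
  int id;
} widget; /* hypothetical payload type */

typedef struct {
  gpr_atm selected; /* really a (widget *), published with a release store */
  gpr_mu mu;        /* guards the slow path; assumed already initialized */
} widget_cache;

/* Double-checked publication: lock-free read on the hot path, mutex only
 * while the value is still unset. Loosely mirrors GET_SELECTED()/pf_pick(). */
static widget *widget_cache_get(widget_cache *c, widget *(*make_widget)(void)) {
  widget *w = (widget *)gpr_atm_acq_load(&c->selected);
  if (w != NULL) return w; /* fast path: already published */

  gpr_mu_lock(&c->mu);
  w = (widget *)gpr_atm_acq_load(&c->selected);
  if (w == NULL) {
    w = make_widget();
    gpr_atm_rel_store(&c->selected, (gpr_atm)w);
  }
  gpr_mu_unlock(&c->mu);
  return w;
}
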
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
new file mode 100644
index 0000000000..d94c081494
--- /dev/null
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -0,0 +1,574 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/lib/client_config/lb_policy_registry.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+typedef struct round_robin_lb_policy round_robin_lb_policy;
+
+int grpc_lb_round_robin_trace = 0;
+
+/** List of entities waiting for a pick.
+ *
+ * Once a pick is available, \a target is updated and \a on_complete called. */
+typedef struct pending_pick {
+ struct pending_pick *next;
+ grpc_pollset *pollset;
+ grpc_connected_subchannel **target;
+ grpc_closure *on_complete;
+} pending_pick;
+
+/** List of subchannels in a connectivity READY state */
+typedef struct ready_list {
+ grpc_subchannel *subchannel;
+ struct ready_list *next;
+ struct ready_list *prev;
+} ready_list;
+
+typedef struct {
+ /** index within policy->subchannels */
+ size_t index;
+ /** backpointer to owning policy */
+ round_robin_lb_policy *policy;
+ /** subchannel itself */
+ grpc_subchannel *subchannel;
+ /** notification that connectivity has changed on subchannel */
+ grpc_closure connectivity_changed_closure;
+  /** this subchannel's current position in the policy's ready_list */
+ ready_list *ready_list_node;
+ /** last observed connectivity */
+ grpc_connectivity_state connectivity_state;
+} subchannel_data;
+
+struct round_robin_lb_policy {
+ /** base policy: must be first */
+ grpc_lb_policy base;
+
+ /** all our subchannels */
+ size_t num_subchannels;
+ subchannel_data **subchannels;
+
+ /** mutex protecting remaining members */
+ gpr_mu mu;
+ /** have we started picking? */
+ int started_picking;
+ /** are we shutting down? */
+ int shutdown;
+ /** List of picks that are waiting on connectivity */
+ pending_pick *pending_picks;
+
+ /** our connectivity state tracker */
+ grpc_connectivity_state_tracker state_tracker;
+
+ /** (Dummy) root of the doubly linked list containing READY subchannels */
+ ready_list ready_list;
+ /** Last pick from the ready list. */
+ ready_list *ready_list_last_pick;
+};
+
+/** Returns the next subchannel from the connected list or NULL if the list is
+ * empty.
+ *
+ * Note that this function does *not* advance p->ready_list_last_pick. Use \a
+ * advance_last_picked_locked() for that. */
+static ready_list *peek_next_connected_locked(const round_robin_lb_policy *p) {
+ ready_list *selected;
+ selected = p->ready_list_last_pick->next;
+
+ while (selected != NULL) {
+ if (selected == &p->ready_list) {
+ GPR_ASSERT(selected->subchannel == NULL);
+ /* skip dummy root */
+ selected = selected->next;
+ } else {
+ GPR_ASSERT(selected->subchannel != NULL);
+ return selected;
+ }
+ }
+ return NULL;
+}
+
+/** Advance the \a ready_list picking head. */
+static void advance_last_picked_locked(round_robin_lb_policy *p) {
+ if (p->ready_list_last_pick->next != NULL) { /* non-empty list */
+ p->ready_list_last_pick = p->ready_list_last_pick->next;
+ if (p->ready_list_last_pick == &p->ready_list) {
+ /* skip dummy root */
+ p->ready_list_last_pick = p->ready_list_last_pick->next;
+ }
+ } else { /* should be an empty list */
+ GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
+ }
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
+ p->ready_list_last_pick, p->ready_list_last_pick->subchannel);
+ }
+}
+
+/** Appends the newly-READY subchannel \a sc to the list of ready subchannels,
+ * inserting it just before the dummy root at p->ready_list. */
+static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
+ grpc_subchannel *sc) {
+ ready_list *new_elem = gpr_malloc(sizeof(ready_list));
+ new_elem->subchannel = sc;
+ if (p->ready_list.prev == NULL) {
+ /* first element */
+ new_elem->next = &p->ready_list;
+ new_elem->prev = &p->ready_list;
+ p->ready_list.next = new_elem;
+ p->ready_list.prev = new_elem;
+ } else {
+ new_elem->next = &p->ready_list;
+ new_elem->prev = p->ready_list.prev;
+ p->ready_list.prev->next = new_elem;
+ p->ready_list.prev = new_elem;
+ }
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, sc);
+ }
+ return new_elem;
+}
+
+/** Removes \a node from the list of connected subchannels */
+static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
+ ready_list *node) {
+ if (node == NULL) {
+ return;
+ }
+ if (node == p->ready_list_last_pick) {
+    /* If we are removing the most recently picked node, reset the last-pick
+     * pointer to the dummy root of the list */
+ p->ready_list_last_pick = &p->ready_list;
+ }
+
+ /* removing last item */
+ if (node->next == &p->ready_list && node->prev == &p->ready_list) {
+ GPR_ASSERT(p->ready_list.next == node);
+ GPR_ASSERT(p->ready_list.prev == node);
+ p->ready_list.next = NULL;
+ p->ready_list.prev = NULL;
+ } else {
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+ }
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", node,
+ node->subchannel);
+ }
+
+ node->next = NULL;
+ node->prev = NULL;
+ node->subchannel = NULL;
+
+ gpr_free(node);
+}
+
+static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ size_t i;
+ ready_list *elem;
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ gpr_free(sd);
+ }
+
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ gpr_free(p->subchannels);
+ gpr_mu_destroy(&p->mu);
+
+ elem = p->ready_list.next;
+ while (elem != NULL && elem != &p->ready_list) {
+ ready_list *tmp;
+ tmp = elem->next;
+ elem->next = NULL;
+ elem->prev = NULL;
+ elem->subchannel = NULL;
+ gpr_free(elem);
+ elem = tmp;
+ }
+ gpr_free(p);
+}
+
+static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ size_t i;
+
+ gpr_mu_lock(&p->mu);
+
+ p->shutdown = 1;
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ &sd->connectivity_changed_closure);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
+ size_t i;
+ p->started_picking = 1;
+
+  gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", (void *)p,
+          (int)p->num_subchannels);
+
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ sd->connectivity_state = GRPC_CHANNEL_IDLE;
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
+ }
+}
+
+static void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ ready_list *selected;
+ gpr_mu_lock(&p->mu);
+ if ((selected = peek_next_connected_locked(p))) {
+    *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+    if (grpc_lb_round_robin_trace) {
+      gpr_log(GPR_DEBUG,
+              "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
+              selected->subchannel, selected);
+    }
+    /* advance the last-picked pointer (still under the lock) now it's used */
+    advance_last_picked_locked(p);
+    gpr_mu_unlock(&p->mu);
+    return 1;
+ } else {
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
+ }
+ grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->pollset = pollset;
+ pp->target = target;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ gpr_mu_unlock(&p->mu);
+ return 0;
+ }
+}
+
+static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ bool iomgr_success) {
+ subchannel_data *sd = arg;
+ round_robin_lb_policy *p = sd->policy;
+ pending_pick *pp;
+ ready_list *selected;
+
+ int unref = 0;
+
+ gpr_mu_lock(&p->mu);
+
+ if (p->shutdown) {
+ unref = 1;
+ } else {
+ switch (sd->connectivity_state) {
+ case GRPC_CHANNEL_READY:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
+ /* add the newly connected subchannel to the list of connected ones.
+ * Note that it goes to the "end of the line". */
+ sd->ready_list_node = add_connected_sc_locked(p, sd->subchannel);
+ /* at this point we know there's at least one suitable subchannel. Go
+ * ahead and pick one and notify the pending suitors in
+         * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
+ selected = peek_next_connected_locked(p);
+ if (p->pending_picks != NULL) {
+ /* if the selected subchannel is going to be used for the pending
+ * picks, update the last picked pointer */
+ advance_last_picked_locked(p);
+ }
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target =
+ grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG,
+ "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
+ selected->subchannel, selected);
+ }
+ grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_IDLE:
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ sd->connectivity_state,
+ "connecting_changed");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ break;
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ /* renew state notification */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+
+ /* remove from ready list if still present */
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connecting_transient_failure");
+ break;
+ case GRPC_CHANNEL_FATAL_FAILURE:
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
+ }
+
+ p->num_subchannels--;
+ GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ p->subchannels[sd->index]->index = sd->index;
+ gpr_free(sd);
+
+ unref = 1;
+ if (p->num_subchannels == 0) {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE,
+ "no_more_channels");
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
+ gpr_free(pp);
+ }
+ } else {
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "subchannel_failed");
+ }
+ } /* switch */
+  } /* !p->shutdown */
+
+ gpr_mu_unlock(&p->mu);
+
+ if (unref) {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
+ }
+}
+
+static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ grpc_connectivity_state st;
+ gpr_mu_lock(&p->mu);
+ st = grpc_connectivity_state_check(&p->state_tracker);
+ gpr_mu_unlock(&p->mu);
+ return st;
+}
+
+static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
+ gpr_mu_unlock(&p->mu);
+}
+
+static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ ready_list *selected;
+ grpc_connected_subchannel *target;
+ gpr_mu_lock(&p->mu);
+ if ((selected = peek_next_connected_locked(p))) {
+ gpr_mu_unlock(&p->mu);
+ target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ grpc_connected_subchannel_ping(exec_ctx, target, closure);
+ } else {
+ gpr_mu_unlock(&p->mu);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
+ }
+}
+
+static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
+ rr_destroy,
+ rr_shutdown,
+ rr_pick,
+ rr_cancel_pick,
+ rr_ping_one,
+ rr_exit_idle,
+ rr_check_connectivity,
+ rr_notify_on_state_change};
+
+static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
+
+static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
+
+static grpc_lb_policy *create_round_robin(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ GPR_ASSERT(args->addresses != NULL);
+ GPR_ASSERT(args->subchannel_factory != NULL);
+
+ round_robin_lb_policy *p = gpr_malloc(sizeof(*p));
+ memset(p, 0, sizeof(*p));
+
+ p->subchannels =
+ gpr_malloc(sizeof(*p->subchannels) * args->addresses->naddrs);
+ memset(p->subchannels, 0, sizeof(*p->subchannels) * args->addresses->naddrs);
+
+ grpc_subchannel_args sc_args;
+ size_t subchannel_idx = 0;
+ for (size_t i = 0; i < args->addresses->naddrs; i++) {
+ memset(&sc_args, 0, sizeof(grpc_subchannel_args));
+ sc_args.addr = (struct sockaddr *)(args->addresses->addrs[i].addr);
+ sc_args.addr_len = (size_t)args->addresses->addrs[i].len;
+
+ grpc_subchannel *subchannel = grpc_subchannel_factory_create_subchannel(
+ exec_ctx, args->subchannel_factory, &sc_args);
+
+ if (subchannel != NULL) {
+ subchannel_data *sd = gpr_malloc(sizeof(*sd));
+ memset(sd, 0, sizeof(*sd));
+ p->subchannels[subchannel_idx] = sd;
+ sd->policy = p;
+ sd->index = subchannel_idx;
+ sd->subchannel = subchannel;
+ ++subchannel_idx;
+ grpc_closure_init(&sd->connectivity_changed_closure,
+ rr_connectivity_changed, sd);
+ }
+ }
+ if (subchannel_idx == 0) {
+ gpr_free(p->subchannels);
+ gpr_free(p);
+ return NULL;
+ }
+ p->num_subchannels = subchannel_idx;
+
+ /* The (dummy node) root of the ready list */
+ p->ready_list.subchannel = NULL;
+ p->ready_list.prev = NULL;
+ p->ready_list.next = NULL;
+ p->ready_list_last_pick = &p->ready_list;
+
+ grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
+ grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
+ "round_robin");
+ gpr_mu_init(&p->mu);
+ return &p->base;
+}
+
+static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
+ round_robin_factory_ref, round_robin_factory_unref, create_round_robin,
+ "round_robin"};
+
+static grpc_lb_policy_factory round_robin_lb_policy_factory = {
+ &round_robin_factory_vtable};
+
+static grpc_lb_policy_factory *round_robin_lb_factory_create() {
+ return &round_robin_lb_policy_factory;
+}
+
+/* Plugin registration */
+
+void grpc_lb_policy_round_robin_init() {
+ grpc_register_lb_policy(round_robin_lb_factory_create());
+ grpc_register_tracer("round_robin", &grpc_lb_round_robin_trace);
+}
+
+void grpc_lb_policy_round_robin_shutdown() {}
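
Note (not part of the commit): the READY subchannels above live in a circular doubly-linked list anchored at a dummy root, and ready_list_last_pick rotates through it on every pick. A stripped-down sketch of that rotation follows; rr_node and rotate are illustrative names, and the real code splits this across peek_next_connected_locked() and advance_last_picked_locked().

#include <stddef.h>

typedef struct rr_node {
  struct rr_node *next;
  struct rr_node *prev;
  void *payload; /* NULL only for the dummy root */
} rr_node;

/* Advance *last_pick to the next non-root node of the ring and return it,
 * or return NULL if the ring is empty (root->next == NULL). */
static rr_node *rotate(rr_node *root, rr_node **last_pick) {
  rr_node *candidate = (*last_pick)->next;
  if (candidate == NULL) {
    return NULL; /* only the dummy root exists */
  }
  if (candidate == root) {
    candidate = candidate->next; /* skip the dummy root */
  }
  *last_pick = candidate;
  return candidate;
}

The dummy root avoids special-casing insertion into an empty ring and gives remove_disconnected_sc_locked() a stable sentinel to reset ready_list_last_pick to.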