author     Sree Kuchibhotla <sreek@google.com>   2018-07-23 10:52:27 -0700
committer  Sree Kuchibhotla <sreek@google.com>   2018-07-23 10:52:27 -0700
commit  7b8be4d6fd0a3b7374d5a28bea1eff319c49fefe (patch)
tree    b058501d6776d69a115f6b3e29e2bdd335915ac8 /src
parent  ec1c112cc17cd1290a901ca606ac916422d3342c (diff)
parent  53d2899374e74b4185a55e3fc26b64d0a02840a8 (diff)
Merge branch 'master' into rq-threads
Diffstat (limited to 'src')
-rw-r--r--  src/android/test/interop/app/src/main/cpp/grpc-interop.cc  5
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_channelz.cc  4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc  26
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc  26
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc  59
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h  13
-rw-r--r--  src/core/lib/channel/channelz_registry.cc  21
-rw-r--r--  src/core/lib/iomgr/executor.cc  220
-rw-r--r--  src/core/lib/iomgr/executor.h  45
-rw-r--r--  src/core/lib/iomgr/lockfree_event.cc  6
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.cc  5
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.cc  5
-rw-r--r--  src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc  5
-rw-r--r--  src/core/lib/security/security_connector/security_connector.cc  9
-rw-r--r--  src/cpp/server/channelz/channelz_service.cc  57
-rw-r--r--  src/cpp/server/channelz/channelz_service.h  43
-rw-r--r--  src/cpp/server/channelz/channelz_service_plugin.cc  79
-rwxr-xr-x  src/csharp/Grpc.Core/Grpc.Core.csproj  14
-rw-r--r--  src/csharp/Grpc.Core/Internal/NativeExtension.cs  24
-rw-r--r--  src/csharp/Grpc.Core/Internal/NativeLogRedirector.cs  19
-rw-r--r--  src/csharp/Grpc.Core/Internal/PlatformApis.cs  33
-rw-r--r--  src/csharp/Grpc.Core/build/MonoAndroid/Grpc.Core.targets  21
-rw-r--r--  src/csharp/Grpc.Core/build/net45/Grpc.Core.targets (renamed from src/csharp/Grpc.Core/Grpc.Core.targets)  0
-rw-r--r--  src/csharp/doc/docfx.json  2
-rwxr-xr-x  src/csharp/experimental/build_native_ext_for_android.sh  24
-rw-r--r--  src/proto/grpc/testing/echo_messages.proto  1
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.c  4
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.h  6
28 files changed, 649 insertions(+), 127 deletions(-)
diff --git a/src/android/test/interop/app/src/main/cpp/grpc-interop.cc b/src/android/test/interop/app/src/main/cpp/grpc-interop.cc
index bbdc84abdd..07834250d2 100644
--- a/src/android/test/interop/app/src/main/cpp/grpc-interop.cc
+++ b/src/android/test/interop/app/src/main/cpp/grpc-interop.cc
@@ -45,9 +45,10 @@ std::shared_ptr<grpc::testing::InteropClient> GetClient(const char* host,
credentials = grpc::InsecureChannelCredentials();
}
+ grpc::testing::ChannelCreationFunc channel_creation_func =
+ std::bind(grpc::CreateChannel, host_port, credentials);
return std::shared_ptr<grpc::testing::InteropClient>(
- new grpc::testing::InteropClient(
- grpc::CreateChannel(host_port, credentials), true, false));
+ new grpc::testing::InteropClient(channel_creation_func, true, false));
}
extern "C" JNIEXPORT jboolean JNICALL
diff --git a/src/core/ext/filters/client_channel/client_channel_channelz.cc b/src/core/ext/filters/client_channel/client_channel_channelz.cc
index d43e9ea67a..4c9c9a6bd6 100644
--- a/src/core/ext/filters/client_channel/client_channel_channelz.cc
+++ b/src/core/ext/filters/client_channel/client_channel_channelz.cc
@@ -85,12 +85,12 @@ void ClientChannelNode::PopulateChildRefs(grpc_json* json) {
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "channelRef", nullptr, GRPC_JSON_ARRAY, false);
json_iterator = nullptr;
- for (size_t i = 0; i < child_subchannels.size(); ++i) {
+ for (size_t i = 0; i < child_channels.size(); ++i) {
json_iterator =
grpc_json_create_child(json_iterator, array_parent, nullptr, nullptr,
GRPC_JSON_OBJECT, false);
grpc_json_add_number_string_child(json_iterator, nullptr, "channelId",
- child_subchannels[i]);
+ child_channels[i]);
}
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 85534412cf..959c7441a3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -135,9 +135,8 @@ class GrpcLb : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
- // TODO(ncteisen): implement this in a follow up PR
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
- ChildRefsList* child_channels) override {}
+ ChildRefsList* child_channels) override;
private:
/// Linked list of pending pick requests. It stores all information needed to
@@ -301,6 +300,9 @@ class GrpcLb : public LoadBalancingPolicy {
// The channel for communicating with the LB server.
grpc_channel* lb_channel_ = nullptr;
+ // Mutex to protect the channel to the LB server. This is used when
+ // processing a channelz request.
+ gpr_mu lb_channel_mu_;
grpc_connectivity_state lb_channel_connectivity_;
grpc_closure lb_channel_on_connectivity_changed_;
// Are we already watching the LB channel's connectivity?
@@ -1040,6 +1042,7 @@ GrpcLb::GrpcLb(const grpc_lb_addresses* addresses,
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Initialization.
+ gpr_mu_init(&lb_channel_mu_);
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
@@ -1078,6 +1081,7 @@ GrpcLb::GrpcLb(const grpc_lb_addresses* addresses,
GrpcLb::~GrpcLb() {
GPR_ASSERT(pending_picks_ == nullptr);
GPR_ASSERT(pending_pings_ == nullptr);
+ gpr_mu_destroy(&lb_channel_mu_);
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
grpc_connectivity_state_destroy(&state_tracker_);
@@ -1107,8 +1111,10 @@ void GrpcLb::ShutdownLocked() {
// OnBalancerChannelConnectivityChangedLocked(), and we need to be
// alive when that callback is invoked.
if (lb_channel_ != nullptr) {
+ gpr_mu_lock(&lb_channel_mu_);
grpc_channel_destroy(lb_channel_);
lb_channel_ = nullptr;
+ gpr_mu_unlock(&lb_channel_mu_);
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "grpclb_shutdown");
@@ -1279,6 +1285,20 @@ void GrpcLb::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
}
}
+void GrpcLb::FillChildRefsForChannelz(ChildRefsList* child_subchannels,
+ ChildRefsList* child_channels) {
+ // delegate to the RoundRobin to fill the children subchannels.
+ rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
+ mu_guard guard(&lb_channel_mu_);
+ if (lb_channel_ != nullptr) {
+ grpc_core::channelz::ChannelNode* channel_node =
+ grpc_channel_get_channelz_node(lb_channel_);
+ if (channel_node != nullptr) {
+ child_channels->push_back(channel_node->channel_uuid());
+ }
+ }
+}
+
grpc_connectivity_state GrpcLb::CheckConnectivityLocked(
grpc_error** connectivity_error) {
return grpc_connectivity_state_get(&state_tracker_, connectivity_error);
@@ -1322,9 +1342,11 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
if (lb_channel_ == nullptr) {
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_);
+ gpr_mu_lock(&lb_channel_mu_);
lb_channel_ = grpc_client_channel_factory_create_channel(
client_channel_factory(), uri_str,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, lb_channel_args);
+ gpr_mu_unlock(&lb_channel_mu_);
GPR_ASSERT(lb_channel_ != nullptr);
gpr_free(uri_str);
}
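The new lb_channel_mu_ exists because FillChildRefsForChannelz() can run while the LB channel is being created or destroyed, so every access to lb_channel_ above is bracketed by the mutex. The mu_guard in FillChildRefsForChannelz() is an RAII lock; as a rough stand-alone sketch (the real helper lives elsewhere in the tree, and the class name here is invented), it behaves like:

#include <grpc/support/sync.h>

// Illustrative RAII guard over a gpr_mu: locks on construction, unlocks on
// every exit path of the enclosing scope.
class ScopedGprMu {
 public:
  explicit ScopedGprMu(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }
  ~ScopedGprMu() { gpr_mu_unlock(mu_); }
  ScopedGprMu(const ScopedGprMu&) = delete;
  ScopedGprMu& operator=(const ScopedGprMu&) = delete;

 private:
  gpr_mu* mu_;
};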
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 18e983d6f7..023281db97 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -181,7 +181,7 @@ void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
}
void PickFirst::ShutdownLocked() {
- AutoChildRefsUpdater gaurd(this);
+ AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
@@ -327,30 +327,10 @@ void PickFirst::FillChildRefsForChannelz(
void PickFirst::UpdateChildRefsLocked() {
ChildRefsList cs;
if (subchannel_list_ != nullptr) {
- for (size_t i = 0; i < subchannel_list_->num_subchannels(); ++i) {
- if (subchannel_list_->subchannel(i)->subchannel() != nullptr) {
- grpc_core::channelz::SubchannelNode* subchannel_node =
- grpc_subchannel_get_channelz_node(
- subchannel_list_->subchannel(i)->subchannel());
- if (subchannel_node != nullptr) {
- cs.push_back(subchannel_node->subchannel_uuid());
- }
- }
- }
+ subchannel_list_->PopulateChildRefsList(&cs);
}
if (latest_pending_subchannel_list_ != nullptr) {
- for (size_t i = 0; i < latest_pending_subchannel_list_->num_subchannels();
- ++i) {
- if (latest_pending_subchannel_list_->subchannel(i)->subchannel() !=
- nullptr) {
- grpc_core::channelz::SubchannelNode* subchannel_node =
- grpc_subchannel_get_channelz_node(
- latest_pending_subchannel_list_->subchannel(i)->subchannel());
- if (subchannel_node != nullptr) {
- cs.push_back(subchannel_node->subchannel_uuid());
- }
- }
- }
+ latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
}
// atomically update the data that channelz will actually be looking at.
mu_guard guard(&child_refs_mu_);
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 09634a2ad4..fc56a4961f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -69,9 +69,8 @@ class RoundRobin : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
- // TODO(ncteisen): implement this in a follow up PR
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
- ChildRefsList* child_channels) override {}
+ ChildRefsList* ignored) override;
private:
~RoundRobin();
@@ -183,11 +182,24 @@ class RoundRobin : public LoadBalancingPolicy {
size_t last_ready_index_ = -1; // Index into list of last pick.
};
+ // Helper class to ensure that any function that modifies the child refs
+ // data structures will update the channelz snapshot data structures before
+ // returning.
+ class AutoChildRefsUpdater {
+ public:
+ explicit AutoChildRefsUpdater(RoundRobin* rr) : rr_(rr) {}
+ ~AutoChildRefsUpdater() { rr_->UpdateChildRefsLocked(); }
+
+ private:
+ RoundRobin* rr_;
+ };
+
void ShutdownLocked() override;
void StartPickingLocked();
bool DoPickLocked(PickState* pick);
void DrainPendingPicksLocked();
+ void UpdateChildRefsLocked();
/** list of subchannels */
OrphanablePtr<RoundRobinSubchannelList> subchannel_list_;
@@ -205,10 +217,16 @@ class RoundRobin : public LoadBalancingPolicy {
PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker_;
+ /// Lock and data used to capture snapshots of this channel's child
+ /// channels and subchannels. This data is consumed by channelz.
+ gpr_mu child_refs_mu_;
+ ChildRefsList child_subchannels_;
+ ChildRefsList child_channels_;
};
RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
GPR_ASSERT(args.client_channel_factory != nullptr);
+ gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"round_robin");
UpdateLocked(*args.args);
@@ -223,6 +241,7 @@ RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
+ gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
GPR_ASSERT(pending_picks_ == nullptr);
@@ -242,6 +261,7 @@ void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
}
void RoundRobin::ShutdownLocked() {
+ AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
@@ -365,6 +385,39 @@ bool RoundRobin::PickLocked(PickState* pick) {
return false;
}
+void RoundRobin::FillChildRefsForChannelz(
+ ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
+ mu_guard guard(&child_refs_mu_);
+ for (size_t i = 0; i < child_subchannels_.size(); ++i) {
+ // TODO(ncteisen): implement a de dup loop that is not O(n^2). Might
+ // have to implement lightweight set. For now, we don't care about
+ // performance when channelz requests are made.
+ bool found = false;
+ for (size_t j = 0; j < child_subchannels_to_fill->size(); ++j) {
+ if ((*child_subchannels_to_fill)[j] == child_subchannels_[i]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ child_subchannels_to_fill->push_back(child_subchannels_[i]);
+ }
+ }
+}
+
+void RoundRobin::UpdateChildRefsLocked() {
+ ChildRefsList cs;
+ if (subchannel_list_ != nullptr) {
+ subchannel_list_->PopulateChildRefsList(&cs);
+ }
+ if (latest_pending_subchannel_list_ != nullptr) {
+ latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
+ }
+ // atomically update the data that channelz will actually be looking at.
+ mu_guard guard(&child_refs_mu_);
+ child_subchannels_ = std::move(cs);
+}
+
void RoundRobin::RoundRobinSubchannelList::StartWatchingLocked() {
if (num_subchannels() == 0) return;
// Check current state of each subchannel synchronously, since any
@@ -455,6 +508,7 @@ void RoundRobin::RoundRobinSubchannelList::
void RoundRobin::RoundRobinSubchannelList::
UpdateRoundRobinStateFromSubchannelStateCountsLocked() {
RoundRobin* p = static_cast<RoundRobin*>(policy());
+ AutoChildRefsUpdater guard(p);
if (num_ready_ > 0) {
if (p->subchannel_list_.get() != this) {
// Promote this list to p->subchannel_list_.
@@ -611,6 +665,7 @@ void RoundRobin::PingOneLocked(grpc_closure* on_initiate,
void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
+ AutoChildRefsUpdater guard(this);
if (GPR_UNLIKELY(arg == nullptr || arg->type != GRPC_ARG_POINTER)) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", this);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
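The TODO in RoundRobin::FillChildRefsForChannelz() above flags the O(n^2) de-duplication loop. One possible replacement (not part of this commit, and assuming ChildRefsList holds integral channelz uuids, which is how it is used here) is a set-based merge:

#include <cstdint>
#include <set>
#include <vector>

// Appends the elements of src to dst, skipping anything dst already contains.
// Roughly O((n + m) log(n + m)) instead of the quadratic nested loop above.
void MergeUniqueRefs(const std::vector<intptr_t>& src,
                     std::vector<intptr_t>* dst) {
  std::set<intptr_t> seen(dst->begin(), dst->end());
  for (intptr_t ref : src) {
    if (seen.insert(ref).second) {  // insert() reports whether ref was new
      dst->push_back(ref);
    }
  }
}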
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 7e2046bcdc..018ac3bb86 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -189,6 +189,19 @@ class SubchannelList
// Returns true if the subchannel list is shutting down.
bool shutting_down() const { return shutting_down_; }
+ // Populates refs_list with the uuids of this SubchannelLists's subchannels.
+ void PopulateChildRefsList(ChildRefsList* refs_list) {
+ for (size_t i = 0; i < subchannels_.size(); ++i) {
+ if (subchannels_[i].subchannel() != nullptr) {
+ grpc_core::channelz::SubchannelNode* subchannel_node =
+ grpc_subchannel_get_channelz_node(subchannels_[i].subchannel());
+ if (subchannel_node != nullptr) {
+ refs_list->push_back(subchannel_node->subchannel_uuid());
+ }
+ }
+ }
+ }
+
// Accessors.
LoadBalancingPolicy* policy() const { return policy_; }
TraceFlag* tracer() const { return tracer_; }
diff --git a/src/core/lib/channel/channelz_registry.cc b/src/core/lib/channel/channelz_registry.cc
index a16798d524..38496b3d78 100644
--- a/src/core/lib/channel/channelz_registry.cc
+++ b/src/core/lib/channel/channelz_registry.cc
@@ -121,3 +121,24 @@ char* ChannelzRegistry::InternalGetTopChannels(intptr_t start_channel_id) {
} // namespace channelz
} // namespace grpc_core
+
+char* grpc_channelz_get_top_channels(intptr_t start_channel_id) {
+ return grpc_core::channelz::ChannelzRegistry::GetTopChannels(
+ start_channel_id);
+}
+
+char* grpc_channelz_get_channel(intptr_t channel_id) {
+ grpc_core::channelz::ChannelNode* channel_node =
+ grpc_core::channelz::ChannelzRegistry::GetChannelNode(channel_id);
+ if (channel_node == nullptr) {
+ return nullptr;
+ }
+ grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
+ grpc_json* json = top_level_json;
+ grpc_json* channel_json = channel_node->RenderJson();
+ channel_json->key = "channel";
+ grpc_json_link_child(json, channel_json, nullptr);
+ char* json_str = grpc_json_dump_to_string(top_level_json, 0);
+ grpc_json_destroy(top_level_json);
+ return json_str;
+}
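Usage sketch for the two channelz accessors added above (both are exercised later in this commit by the C++ channelz service and exported to Ruby): the returned JSON string is gpr-allocated and must be released with gpr_free().

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

#include <stdio.h>

void DumpChannelzTopChannels() {
  char* json = grpc_channelz_get_top_channels(/*start_channel_id=*/0);
  if (json != nullptr) {
    printf("%s\n", json);  // JSON rendering of the registered top-level channels
    gpr_free(json);
  }
}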
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index 1ad13b831d..45d96b80eb 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -40,19 +40,25 @@
gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
}
+#define EXECUTOR_TRACE0(str) \
+ if (executor_trace.enabled()) { \
+ gpr_log(GPR_INFO, "EXECUTOR " str); \
+ }
+
grpc_core::TraceFlag executor_trace(false, "executor");
GPR_TLS_DECL(g_this_thread_state);
-GrpcExecutor::GrpcExecutor(const char* executor_name) : name_(executor_name) {
+GrpcExecutor::GrpcExecutor(const char* name) : name_(name) {
adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
- gpr_atm_no_barrier_store(&num_threads_, 0);
+ gpr_atm_rel_store(&num_threads_, 0);
max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}
void GrpcExecutor::Init() { SetThreading(true); }
-size_t GrpcExecutor::RunClosures(grpc_closure_list list) {
+size_t GrpcExecutor::RunClosures(const char* executor_name,
+ grpc_closure_list list) {
size_t n = 0;
grpc_closure* c = list.head;
@@ -60,11 +66,11 @@ size_t GrpcExecutor::RunClosures(grpc_closure_list list) {
grpc_closure* next = c->next_data.next;
grpc_error* error = c->error_data.error;
#ifndef NDEBUG
- EXECUTOR_TRACE("run %p [created by %s:%d]", c, c->file_created,
- c->line_created);
+ EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
+ c->file_created, c->line_created);
c->scheduled = false;
#else
- EXECUTOR_TRACE("run %p", c);
+ EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
c->cb(c->cb_arg, error);
GRPC_ERROR_UNREF(error);
@@ -77,17 +83,21 @@ size_t GrpcExecutor::RunClosures(grpc_closure_list list) {
}
bool GrpcExecutor::IsThreaded() const {
- return gpr_atm_no_barrier_load(&num_threads_) > 0;
+ return gpr_atm_acq_load(&num_threads_) > 0;
}
void GrpcExecutor::SetThreading(bool threading) {
- gpr_atm curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
+ gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
+ EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);
if (threading) {
- if (curr_num_threads > 0) return;
+ if (curr_num_threads > 0) {
+ EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads == 0", name_);
+ return;
+ }
GPR_ASSERT(num_threads_ == 0);
- gpr_atm_no_barrier_store(&num_threads_, 1);
+ gpr_atm_rel_store(&num_threads_, 1);
gpr_tls_init(&g_this_thread_state);
thd_state_ = static_cast<ThreadState*>(
gpr_zalloc(sizeof(ThreadState) * max_threads_));
@@ -96,6 +106,7 @@ void GrpcExecutor::SetThreading(bool threading) {
gpr_mu_init(&thd_state_[i].mu);
gpr_cv_init(&thd_state_[i].cv);
thd_state_[i].id = i;
+ thd_state_[i].name = name_;
thd_state_[i].thd = grpc_core::Thread();
thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
}
@@ -104,7 +115,10 @@ void GrpcExecutor::SetThreading(bool threading) {
grpc_core::Thread(name_, &GrpcExecutor::ThreadMain, &thd_state_[0]);
thd_state_[0].thd.Start();
} else { // !threading
- if (curr_num_threads == 0) return;
+ if (curr_num_threads == 0) {
+ EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
+ return;
+ }
for (size_t i = 0; i < max_threads_; i++) {
gpr_mu_lock(&thd_state_[i].mu);
@@ -121,20 +135,22 @@ void GrpcExecutor::SetThreading(bool threading) {
curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
for (gpr_atm i = 0; i < curr_num_threads; i++) {
thd_state_[i].thd.Join();
- EXECUTOR_TRACE(" Thread %" PRIdPTR " of %" PRIdPTR " joined", i,
- curr_num_threads);
+ EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
+ i + 1, curr_num_threads);
}
- gpr_atm_no_barrier_store(&num_threads_, 0);
+ gpr_atm_rel_store(&num_threads_, 0);
for (size_t i = 0; i < max_threads_; i++) {
gpr_mu_destroy(&thd_state_[i].mu);
gpr_cv_destroy(&thd_state_[i].cv);
- RunClosures(thd_state_[i].elems);
+ RunClosures(thd_state_[i].name, thd_state_[i].elems);
}
gpr_free(thd_state_);
gpr_tls_destroy(&g_this_thread_state);
}
+
+ EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}
void GrpcExecutor::Shutdown() { SetThreading(false); }
@@ -147,8 +163,8 @@ void GrpcExecutor::ThreadMain(void* arg) {
size_t subtract_depth = 0;
for (;;) {
- EXECUTOR_TRACE("[%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")", ts->id,
- subtract_depth);
+ EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
+ ts->name, ts->id, subtract_depth);
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
@@ -159,7 +175,7 @@ void GrpcExecutor::ThreadMain(void* arg) {
}
if (ts->shutdown) {
- EXECUTOR_TRACE("[%" PRIdPTR "]: shutdown", ts->id);
+ EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
gpr_mu_unlock(&ts->mu);
break;
}
@@ -169,10 +185,10 @@ void GrpcExecutor::ThreadMain(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
- EXECUTOR_TRACE("[%" PRIdPTR "]: execute", ts->id);
+ EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);
grpc_core::ExecCtx::Get()->InvalidateNow();
- subtract_depth = RunClosures(closures);
+ subtract_depth = RunClosures(ts->name, closures);
}
}
@@ -188,16 +204,16 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
do {
retry_push = false;
size_t cur_thread_count =
- static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
+ static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
// If the number of threads is zero(i.e either the executor is not threaded
// or already shutdown), then queue the closure on the exec context itself
if (cur_thread_count == 0) {
#ifndef NDEBUG
- EXECUTOR_TRACE("schedule %p (created %s:%d) inline", closure,
+ EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
closure->file_created, closure->line_created);
#else
- EXECUTOR_TRACE("schedule %p inline", closure);
+ EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
closure, error);
@@ -213,18 +229,18 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
}
ThreadState* orig_ts = ts;
-
bool try_new_thread = false;
+
for (;;) {
#ifndef NDEBUG
EXECUTOR_TRACE(
- "try to schedule %p (%s) (created %s:%d) to thread "
+ "(%s) try to schedule %p (%s) (created %s:%d) to thread "
"%" PRIdPTR,
- closure, is_short ? "short" : "long", closure->file_created,
+ name_, closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, ts->id);
#else
- EXECUTOR_TRACE("try to schedule %p (%s) to thread %" PRIdPTR, closure,
- is_short ? "short" : "long", ts->id);
+ EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
+ closure, is_short ? "short" : "long", ts->id);
#endif
gpr_mu_lock(&ts->mu);
@@ -236,18 +252,22 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
size_t idx = ts->id;
ts = &thd_state_[(idx + 1) % cur_thread_count];
if (ts == orig_ts) {
- // We cycled through all the threads. Retry enqueue again (by creating
- // a new thread)
+ // We cycled through all the threads. Retry enqueue again by creating
+ // a new thread
+ //
+ // TODO (sreek): There is a potential issue here. We are
+ // unconditionally setting try_new_thread to true here. What if the
+ // executor is shutdown OR if cur_thread_count is already equal to
+ // max_threads ?
+ // (Fortunately, this is not an issue yet (as of july 2018) because
+ // there is only one instance of long job in gRPC and hence we will
+ // not hit this code path)
retry_push = true;
- // TODO (sreek): What if the executor is shutdown OR if
- // cur_thread_count is already equal to max_threads ? (currently - as
- // of July 2018, we do not run in to this issue because there is only
- // one instance of long job in gRPC. This has to be fixed soon)
try_new_thread = true;
break;
}
- continue;
+ continue; // Try the next thread-state
}
// == Found the thread state (i.e thread) to enqueue this closure! ==
@@ -277,13 +297,11 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
}
if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
- cur_thread_count =
- static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
+ cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
if (cur_thread_count < max_threads_) {
- // Increment num_threads (Safe to do a no_barrier_store instead of a
- // cas because we always increment num_threads under the
- // 'adding_thread_lock')
- gpr_atm_no_barrier_store(&num_threads_, cur_thread_count + 1);
+ // Increment num_threads (safe to do a store instead of a cas because we
+ // always increment num_threads under the 'adding_thread_lock')
+ gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);
thd_state_[cur_thread_count].thd = grpc_core::Thread(
name_, &GrpcExecutor::ThreadMain, &thd_state_[cur_thread_count]);
@@ -298,60 +316,118 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
} while (retry_push);
}
-static GrpcExecutor* global_executor;
+static GrpcExecutor* executors[GRPC_NUM_EXECUTORS];
-void enqueue_long(grpc_closure* closure, grpc_error* error) {
- global_executor->Enqueue(closure, error, false /* is_short */);
+void default_enqueue_short(grpc_closure* closure, grpc_error* error) {
+ executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error,
+ true /* is_short */);
}
-void enqueue_short(grpc_closure* closure, grpc_error* error) {
- global_executor->Enqueue(closure, error, true /* is_short */);
+void default_enqueue_long(grpc_closure* closure, grpc_error* error) {
+ executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error,
+ false /* is_short */);
}
-// Short-Job executor scheduler
-static const grpc_closure_scheduler_vtable global_executor_vtable_short = {
- enqueue_short, enqueue_short, "executor-short"};
-static grpc_closure_scheduler global_scheduler_short = {
- &global_executor_vtable_short};
+void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) {
+ executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error,
+ true /* is_short */);
+}
-// Long-job executor scheduler
-static const grpc_closure_scheduler_vtable global_executor_vtable_long = {
- enqueue_long, enqueue_long, "executor-long"};
-static grpc_closure_scheduler global_scheduler_long = {
- &global_executor_vtable_long};
+void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) {
+ executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error,
+ false /* is_short */);
+}
+
+static const grpc_closure_scheduler_vtable
+ vtables_[GRPC_NUM_EXECUTORS][GRPC_NUM_EXECUTOR_JOB_TYPES] = {
+ {{&default_enqueue_short, &default_enqueue_short, "def-ex-short"},
+ {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}},
+ {{&resolver_enqueue_short, &resolver_enqueue_short, "res-ex-short"},
+ {&resolver_enqueue_long, &resolver_enqueue_long, "res-ex-long"}}};
+
+static grpc_closure_scheduler
+ schedulers_[GRPC_NUM_EXECUTORS][GRPC_NUM_EXECUTOR_JOB_TYPES] = {
+ {{&vtables_[GRPC_DEFAULT_EXECUTOR][GRPC_EXECUTOR_SHORT]},
+ {&vtables_[GRPC_DEFAULT_EXECUTOR][GRPC_EXECUTOR_LONG]}},
+ {{&vtables_[GRPC_RESOLVER_EXECUTOR][GRPC_EXECUTOR_SHORT]},
+ {&vtables_[GRPC_RESOLVER_EXECUTOR][GRPC_EXECUTOR_LONG]}}};
// grpc_executor_init() and grpc_executor_shutdown() functions are called in the
// the grpc_init() and grpc_shutdown() code paths which are protected by a
// global mutex. So it is okay to assume that these functions are thread-safe
void grpc_executor_init() {
- if (global_executor != nullptr) {
- // grpc_executor_init() already called once (and grpc_executor_shutdown()
- // wasn't called)
+ EXECUTOR_TRACE0("grpc_executor_init() enter");
+
+ // Return if grpc_executor_init() is already called earlier
+ if (executors[GRPC_DEFAULT_EXECUTOR] != nullptr) {
+ GPR_ASSERT(executors[GRPC_RESOLVER_EXECUTOR] != nullptr);
return;
}
- global_executor = grpc_core::New<GrpcExecutor>("global-executor");
- global_executor->Init();
+ executors[GRPC_DEFAULT_EXECUTOR] =
+ grpc_core::New<GrpcExecutor>("default-executor");
+ executors[GRPC_RESOLVER_EXECUTOR] =
+ grpc_core::New<GrpcExecutor>("resolver-executor");
+
+ executors[GRPC_DEFAULT_EXECUTOR]->Init();
+ executors[GRPC_RESOLVER_EXECUTOR]->Init();
+
+ EXECUTOR_TRACE0("grpc_executor_init() done");
+}
+
+grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type,
+ GrpcExecutorJobType job_type) {
+ return &schedulers_[executor_type][job_type];
+}
+
+grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) {
+ return grpc_executor_scheduler(GRPC_DEFAULT_EXECUTOR, job_type);
}
void grpc_executor_shutdown() {
- // Shutdown already called
- if (global_executor == nullptr) {
+ EXECUTOR_TRACE0("grpc_executor_shutdown() enter");
+
+ // Return if grpc_executor_shutdown() is already called earlier
+ if (executors[GRPC_DEFAULT_EXECUTOR] == nullptr) {
+ GPR_ASSERT(executors[GRPC_RESOLVER_EXECUTOR] == nullptr);
return;
}
- global_executor->Shutdown();
- grpc_core::Delete<GrpcExecutor>(global_executor);
- global_executor = nullptr;
+ executors[GRPC_DEFAULT_EXECUTOR]->Shutdown();
+ executors[GRPC_RESOLVER_EXECUTOR]->Shutdown();
+
+ // Delete the executor objects.
+ //
+ // NOTE: It is important to call Shutdown() on all executors first before
+ // calling Delete() because it is possible for one executor (that is not
+ // shutdown yet) to call Enqueue() on a different executor which is already
+ // shutdown. This is legal and in such cases, the Enqueue() operation
+ // effectively "fails" and enqueues that closure on the calling thread's
+ // exec_ctx.
+ //
+ // By ensuring that all executors are shutdown first, we are also ensuring
+ // that no thread is active across all executors.
+
+ grpc_core::Delete<GrpcExecutor>(executors[GRPC_DEFAULT_EXECUTOR]);
+ grpc_core::Delete<GrpcExecutor>(executors[GRPC_RESOLVER_EXECUTOR]);
+ executors[GRPC_DEFAULT_EXECUTOR] = nullptr;
+ executors[GRPC_RESOLVER_EXECUTOR] = nullptr;
+
+ EXECUTOR_TRACE0("grpc_executor_shutdown() done");
}
-bool grpc_executor_is_threaded() { return global_executor->IsThreaded(); }
+bool grpc_executor_is_threaded(GrpcExecutorType executor_type) {
+ GPR_ASSERT(executor_type < GRPC_NUM_EXECUTORS);
+ return executors[executor_type]->IsThreaded();
+}
-void grpc_executor_set_threading(bool enable) {
- global_executor->SetThreading(enable);
+bool grpc_executor_is_threaded() {
+ return grpc_executor_is_threaded(GRPC_DEFAULT_EXECUTOR);
}
-grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) {
- return job_type == GRPC_EXECUTOR_SHORT ? &global_scheduler_short
- : &global_scheduler_long;
+void grpc_executor_set_threading(bool enable) {
+ EXECUTOR_TRACE("grpc_executor_set_threading(%d) called", enable);
+ for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) {
+ executors[i]->SetThreading(enable);
+ }
}
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index 395fc52863..8829138c5f 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -27,7 +27,8 @@
typedef struct {
gpr_mu mu;
- size_t id; // For debugging purposes
+ size_t id; // For debugging purposes
+ const char* name; // Thread state name
gpr_cv cv;
grpc_closure_list elems;
size_t depth; // Number of closures in the closure list
@@ -36,7 +37,11 @@ typedef struct {
grpc_core::Thread thd;
} ThreadState;
-typedef enum { GRPC_EXECUTOR_SHORT, GRPC_EXECUTOR_LONG } GrpcExecutorJobType;
+typedef enum {
+ GRPC_EXECUTOR_SHORT = 0,
+ GRPC_EXECUTOR_LONG,
+ GRPC_NUM_EXECUTOR_JOB_TYPES // Add new values above this
+} GrpcExecutorJobType;
class GrpcExecutor {
public:
@@ -58,7 +63,7 @@ class GrpcExecutor {
void Enqueue(grpc_closure* closure, grpc_error* error, bool is_short);
private:
- static size_t RunClosures(grpc_closure_list list);
+ static size_t RunClosures(const char* executor_name, grpc_closure_list list);
static void ThreadMain(void* arg);
const char* name_;
@@ -70,14 +75,42 @@ class GrpcExecutor {
// == Global executor functions ==
+typedef enum {
+ GRPC_DEFAULT_EXECUTOR = 0,
+ GRPC_RESOLVER_EXECUTOR,
+
+ GRPC_NUM_EXECUTORS // Add new values above this
+} GrpcExecutorType;
+
+// TODO(sreek): Currently we have two executors (available globally): The
+// default executor and the resolver executor.
+//
+// Some of the functions below operate on the DEFAULT executor only while some
+// operate of ALL the executors. This is a bit confusing and should be cleaned
+// up in future (where we make all the following functions take executor_type
+// and/or job_type)
+
+// Initialize ALL the executors
void grpc_executor_init();
+// Shutdown ALL the executors
+void grpc_executor_shutdown();
+
+// Set the threading mode for ALL the executors
+void grpc_executor_set_threading(bool enable);
+
+// Get the DEFAULT executor scheduler for the given job_type
grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type);
-void grpc_executor_shutdown();
+// Get the executor scheduler for a given executor_type and a job_type
+grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type,
+ GrpcExecutorJobType job_type);
-bool grpc_executor_is_threaded();
+// Return if a given executor is running in threaded mode (i.e if
+// grpc_executor_set_threading(true) was called previously on that executor)
+bool grpc_executor_is_threaded(GrpcExecutorType executor_type);
-void grpc_executor_set_threading(bool enable);
+// Return if the DEFAULT executor is threaded
+bool grpc_executor_is_threaded();
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
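A short usage sketch of the executor selection API declared above, mirroring the resolve_address changes later in this diff: a closure is bound to the resolver executor's short-job scheduler and scheduled. This is only a sketch against internal iomgr primitives (GRPC_CLOSURE_CREATE / GRPC_CLOSURE_SCHED), so it assumes the surrounding code already lives inside the core library.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/executor.h"

static void on_resolver_work(void* arg, grpc_error* error) { /* ... */ }

static void schedule_resolver_work(void* arg) {
  grpc_core::ExecCtx exec_ctx;  // closures scheduled here are flushed on scope exit
  GRPC_CLOSURE_SCHED(
      GRPC_CLOSURE_CREATE(
          on_resolver_work, arg,
          grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT)),
      GRPC_ERROR_NONE);
}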
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index 5b6b79fa91..085fea40a4 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -89,7 +89,11 @@ void LockfreeEvent::DestroyEvent() {
void LockfreeEvent::NotifyOn(grpc_closure* closure) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(&state_);
+ /* This load needs to be an acquire load because this can be a shutdown
+ * error that we might need to reference. Adding acquire semantics makes
+ * sure that the shutdown error has been initialized properly before us
+ * referencing it. */
+ gpr_atm curr = gpr_atm_acq_load(&state_);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
(void*)curr, closure);
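The acquire load added above only helps because the writer publishes the shutdown error with (at least) release ordering: the error is fully constructed before the pointer becomes visible, and the acquire load guarantees the reader observes that construction. A standalone illustration of the same pairing with std::atomic (not gRPC code):

#include <atomic>
#include <string>

struct ShutdownError {
  std::string message;
};

std::atomic<ShutdownError*> g_state{nullptr};

void Publish() {
  ShutdownError* err = new ShutdownError{"shutdown"};  // construct first...
  g_state.store(err, std::memory_order_release);       // ...then publish
}

void Consume() {
  // Pairs with the release store above, so err->message is safe to read.
  ShutdownError* err = g_state.load(std::memory_order_acquire);
  if (err != nullptr) {
    (void)err->message;
  }
}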
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index 7a825643e1..c285d7eca6 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -166,8 +166,9 @@ static void posix_resolve_address(const char* name, const char* default_port,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
request* r = static_cast<request*>(gpr_malloc(sizeof(request)));
- GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
+ GRPC_CLOSURE_INIT(
+ &r->request_closure, do_request_thread, r,
+ grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index 71c92615ad..3e977dca2d 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -151,8 +151,9 @@ static void windows_resolve_address(const char* name, const char* default_port,
grpc_closure* on_done,
grpc_resolved_addresses** addresses) {
request* r = (request*)gpr_malloc(sizeof(request));
- GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
+ GRPC_CLOSURE_INIT(
+ &r->request_closure, do_request_thread, r,
+ grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc b/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc
index 7c4d7a71cd..8454fd7558 100644
--- a/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc
+++ b/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc
@@ -41,8 +41,9 @@ namespace internal {
bool check_bios_data(const char* bios_data_file) {
char* bios_data = read_bios_file(bios_data_file);
- bool result = (!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GOOGLE)) ||
- (!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GCE));
+ bool result =
+ bios_data && ((!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GOOGLE)) ||
+ (!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GCE)));
gpr_free(bios_data);
return result;
}
diff --git a/src/core/lib/security/security_connector/security_connector.cc b/src/core/lib/security/security_connector/security_connector.cc
index cc72bb6164..59cf3a0af1 100644
--- a/src/core/lib/security/security_connector/security_connector.cc
+++ b/src/core/lib/security/security_connector/security_connector.cc
@@ -57,6 +57,10 @@ static const char* installed_roots_path =
INSTALL_PREFIX "/share/grpc/roots.pem";
#endif
+#ifndef TSI_OPENSSL_ALPN_SUPPORT
+#define TSI_OPENSSL_ALPN_SUPPORT 1
+#endif
+
/* -- Overridden default roots. -- */
static grpc_ssl_roots_override_callback ssl_roots_override_cb = nullptr;
@@ -850,7 +854,8 @@ grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer) {
static grpc_error* ssl_check_peer(grpc_security_connector* sc,
const char* peer_name, const tsi_peer* peer,
grpc_auth_context** auth_context) {
- /* Check the ALPN. */
+#if TSI_OPENSSL_ALPN_SUPPORT
+ /* Check the ALPN if ALPN is supported. */
const tsi_peer_property* p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == nullptr) {
@@ -861,7 +866,7 @@ static grpc_error* ssl_check_peer(grpc_security_connector* sc,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: invalid ALPN value.");
}
-
+#endif /* TSI_OPENSSL_ALPN_SUPPORT */
/* Check the peer name if specified. */
if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) {
char* msg;
diff --git a/src/cpp/server/channelz/channelz_service.cc b/src/cpp/server/channelz/channelz_service.cc
new file mode 100644
index 0000000000..77c175e5b8
--- /dev/null
+++ b/src/cpp/server/channelz/channelz_service.cc
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/cpp/server/channelz/channelz_service.h"
+
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/util/json_util.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+namespace grpc {
+
+Status ChannelzService::GetTopChannels(
+ ServerContext* unused, const channelz::v1::GetTopChannelsRequest* request,
+ channelz::v1::GetTopChannelsResponse* response) {
+ char* json_str = grpc_channelz_get_top_channels(request->start_channel_id());
+ google::protobuf::util::Status s =
+ google::protobuf::util::JsonStringToMessage(json_str, response);
+ gpr_free(json_str);
+ if (s != google::protobuf::util::Status::OK) {
+ return Status(INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetChannel(
+ ServerContext* unused, const channelz::v1::GetChannelRequest* request,
+ channelz::v1::GetChannelResponse* response) {
+ char* json_str = grpc_channelz_get_channel(request->channel_id());
+ google::protobuf::util::Status s =
+ google::protobuf::util::JsonStringToMessage(json_str, response);
+ gpr_free(json_str);
+ if (s != google::protobuf::util::Status::OK) {
+ return Status(INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+} // namespace grpc
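A hedged sketch of calling the new service from a C++ client once it is registered on a server (see the plugin below). The stub and message types come from the generated channelz.proto code; the channel passed in is assumed to target a server with the plugin enabled.

#include <memory>

#include <grpcpp/grpcpp.h>
#include "src/proto/grpc/channelz/channelz.grpc.pb.h"

void QueryTopChannels(const std::shared_ptr<grpc::Channel>& channel) {
  std::unique_ptr<grpc::channelz::v1::Channelz::Stub> stub =
      grpc::channelz::v1::Channelz::NewStub(channel);
  grpc::channelz::v1::GetTopChannelsRequest request;
  request.set_start_channel_id(0);
  grpc::channelz::v1::GetTopChannelsResponse response;
  grpc::ClientContext context;
  grpc::Status status = stub->GetTopChannels(&context, request, &response);
  if (status.ok()) {
    // Inspect the channel data parsed out of the core's JSON dump.
  }
}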
diff --git a/src/cpp/server/channelz/channelz_service.h b/src/cpp/server/channelz/channelz_service.h
new file mode 100644
index 0000000000..f619ea49e0
--- /dev/null
+++ b/src/cpp/server/channelz/channelz_service.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
+#define GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpcpp/grpcpp.h>
+#include "src/proto/grpc/channelz/channelz.grpc.pb.h"
+
+namespace grpc {
+
+class ChannelzService final : public channelz::v1::Channelz::Service {
+ private:
+ // implementation of GetTopChannels rpc
+ Status GetTopChannels(
+ ServerContext* unused, const channelz::v1::GetTopChannelsRequest* request,
+ channelz::v1::GetTopChannelsResponse* response) override;
+ // implementation of GetChannel rpc
+ Status GetChannel(ServerContext* unused,
+ const channelz::v1::GetChannelRequest* request,
+ channelz::v1::GetChannelResponse* response) override;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
diff --git a/src/cpp/server/channelz/channelz_service_plugin.cc b/src/cpp/server/channelz/channelz_service_plugin.cc
new file mode 100644
index 0000000000..b93e5b551e
--- /dev/null
+++ b/src/cpp/server/channelz/channelz_service_plugin.cc
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <grpcpp/ext/channelz_service_plugin.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/impl/server_initializer.h>
+#include <grpcpp/server.h>
+
+#include "src/cpp/server/channelz/channelz_service.h"
+
+namespace grpc {
+namespace channelz {
+namespace experimental {
+
+class ChannelzServicePlugin : public ::grpc::ServerBuilderPlugin {
+ public:
+ ChannelzServicePlugin() : channelz_service_(new grpc::ChannelzService()) {}
+
+ grpc::string name() override { return "channelz_service"; }
+
+ void InitServer(grpc::ServerInitializer* si) override {
+ si->RegisterService(channelz_service_);
+ }
+
+ void Finish(grpc::ServerInitializer* si) override {}
+
+ void ChangeArguments(const grpc::string& name, void* value) override {}
+
+ bool has_sync_methods() const override {
+ if (channelz_service_) {
+ return channelz_service_->has_synchronous_methods();
+ }
+ return false;
+ }
+
+ bool has_async_methods() const override {
+ if (channelz_service_) {
+ return channelz_service_->has_async_methods();
+ }
+ return false;
+ }
+
+ private:
+ std::shared_ptr<grpc::ChannelzService> channelz_service_;
+};
+
+static std::unique_ptr< ::grpc::ServerBuilderPlugin>
+CreateChannelzServicePlugin() {
+ return std::unique_ptr< ::grpc::ServerBuilderPlugin>(
+ new ChannelzServicePlugin());
+}
+
+void InitChannelzService() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ ::grpc::ServerBuilder::InternalAddPluginFactory(&CreateChannelzServicePlugin);
+}
+
+} // namespace experimental
+} // namespace channelz
+} // namespace grpc
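Sketch of how a server opts in to the plugin defined above: calling InitChannelzService() registers the plugin factory, so servers built afterwards serve the channelz RPCs. The listening address below is a placeholder.

#include <memory>

#include <grpcpp/ext/channelz_service_plugin.h>
#include <grpcpp/grpcpp.h>

int main() {
  // Register the plugin before constructing any ServerBuilder.
  grpc::channelz::experimental::InitChannelzService();
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}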
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index 6d44be7ddd..0da95d203c 100755
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -46,10 +46,22 @@
<PackagePath>runtimes/win/native/grpc_csharp_ext.x86.dll</PackagePath>
<Pack>true</Pack>
</Content>
- <Content Include="Grpc.Core.targets">
+ <Content Include="..\nativelibs\csharp_ext_linux_android_armeabi-v7a\libgrpc_csharp_ext.so">
+ <PackagePath>runtimes/monoandroid/armeabi-v7a/libgrpc_csharp_ext.so</PackagePath>
+ <Pack>true</Pack>
+ </Content>
+ <Content Include="..\nativelibs\csharp_ext_linux_android_arm64-v8a\libgrpc_csharp_ext.so">
+ <PackagePath>runtimes/monoandroid/arm64-v8a/libgrpc_csharp_ext.so</PackagePath>
+ <Pack>true</Pack>
+ </Content>
+ <Content Include="build\net45\Grpc.Core.targets">
<PackagePath>build/net45/</PackagePath>
<Pack>true</Pack>
</Content>
+ <Content Include="build\MonoAndroid\Grpc.Core.targets">
+ <PackagePath>build/MonoAndroid/</PackagePath>
+ <Pack>true</Pack>
+ </Content>
</ItemGroup>
<ItemGroup>
diff --git a/src/csharp/Grpc.Core/Internal/NativeExtension.cs b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
index d5ec998bbd..f526b913af 100644
--- a/src/csharp/Grpc.Core/Internal/NativeExtension.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
@@ -106,7 +106,15 @@ namespace Grpc.Core.Internal
/// </summary>
private static NativeMethods LoadNativeMethods()
{
- return PlatformApis.IsUnity ? LoadNativeMethodsUnity() : new NativeMethods(LoadUnmanagedLibrary());
+ if (PlatformApis.IsUnity)
+ {
+ return LoadNativeMethodsUnity();
+ }
+ if (PlatformApis.IsXamarin)
+ {
+ return LoadNativeMethodsXamarin();
+ }
+ return new NativeMethods(LoadUnmanagedLibrary());
}
/// <summary>
@@ -128,6 +136,20 @@ namespace Grpc.Core.Internal
}
}
+ /// <summary>
+ /// Return native method delegates when running on the Xamarin platform.
+ /// WARNING: Xamarin support is experimental and work-in-progress. Don't expect it to work.
+ /// </summary>
+ private static NativeMethods LoadNativeMethodsXamarin()
+ {
+ if (PlatformApis.IsXamarinAndroid)
+ {
+ return new NativeMethods(new NativeMethods.DllImportsFromSharedLib());
+ }
+ // not tested yet
+ return new NativeMethods(new NativeMethods.DllImportsFromStaticLib());
+ }
+
private static string GetAssemblyPath()
{
var assembly = typeof(NativeExtension).GetTypeInfo().Assembly;
diff --git a/src/csharp/Grpc.Core/Internal/NativeLogRedirector.cs b/src/csharp/Grpc.Core/Internal/NativeLogRedirector.cs
index bf6440123a..30264acb10 100644
--- a/src/csharp/Grpc.Core/Internal/NativeLogRedirector.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeLogRedirector.cs
@@ -51,6 +51,7 @@ namespace Grpc.Core.Internal
}
}
+ [MonoPInvokeCallback(typeof(GprLogDelegate))]
private static void HandleWrite(IntPtr fileStringPtr, int line, ulong threadId, IntPtr severityStringPtr, IntPtr msgPtr)
{
try
@@ -86,4 +87,22 @@ namespace Grpc.Core.Internal
}
}
}
+
+ /// <summary>
+ /// Use this attribute to mark methods that will be called back from P/Invoke calls.
+ /// iOS (and probably other AOT platforms) needs to have delegates registered.
+ /// Instead of depending on Xamarin.iOS for this, we can just create our own,
+ /// the iOS runtime just checks for the type name.
+ /// See: https://docs.microsoft.com/en-gb/xamarin/ios/internals/limitations#reverse-callbacks
+ /// </summary>
+ [AttributeUsage(AttributeTargets.Method)]
+ internal sealed class MonoPInvokeCallbackAttribute : Attribute
+ {
+ public MonoPInvokeCallbackAttribute(Type type)
+ {
+ Type = type;
+ }
+
+ public Type Type { get; private set; }
+ }
}
diff --git a/src/csharp/Grpc.Core/Internal/PlatformApis.cs b/src/csharp/Grpc.Core/Internal/PlatformApis.cs
index b90fbccb2b..6c4ee0bdb7 100644
--- a/src/csharp/Grpc.Core/Internal/PlatformApis.cs
+++ b/src/csharp/Grpc.Core/Internal/PlatformApis.cs
@@ -33,12 +33,17 @@ namespace Grpc.Core.Internal
internal static class PlatformApis
{
const string UnityEngineApplicationClassName = "UnityEngine.Application, UnityEngine";
+ const string XamarinAndroidActivityClassName = "Android.App.Activity, Mono.Android";
+ const string XamariniOSEnumClassName = "Mono.CSharp.Enum, Mono.CSharp";
static readonly bool isLinux;
static readonly bool isMacOSX;
static readonly bool isWindows;
static readonly bool isMono;
static readonly bool isNetCore;
static readonly bool isUnity;
+ static readonly bool isXamarin;
+ static readonly bool isXamariniOS;
+ static readonly bool isXamarinAndroid;
static PlatformApis()
{
@@ -58,6 +63,9 @@ namespace Grpc.Core.Internal
#endif
isMono = Type.GetType("Mono.Runtime") != null;
isUnity = Type.GetType(UnityEngineApplicationClassName) != null;
+ isXamariniOS = Type.GetType(XamariniOSEnumClassName) != null;
+ isXamarinAndroid = Type.GetType(XamarinAndroidActivityClassName) != null;
+ isXamarin = isXamariniOS || isXamarinAndroid;
}
public static bool IsLinux
@@ -89,6 +97,31 @@ namespace Grpc.Core.Internal
}
/// <summary>
+ /// true if running on a Xamarin platform (either Xamarin.Android or Xamarin.iOS),
+ /// false otherwise.
+ /// </summary>
+ public static bool IsXamarin
+ {
+ get { return isXamarin; }
+ }
+
+ /// <summary>
+ /// true if running on Xamarin.iOS, false otherwise.
+ /// </summary>
+ public static bool IsXamariniOS
+ {
+ get { return isXamariniOS; }
+ }
+
+ /// <summary>
+ /// true if running on Xamarin.Android, false otherwise.
+ /// </summary>
+ public static bool IsXamarinAndroid
+ {
+ get { return isXamarinAndroid; }
+ }
+
+ /// <summary>
/// true if running on .NET Core (CoreCLR), false otherwise.
/// </summary>
public static bool IsNetCore
diff --git a/src/csharp/Grpc.Core/build/MonoAndroid/Grpc.Core.targets b/src/csharp/Grpc.Core/build/MonoAndroid/Grpc.Core.targets
new file mode 100644
index 0000000000..f764f4cae1
--- /dev/null
+++ b/src/csharp/Grpc.Core/build/MonoAndroid/Grpc.Core.targets
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup>
+ <_GrpcCoreNugetNativePath Condition="'$(_GrpcCoreNugetNativePath)' == ''">$(MSBuildThisFileDirectory)..\..\</_GrpcCoreNugetNativePath>
+ </PropertyGroup>
+
+ <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == 'MonoAndroid'">
+ <AndroidNativeLibrary Include="$(_GrpcCoreNugetNativePath)runtimes\monoandroid\arm64-v8a\libgrpc_csharp_ext.so">
+ <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+ <Abi>arm64-v8a</Abi>
+ </AndroidNativeLibrary>
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == 'MonoAndroid'">
+ <AndroidNativeLibrary Include="$(_GrpcCoreNugetNativePath)runtimes\monoandroid\armeabi-v7a\libgrpc_csharp_ext.so">
+ <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+ <Abi>armeabi-v7a</Abi>
+ </AndroidNativeLibrary>
+ </ItemGroup>
+
+</Project>
diff --git a/src/csharp/Grpc.Core/Grpc.Core.targets b/src/csharp/Grpc.Core/build/net45/Grpc.Core.targets
index cce53db82b..cce53db82b 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.targets
+++ b/src/csharp/Grpc.Core/build/net45/Grpc.Core.targets
diff --git a/src/csharp/doc/docfx.json b/src/csharp/doc/docfx.json
index 7219d0e7a6..0ce5f7262a 100644
--- a/src/csharp/doc/docfx.json
+++ b/src/csharp/doc/docfx.json
@@ -24,7 +24,7 @@
"dest": "api"
},
{
- "files": [ "toc.yml"],
+ "files": [ "toc.yml"]
}
],
"globalMetadata": {
diff --git a/src/csharp/experimental/build_native_ext_for_android.sh b/src/csharp/experimental/build_native_ext_for_android.sh
index 8197df7c53..5687a43a4e 100755
--- a/src/csharp/experimental/build_native_ext_for_android.sh
+++ b/src/csharp/experimental/build_native_ext_for_android.sh
@@ -23,17 +23,29 @@ mkdir -p build
cd build
# set to the location where Android SDK is installed
-# e.g. ANDROID_NDK_PATH="$HOME/android-ndk-r16b"
+# e.g. ANDROID_SDK_PATH="$HOME/Android/Sdk"
-cmake ../.. \
- -DCMAKE_SYSTEM_NAME=Android \
- -DCMAKE_SYSTEM_VERSION=15 \
- -DCMAKE_ANDROID_ARCH_ABI=armeabi-v7a \
+# set to location where Android NDK is installed, usually a subfolder of Android SDK
+# to install the Android NKD, use the "sdkmanager" tool
+# e.g. ANDROID_NDK_PATH=${ANDROID_SDK_PATH}/ndk-bundle
+
+# set to location of the cmake executable from the Android SDK
+# to install cmake, use the "sdkmanager" tool
+# e.g. ANDROID_SDK_CMAKE=${ANDROID_SDK_PATH}/cmake/3.6.4111459/bin/cmake
+
+# ANDROID_ABI in ('arm64-v8a', 'armeabi-v7a')
+# e.g. ANDROID_ABI=armeabi-v7a
+
+${ANDROID_SDK_CMAKE} ../.. \
+ -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake" \
-DCMAKE_ANDROID_NDK="${ANDROID_NDK_PATH}" \
-DCMAKE_ANDROID_STL_TYPE=c++_static \
-DRUN_HAVE_POSIX_REGEX=0 \
-DRUN_HAVE_STD_REGEX=0 \
-DRUN_HAVE_STEADY_CLOCK=0 \
- -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_BUILD_TYPE=Release \
+ -DANDROID_PLATFORM=android-28 \
+ -DANDROID_ABI="${ANDROID_ABI}" \
+ -DANDROID_NDK="${ANDROID_NDK_PATH}"
make -j4 grpc_csharp_ext
diff --git a/src/proto/grpc/testing/echo_messages.proto b/src/proto/grpc/testing/echo_messages.proto
index 5396a2fd39..2f935304ab 100644
--- a/src/proto/grpc/testing/echo_messages.proto
+++ b/src/proto/grpc/testing/echo_messages.proto
@@ -46,6 +46,7 @@ message RequestParams {
string binary_error_details = 13;
ErrorStatus expected_error = 14;
int32 server_sleep_us = 15; // Amount to sleep when invoking server
+ int32 backend_channel_idx = 16; // which backend to send request to
}
message EchoRequest {
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 4e235121e2..2443532bb8 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -92,6 +92,8 @@ grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
+grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
+grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
grpc_use_signal_type grpc_use_signal_import;
@@ -340,6 +342,8 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
+ grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels");
+ grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel");
grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index f01c9c8248..b08a1f94f7 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -251,6 +251,12 @@ extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
typedef const grpc_arg_pointer_vtable*(*grpc_resource_quota_arg_vtable_type)(void);
extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
#define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import
+typedef char*(*grpc_channelz_get_top_channels_type)(intptr_t start_channel_id);
+extern grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
+#define grpc_channelz_get_top_channels grpc_channelz_get_top_channels_import
+typedef char*(*grpc_channelz_get_channel_type)(intptr_t channel_id);
+extern grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
+#define grpc_channelz_get_channel grpc_channelz_get_channel_import
typedef grpc_channel*(*grpc_insecure_channel_create_from_fd_type)(const char* target, int fd, const grpc_channel_args* args);
extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import