-rw-r--r--  src/core/ext/filters/client_channel/client_channel_channelz.h               3
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h                              1
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc               1
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc       8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc     1
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc                            5
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h                             5
-rw-r--r--  src/core/lib/channel/channelz.cc                                             2
-rw-r--r--  src/core/lib/channel/channelz.h                                              4
-rw-r--r--  src/core/lib/gprpp/inlined_vector.h                                         88
-rw-r--r--  test/core/gprpp/inlined_vector_test.cc                                     236
11 files changed, 232 insertions, 122 deletions
diff --git a/src/core/ext/filters/client_channel/client_channel_channelz.h b/src/core/ext/filters/client_channel/client_channel_channelz.h
index e5be52e778..0547109d36 100644
--- a/src/core/ext/filters/client_channel/client_channel_channelz.h
+++ b/src/core/ext/filters/client_channel/client_channel_channelz.h
@@ -28,6 +28,9 @@
namespace grpc_core {
+// TODO(ncteisen): this only contains the uuids of the children for now,
+// since that is all that is strictly needed. In a future enhancement we will
+// add human-readable names as in channelz.proto.
typedef InlinedVector<intptr_t, 10> ChildRefsList;
namespace channelz {
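
A quick aside on ChildRefsList: it is nothing more than an InlinedVector<intptr_t, 10> holding child uuids, so callers fill and read it like any small vector. A minimal sketch follows; CollectChildUuids and the literal uuids are purely illustrative (not part of this patch), and it assumes the gRPC source tree is on the include path.

```cpp
// Minimal sketch, not part of this patch: populating and reading a
// ChildRefsList. CollectChildUuids and the literal uuids are illustrative
// only; in the real code the uuids come from child subchannels' channelz
// nodes.
#include <cstddef>
#include <cstdio>

#include "src/core/ext/filters/client_channel/client_channel_channelz.h"

namespace {

void CollectChildUuids(grpc_core::ChildRefsList* out) {
  out->push_back(1);
  out->push_back(2);
}

}  // namespace

int main() {
  grpc_core::ChildRefsList refs;
  CollectChildUuids(&refs);
  for (size_t i = 0; i < refs.size(); ++i) {
    printf("child uuid: %ld\n", static_cast<long>(refs[i]));
  }
  return 0;
}
```
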
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 7bc224ad81..3150df8847 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -208,6 +208,7 @@ class LoadBalancingPolicy
// Dummy classes needed for alignment issues.
// See https://github.com/grpc/grpc/issues/16032 for context.
+ // TODO(ncteisen): remove this as soon as the issue is resolved.
ChildRefsList dummy_list_foo;
ChildRefsList dummy_list_bar;
};
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 622c03a8d1..f757d6057c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -135,6 +135,7 @@ class GrpcLb : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
+ // TODO(ncteisen): implement this in a follow up PR
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* child_channels) override {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 390b4ee1bb..c50deb9679 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -311,13 +311,14 @@ void PickFirst::FillChildRefsForChannelz(
// TODO(ncteisen): implement a de dup loop that is not O(n^2). Might
// have to implement lightweight set. For now, we don't care about
// performance when channelz requests are made.
- bool add_elt = true;
+ bool found = false;
for (size_t j = 0; j < child_subchannels_to_fill->size(); ++j) {
if ((*child_subchannels_to_fill)[j] == child_subchannels_[i]) {
- add_elt = false;
+ found = true;
+ break;
}
}
- if (add_elt) {
+ if (!found) {
child_subchannels_to_fill->push_back(child_subchannels_[i]);
}
}
@@ -351,7 +352,6 @@ void PickFirst::UpdateChildRefsLocked() {
}
}
}
-
// atomically update the data that channelz will actually be looking at.
mu_guard guard(&child_refs_mu_);
child_subchannels_ = std::move(cs);
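
The TODO above flags the O(n^2) de-duplication. As a rough sketch of the direction it hints at, the snippet below de-duplicates uuids in roughly O(n log n); std::set, std::vector, and the AppendUnique helper are stand-ins for illustration only (the real container is InlinedVector<intptr_t, 10>, and the TODO suggests a lightweight in-tree set rather than std::set).

```cpp
// Illustrative only: de-duplicating child uuids in roughly O(n log n)
// instead of O(n^2). std::set, std::vector and AppendUnique are stand-ins
// for this sketch; they are not part of the patch.
#include <cstdint>
#include <set>
#include <vector>

using ChildRefsList = std::vector<intptr_t>;  // stand-in for the real typedef

void AppendUnique(const ChildRefsList& candidates, ChildRefsList* out) {
  // Seed the set with uuids already present in the output list.
  std::set<intptr_t> seen(out->begin(), out->end());
  for (intptr_t uuid : candidates) {
    // insert() returns {iterator, inserted}; only newly seen uuids are kept.
    if (seen.insert(uuid).second) {
      out->push_back(uuid);
    }
  }
}
```
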
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index e6bc94a008..4c42b2063f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -69,6 +69,7 @@ class RoundRobin : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
+ // TODO(ncteisen): implement this in a follow up PR
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* child_channels) override {}
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 86026b70a5..9d608c3c55 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -181,9 +181,7 @@ static void connection_destroy(void* arg, grpc_error* error) {
static void subchannel_destroy(void* arg, grpc_error* error) {
grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
- if (c->channelz_subchannel != nullptr) {
- c->channelz_subchannel.reset();
- }
+ c->channelz_subchannel.reset();
gpr_free((void*)c->filters);
grpc_channel_args_destroy(c->args);
grpc_connectivity_state_destroy(&c->state_tracker);
@@ -380,7 +378,6 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
c->backoff.Init(backoff_options);
gpr_mu_init(&c->mu);
- // This is just a placeholder channelz class for for now.
const grpc_arg* arg =
grpc_channel_args_find(c->args, GRPC_ARG_ENABLE_CHANNELZ);
bool channelz_enabled = grpc_channel_arg_get_bool(arg, false);
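
The subchannel_destroy change above works because reset() on an empty smart pointer is already a no-op, so the explicit null check added nothing. A standalone sketch, using std::unique_ptr as an analogy for grpc_core::RefCountedPtr:

```cpp
// Stand-in demonstration: reset() on an empty smart pointer is a no-op, so
// wrapping it in a null check is redundant. std::unique_ptr is used here
// only as an analogy for grpc_core::RefCountedPtr.
#include <cassert>
#include <memory>

struct Node {
  int value = 0;
};

int main() {
  std::unique_ptr<Node> p;  // empty
  p.reset();                // safe: nothing to release
  p.reset(new Node());      // now owns a Node
  p.reset();                // releases the Node
  assert(p == nullptr);
  return 0;
}
```
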
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index f76be81543..9e53f7d542 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
+#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gpr/arena.h"
@@ -71,10 +72,6 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
namespace grpc_core {
-namespace channelz {
-class SubchannelNode;
-}
-
class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
public:
struct CallArgs {
diff --git a/src/core/lib/channel/channelz.cc b/src/core/lib/channel/channelz.cc
index 5a0620c51f..79a9220503 100644
--- a/src/core/lib/channel/channelz.cc
+++ b/src/core/lib/channel/channelz.cc
@@ -131,7 +131,7 @@ RefCountedPtr<ChannelNode> ChannelNode::MakeChannelNode(
channel, channel_tracer_max_nodes);
}
-SubchannelNode::SubchannelNode() : subchannel_uuid_(-1) {
+SubchannelNode::SubchannelNode() {
subchannel_uuid_ = ChannelzRegistry::Register(this);
}
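
The dropped subchannel_uuid_(-1) initializer was redundant because ChannelzRegistry::Register(this) assigns the real uuid immediately in the constructor body. A toy sketch of that register-in-constructor pattern follows; SimpleRegistry is hypothetical and is not the real ChannelzRegistry.

```cpp
// Toy illustration; SimpleRegistry is hypothetical and is NOT the real
// grpc_core::channelz::ChannelzRegistry. It only shows why pre-initializing
// the uuid to -1 was redundant: the registry hands back the uuid inside the
// constructor anyway.
#include <cstdint>
#include <map>

class SimpleRegistry {
 public:
  static intptr_t Register(void* node) {
    static intptr_t next_uuid = 1;
    const intptr_t uuid = next_uuid++;
    entries()[uuid] = node;
    return uuid;
  }
  static void Unregister(intptr_t uuid) { entries().erase(uuid); }

 private:
  static std::map<intptr_t, void*>& entries() {
    static std::map<intptr_t, void*> m;
    return m;
  }
};

class SubchannelNodeSketch {
 public:
  SubchannelNodeSketch() { subchannel_uuid_ = SimpleRegistry::Register(this); }
  ~SubchannelNodeSketch() { SimpleRegistry::Unregister(subchannel_uuid_); }
  intptr_t uuid() const { return subchannel_uuid_; }

 private:
  intptr_t subchannel_uuid_;
};
```
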
diff --git a/src/core/lib/channel/channelz.h b/src/core/lib/channel/channelz.h
index f2c5ecddbf..7184af93ec 100644
--- a/src/core/lib/channel/channelz.h
+++ b/src/core/lib/channel/channelz.h
@@ -96,8 +96,8 @@ class ChannelNode : public RefCounted<ChannelNode> {
};
// Placeholds channelz class for subchannels. All this can do now is track its
-// uuid (this information is needed by the parent channelz class). In the next
-// PR I will build this out to support the GetSubchannel channelz request.
+// uuid (this information is needed by the parent channelz class).
+// TODO(ncteisen): build this out to support the GetSubchannel channelz request.
class SubchannelNode : public RefCounted<SubchannelNode> {
public:
SubchannelNode();
diff --git a/src/core/lib/gprpp/inlined_vector.h b/src/core/lib/gprpp/inlined_vector.h
index 9c2e131455..508fb2eed1 100644
--- a/src/core/lib/gprpp/inlined_vector.h
+++ b/src/core/lib/gprpp/inlined_vector.h
@@ -51,73 +51,30 @@ class InlinedVector {
InlinedVector() { init_data(); }
~InlinedVector() { destroy_elements(); }
- // copy constructors
+ // copy constructor
InlinedVector(const InlinedVector& v) {
init_data();
- // if v is allocated, then we copy it's buffer
- if (v.dynamic_ != nullptr) {
- reserve(v.capacity_);
- memcpy(dynamic_, v.dynamic_, v.capacity_ * sizeof(T));
- } else {
- memcpy(inline_, v.inline_, v.capacity_ * sizeof(T));
- dynamic_ = nullptr;
- }
- // copy over metadata
- size_ = v.size_;
- capacity_ = v.capacity_;
+ copy_from(v);
}
+
InlinedVector& operator=(const InlinedVector& v) {
if (this != &v) {
clear();
- // if v is allocated, then we copy it's buffer
- if (v.dynamic_ != nullptr) {
- reserve(v.capacity_);
- memcpy(dynamic_, v.dynamic_, v.capacity_ * sizeof(T));
- } else {
- memcpy(inline_, v.inline_, v.capacity_ * sizeof(T));
- dynamic_ = nullptr;
- }
- // copy over metadata
- size_ = v.size_;
- capacity_ = v.capacity_;
+ copy_from(v);
}
return *this;
}
- // move constructors
+ // move constructor
InlinedVector(InlinedVector&& v) {
- // if v is allocated, then we steal it's buffer
- if (v.dynamic_ != nullptr) {
- dynamic_ = v.dynamic_;
- } else {
- memcpy(inline_, v.inline_, v.capacity_ * sizeof(T));
- dynamic_ = nullptr;
- }
- // copy over metadata
- size_ = v.size_;
- capacity_ = v.capacity_;
- // null out the original
- v.dynamic_ = nullptr;
- v.size_ = 0;
- v.capacity_ = 0;
+ init_data();
+ move_from(v);
}
+
InlinedVector& operator=(InlinedVector&& v) {
if (this != &v) {
clear();
- // if v is allocated, then we steal it's buffer
- if (v.dynamic_ != nullptr) {
- dynamic_ = v.dynamic_;
- } else {
- memcpy(inline_, v.inline_, v.capacity_ * sizeof(T));
- dynamic_ = nullptr;
- }
- // copy over metadata
- size_ = v.size_;
- capacity_ = v.capacity_;
- // null out the original
- v.dynamic_ = nullptr;
- v.size_ = 0;
- v.capacity_ = 0;
+ move_from(v);
}
return *this;
}
@@ -166,6 +123,33 @@ class InlinedVector {
void push_back(T&& value) { emplace_back(std::move(value)); }
+ void copy_from(const InlinedVector& v) {
+    // if v is allocated, copy over its buffer; otherwise copy the inlined elements.
+ if (v.dynamic_ != nullptr) {
+ reserve(v.capacity_);
+ memcpy(dynamic_, v.dynamic_, v.size_ * sizeof(T));
+ } else {
+ memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ }
+ // copy over metadata
+ size_ = v.size_;
+ capacity_ = v.capacity_;
+ }
+
+ void move_from(InlinedVector& v) {
+ // if v is allocated, then we steal its buffer, else we copy it.
+ if (v.dynamic_ != nullptr) {
+ dynamic_ = v.dynamic_;
+ } else {
+ memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ }
+ // copy over metadata
+ size_ = v.size_;
+ capacity_ = v.capacity_;
+ // null out the original
+ v.init_data();
+ }
+
size_t size() const { return size_; }
bool empty() const { return size_ == 0; }
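
The refactor above folds the duplicated copy/move bodies into copy_from() and move_from() (and, in passing, copies size_ elements rather than capacity_ elements in the memcpy calls). A hedged usage sketch of the observable semantics the new tests below exercise: copies always duplicate elements, moving a heap-allocated vector steals its buffer, and moving an inlined vector copies its elements.

```cpp
// Usage sketch for grpc_core::InlinedVector, mirroring what the new tests
// below assert: moving a heap-allocated vector transfers its buffer (the
// data() pointer is preserved), while moving an inlined vector copies its
// elements. Assumes it is built inside the gRPC source tree.
#include <cassert>
#include <utility>

#include "src/core/lib/gprpp/inlined_vector.h"

int main() {
  using IntVec4 = grpc_core::InlinedVector<int, 4>;

  // Stays inlined: 3 elements fit in the 4-slot inline storage.
  IntVec4 small;
  for (int i = 0; i < 3; ++i) small.push_back(i);
  auto* small_data = small.data();
  IntVec4 small_moved(std::move(small));
  assert(small_moved.data() != small_data);  // inline storage is copied

  // Spills to the heap: 6 elements exceed the inline capacity.
  IntVec4 big;
  for (int i = 0; i < 6; ++i) big.push_back(i);
  auto* big_data = big.data();
  IntVec4 big_moved(std::move(big));
  assert(big_moved.data() == big_data);  // heap buffer is stolen, not copied
  return 0;
}
```
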
diff --git a/test/core/gprpp/inlined_vector_test.cc b/test/core/gprpp/inlined_vector_test.cc
index ba2f4d9e6f..1ef7da465c 100644
--- a/test/core/gprpp/inlined_vector_test.cc
+++ b/test/core/gprpp/inlined_vector_test.cc
@@ -27,9 +27,9 @@ namespace testing {
namespace {
template <typename Vector>
-static void FillVector(Vector* v, int len, int offset = 0) {
+static void FillVector(Vector* v, int len, int start = 0) {
for (int i = 0; i < len; i++) {
- v->push_back(i + offset);
+ v->push_back(i + start);
EXPECT_EQ(i + 1UL, v->size());
}
}
@@ -107,69 +107,195 @@ TEST(InlinedVectorTest, ConstIndexOperator) {
const_func(v);
}
-TEST(InlinedVectorTest, CopyConstructorAndAssignment) {
- typedef InlinedVector<int, 8> IntVec8;
- for (size_t len = 0; len < 20; len++) {
- IntVec8 original;
- FillVector(&original, len);
- EXPECT_EQ(len, original.size());
- EXPECT_LE(len, original.capacity());
+TEST(InlinedVectorTest, CopyConstructorInlined) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength - 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ IntVec8 copy_constructed(original);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], copy_constructed[i]);
+ }
+}
+
+TEST(InlinedVectorTest, CopyConstructorAllocated) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength + 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ IntVec8 copy_constructed(original);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], copy_constructed[i]);
+ }
+}
- IntVec8 copy_constructed(original);
+TEST(InlinedVectorTest, CopyAssignmentInlined) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength - 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ // copy assigned vector is inlined
+ {
+ IntVec8 copy_assigned;
+ FillVector(&copy_assigned, kInlinedLength - 1, 99);
+ copy_assigned = original;
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], copy_assigned[i]);
+ }
+ }
+ // copy assigned vector is allocated
+ {
+ IntVec8 copy_assigned;
+ FillVector(&copy_assigned, kInlinedLength + 1, 99);
+ copy_assigned = original;
for (size_t i = 0; i < original.size(); ++i) {
- EXPECT_TRUE(original[i] == copy_constructed[i]);
+ EXPECT_EQ(original[i], copy_assigned[i]);
}
+ }
+}
- for (size_t start_len = 0; start_len < 20; start_len++) {
- IntVec8 copy_assigned;
- FillVector(&copy_assigned, start_len, 99); // Add dummy elements
- copy_assigned = original;
- for (size_t i = 0; i < original.size(); ++i) {
- EXPECT_TRUE(original[i] == copy_assigned[i]);
- }
+TEST(InlinedVectorTest, CopyAssignmentAllocated) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength + 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ // copy assigned vector is inlined
+ {
+ IntVec8 copy_assigned;
+ FillVector(&copy_assigned, kInlinedLength - 1, 99);
+ copy_assigned = original;
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], copy_assigned[i]);
+ }
+ }
+ // copy assigned vector is allocated
+ {
+ IntVec8 copy_assigned;
+ FillVector(&copy_assigned, kInlinedLength + 1, 99);
+ copy_assigned = original;
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], copy_assigned[i]);
}
}
}
-TEST(InlinedVectorTest, MoveConstructorAndAssignment) {
- typedef InlinedVector<int, 8> IntVec8;
- for (size_t len = 0; len < 20; len++) {
- IntVec8 original;
- const size_t inlined_capacity = original.capacity();
- FillVector(&original, len);
- EXPECT_EQ(len, original.size());
- EXPECT_LE(len, original.capacity());
-
- {
- IntVec8 tmp(original);
- auto* old_data = tmp.data();
- IntVec8 move_constructed(std::move(tmp));
- for (size_t i = 0; i < original.size(); ++i) {
- EXPECT_TRUE(original[i] == move_constructed[i]);
- }
- if (original.size() > inlined_capacity) {
- // Allocation is moved as a whole, data stays in place.
- EXPECT_TRUE(move_constructed.data() == old_data);
- } else {
- EXPECT_FALSE(move_constructed.data() == old_data);
- }
+TEST(InlinedVectorTest, MoveConstructorInlined) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength - 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ IntVec8 move_constructed(std::move(tmp));
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_constructed[i]);
+ }
+ // original data was inlined so it should have been copied, not moved.
+ EXPECT_NE(move_constructed.data(), old_data);
+}
+
+TEST(InlinedVectorTest, MoveConstructorAllocated) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength + 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ IntVec8 move_constructed(std::move(tmp));
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_constructed[i]);
+ }
+  // original data was allocated, so it should have been moved, not copied.
+ EXPECT_EQ(move_constructed.data(), old_data);
+}
+
+TEST(InlinedVectorTest, MoveAssignmentInlined) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength - 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ // move assigned vector is inlined
+ {
+ IntVec8 move_assigned;
+ FillVector(&move_assigned, kInlinedLength - 1, 99); // Add dummy elements
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ move_assigned = std::move(tmp);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_assigned[i]);
+ }
+ // original data was inlined so it should have been copied, not moved.
+ EXPECT_NE(move_assigned.data(), old_data);
+ }
+ // move assigned vector is allocated
+ {
+ IntVec8 move_assigned;
+ FillVector(&move_assigned, kInlinedLength + 1, 99); // Add dummy elements
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ move_assigned = std::move(tmp);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_assigned[i]);
}
- for (size_t start_len = 0; start_len < 20; start_len++) {
- IntVec8 move_assigned;
- FillVector(&move_assigned, start_len, 99); // Add dummy elements
- IntVec8 tmp(original);
- auto* old_data = tmp.data();
- move_assigned = std::move(tmp);
- for (size_t i = 0; i < original.size(); ++i) {
- EXPECT_TRUE(original[i] == move_assigned[i]);
- }
- if (original.size() > inlined_capacity) {
- // Allocation is moved as a whole, data stays in place.
- EXPECT_TRUE(move_assigned.data() == old_data);
- } else {
- EXPECT_FALSE(move_assigned.data() == old_data);
- }
+ // original data was inlined so it should have been copied, not moved.
+ EXPECT_NE(move_assigned.data(), old_data);
+ }
+}
+
+TEST(InlinedVectorTest, MoveAssignmentAllocated) {
+ const size_t kInlinedLength = 8;
+ const size_t kFillSize = kInlinedLength + 1;
+ typedef InlinedVector<int, kInlinedLength> IntVec8;
+ IntVec8 original;
+ FillVector(&original, kFillSize);
+ EXPECT_EQ(kFillSize, original.size());
+ EXPECT_LE(kFillSize, original.capacity());
+ // move assigned vector is inlined
+ {
+ IntVec8 move_assigned;
+ FillVector(&move_assigned, kInlinedLength - 1, 99); // Add dummy elements
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ move_assigned = std::move(tmp);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_assigned[i]);
+ }
+ // original data was allocated so it should have been moved, not copied.
+ EXPECT_EQ(move_assigned.data(), old_data);
+ }
+ // move assigned vector is allocated
+ {
+ IntVec8 move_assigned;
+ FillVector(&move_assigned, kInlinedLength + 1, 99); // Add dummy elements
+ IntVec8 tmp(original);
+ auto* old_data = tmp.data();
+ move_assigned = std::move(tmp);
+ for (size_t i = 0; i < original.size(); ++i) {
+ EXPECT_EQ(original[i], move_assigned[i]);
}
+ // original data was allocated so it should have been moved, not copied.
+ EXPECT_EQ(move_assigned.data(), old_data);
}
}