author    Abseil Team <absl-team@google.com>  2021-12-03 10:01:02 -0800
committer Andy Getz <durandal@google.com>     2021-12-03 13:15:13 -0500
commit    9336be04a242237cd41a525bedfcf3be1bb55377 (patch)
tree      781262c8d79eb78d942bdc2c945dda8b225ca51e /absl/container/internal
parent    e11e039e02ef30a98a7928ce6c59cebe096dd753 (diff)
Export of internal Abseil changes
-- e7f53dfbf809812e84770217777f81b6308a3084 by Abseil Team <absl-team@google.com>:

Add a parameter pack to absl profile to allow profiles to separate dynamic data from static data that is available at constructor-time.

Background: `inline_element_size` is effectively constant, but there was a data race between its initialization and its access. We had fixed that race by making `inline_element_size` atomic. This CL changes `inline_element_size` back to a non-atomic integer, and provides a way for all profiles to supply Register()-time values.

PiperOrigin-RevId: 413960559

-- 70234c5943f8e37e17c1d9c54d8ed61d39880abf by Chris Kennelly <ckennelly@google.com>:

Document that absl::FunctionRef does not allocate.

PiperOrigin-RevId: 413946831

-- 3308ae571412c4be3cc32d088c6edac98ff2d1ed by Samuel Benzaquen <sbenza@google.com>:

Internal change

PiperOrigin-RevId: 413933619

-- 1617093a730d055edcf7bc04fdd6509783f5f75d by Martijn Vels <mvels@google.com>:

Internal Change

PiperOrigin-RevId: 413778735

-- 03ad683f059c806a6c8b04f5b79b2662c3df8c73 by Evan Brown <ezb@google.com>:

Unify the b-tree erase_if definitions and optimize them so that we only do rebalancing once per leaf node.

PiperOrigin-RevId: 413757280

-- 5ba402f70801938178e486617063f01c7862525d by Martijn Vels <mvels@google.com>:

Clean up cord sampling internals.

PiperOrigin-RevId: 413755011

-- 522da8f9d3e0f11630d89fb41952004742bc335a by Evan Brown <ezb@google.com>:

Add a b-tree benchmark for erase_if. Since this benchmark doesn't work for std:: containers before C++20, disable it for them.

PiperOrigin-RevId: 413740844

-- a690ea42de8ed4a761d00235d8b2fb7548ba9732 by Andy Getzendanner <durandal@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 413735737

GitOrigin-RevId: e7f53dfbf809812e84770217777f81b6308a3084
Change-Id: I4f9f9039ba92831bc48971964aa063244c9fed72
Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/btree.h                      46
-rw-r--r--  absl/container/internal/btree_container.h             1
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc        18
-rw-r--r--  absl/container/internal/hashtablez_sampler.h         16
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc   41
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc          3
6 files changed, 79 insertions, 46 deletions
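
The first change above is the subtlest: Register() now takes arguments that are forwarded to PrepareForSampling() while the sample's mutex is still held, so logically-const fields are fully written before any reader can observe the sample. A minimal standalone sketch of that pattern follows; SampleInfo and Recorder are hypothetical stand-ins, not the real absl::profiling_internal types.

// Sketch only: illustrates the Register()-time parameter-pack idea, not the
// actual Sample/SampleRecorder implementation.
#include <cstddef>
#include <mutex>
#include <utility>

struct SampleInfo {
  std::mutex init_mu;
  size_t inline_element_size = 0;  // logically const once registered

  // Caller must hold init_mu; fills in the constructor-time ("static") data.
  void PrepareForSampling(size_t inline_element_size_value) {
    inline_element_size = inline_element_size_value;
  }
};

template <typename Sample>
class Recorder {
 public:
  // The parameter pack lets each profile supply its own Register()-time
  // values without the recorder knowing what they are.
  template <typename... Args>
  Sample* Register(Args&&... args) {
    auto* sample = new Sample();
    {
      std::lock_guard<std::mutex> lock(sample->init_mu);
      sample->PrepareForSampling(std::forward<Args>(args)...);
    }
    // inline_element_size is written before the sample becomes visible, so
    // it no longer needs to be atomic.
    return sample;
  }
};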
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index bf65a03d..29603379 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -830,6 +830,7 @@ class btree_node {
template <typename N, typename R, typename P>
friend struct btree_iterator;
friend class BtreeNodePeer;
+ friend struct btree_access;
};
template <typename Node, typename Reference, typename Pointer>
@@ -964,6 +965,7 @@ struct btree_iterator {
friend class btree_multiset_container;
template <typename TreeType, typename CheckerType>
friend class base_checker;
+ friend struct btree_access;
const key_type &key() const { return node->key(position); }
slot_type *slot() { return node->slot(position); }
@@ -1336,6 +1338,8 @@ class btree {
allocator_type get_allocator() const { return allocator(); }
private:
+ friend struct btree_access;
+
// Internal accessor routines.
node_type *root() { return root_.template get<2>(); }
const node_type *root() const { return root_.template get<2>(); }
@@ -2530,6 +2534,48 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
return count;
}
+struct btree_access {
+ template <typename BtreeContainer, typename Pred>
+ static auto erase_if(BtreeContainer &container, Pred pred)
+ -> typename BtreeContainer::size_type {
+ const auto initial_size = container.size();
+ auto &tree = container.tree_;
+ auto *alloc = tree.mutable_allocator();
+ for (auto it = container.begin(); it != container.end();) {
+ if (!pred(*it)) {
+ ++it;
+ continue;
+ }
+ auto *node = it.node;
+ if (!node->leaf()) {
+ // Handle internal nodes normally.
+ it = container.erase(it);
+ continue;
+ }
+ // If this is a leaf node, then we do all the erases from this node
+ // at once before doing rebalancing.
+
+ // The current position to transfer slots to.
+ int to_pos = it.position;
+ node->value_destroy(it.position, alloc);
+ while (++it.position < node->finish()) {
+ if (pred(*it)) {
+ node->value_destroy(it.position, alloc);
+ } else {
+ node->transfer(node->slot(to_pos++), node->slot(it.position),
+ alloc);
+ }
+ }
+ const int num_deleted = node->finish() - to_pos;
+ tree.size_ -= num_deleted;
+ node->set_finish(to_pos);
+ it.position = to_pos;
+ it = tree.rebalance_after_delete(it);
+ }
+ return initial_size - container.size();
+ }
+};
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
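
For reference, the public entry points that dispatch to btree_access::erase_if are the absl::erase_if overloads declared alongside the b-tree containers (btree_set.h, btree_map.h). A small usage sketch; as of this change the return value is the number of erased elements (initial_size - container.size() above):

#include <iostream>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6, 7, 8};
  // Matching slots inside a leaf are destroyed in one pass and the tree is
  // rebalanced once per leaf, rather than once per erased element.
  const auto erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
  std::cout << erased << " erased, " << s.size() << " remain\n";  // 4 and 4
  return 0;
}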
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index a99668c7..d28b2446 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -228,6 +228,7 @@ class btree_container {
}
protected:
+ friend struct btree_access;
Tree tree_;
};
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index a3e15a70..1d24db5f 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -61,13 +61,10 @@ HashtablezSampler& GlobalHashtablezSampler() {
return *sampler;
}
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -85,6 +82,7 @@ void HashtablezInfo::PrepareForSampling() {
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
+ inline_element_size = inline_element_size_value;
}
static bool ShouldForceSampling() {
@@ -110,9 +108,8 @@ static bool ShouldForceSampling() {
HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size.store(inline_element_size,
- std::memory_order_relaxed);
+ HashtablezInfo* result =
+ GlobalHashtablezSampler().Register(inline_element_size);
return result;
}
@@ -138,10 +135,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
return SampleSlow(next_sample, inline_element_size);
}
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size.store(inline_element_size,
- std::memory_order_relaxed);
- return result;
+ return GlobalHashtablezSampler().Register(inline_element_size);
#endif
}
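
The shape of the race this removes: Register() used to return with init_mu already unlocked, and the caller then stored inline_element_size, so a concurrent Iterate() could read the field between the unlock and the store (the TSAN report quoted in the header diff below). A sketch of the calling pattern, using only functions that appear in this diff:

#include <cstddef>

#include "absl/container/internal/hashtablez_sampler.h"

namespace {

using absl::container_internal::GlobalHashtablezSampler;
using absl::container_internal::HashtablezInfo;

HashtablezInfo* RegisterSample(size_t inline_element_size) {
  // Old, racy pattern, kept here as a comment for contrast:
  //   HashtablezInfo* info = GlobalHashtablezSampler().Register();
  //   info->inline_element_size.store(inline_element_size);  // after unlock
  //
  // New pattern: the value travels through Register() and is written by
  // PrepareForSampling() while init_mu is still held.
  return GlobalHashtablezSampler().Register(inline_element_size);
}

}  // namespace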
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 0fd9349f..6738786c 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+ void PrepareForSampling(size_t inline_element_size_value)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
@@ -82,18 +83,6 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;
- // One could imagine that inline_element_size could be non-atomic, since it
- // *almost* follows the rules for the fields that are set by
- // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
- // which
- // A: Thread 1: Register() returns, unlocking init_mu.
- // B: Thread 2: Iterate() is called, locking init_mu.
- // C: Thread 1: inlined_element_size is stored.
- // D: Thread 2: inlined_element_size is accessed (a race).
- // A simple solution is to make inline_element_size atomic so that we treat it
- // at as we do the other atomic fields.
- std::atomic<size_t> inline_element_size;
-
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
// These are guarded by init_mu, but that is not externalized to clients,
@@ -103,6 +92,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
+ size_t inline_element_size; // How big is the slot?
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
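
The ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu) annotation above is what makes the plain size_t safe: Clang's thread-safety analysis rejects any caller that invokes PrepareForSampling() without holding the mutex. A minimal illustration of the same annotation pair; the Annotated class here is hypothetical:

#include <cstddef>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Annotated {
 public:
  // Compile-time enforced: callers must hold mu when calling Init().
  void Init(size_t v) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu) { value_ = v; }
  size_t value() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu) { return value_; }

  absl::Mutex mu;

 private:
  size_t value_ ABSL_GUARDED_BY(mu) = 0;
};

// Usage: absl::MutexLock l(&a.mu); a.Init(17);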
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index a7307a20..ac6ed9b1 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,7 +70,8 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
}
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
- auto* info = s->Register();
+ const size_t test_element_size = 17;
+ auto* info = s->Register(test_element_size);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -81,9 +82,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_element_size);
- info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
- EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -108,7 +108,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,14 +119,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
- EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
TEST(HashtablezInfoTest, RecordStorageChanged) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const size_t test_element_size = 19;
+ info.PrepareForSampling(test_element_size);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +139,8 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
TEST(HashtablezInfoTest, RecordInsert) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const size_t test_element_size = 23;
+ info.PrepareForSampling(test_element_size);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -161,8 +163,7 @@ TEST(HashtablezInfoTest, RecordErase) {
const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
- info.inline_element_size.store(test_element_size);
+ info.PrepareForSampling(test_element_size);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,15 +171,14 @@ TEST(HashtablezInfoTest, RecordErase) {
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
- EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordRehash) {
const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
- info.inline_element_size.store(test_element_size);
+ info.PrepareForSampling(test_element_size);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,13 +197,14 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
- EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordReservation) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const size_t test_element_size = 33;
+ info.PrepareForSampling(test_element_size);
RecordReservationSlow(&info, 3);
EXPECT_EQ(info.max_reserve.load(), 3);
@@ -266,7 +267,8 @@ TEST(HashtablezSamplerTest, Sample) {
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = GlobalHashtablezSampler();
- HashtablezInfoHandle h(sampler.Register());
+ const size_t test_element_size = 39;
+ HashtablezInfoHandle h(sampler.Register(test_element_size));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
@@ -338,18 +340,19 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
- pool.Schedule([&sampler, &stop]() {
+ const size_t elt_size = 10 + i % 2;
+ pool.Schedule([&sampler, &stop, elt_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(elt_size));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(elt_size));
break;
}
case 1: {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4b9f6cc0..362b3cae 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,8 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
std::memory_order_relaxed)]++;
reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
- EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
- sizeof(int64_t));
+ EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
++end_size;
});