Diffstat (limited to 'absl')
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc       |  1 +
-rw-r--r--  absl/container/internal/hashtablez_sampler.h        | 25 ++++++++++++++++
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc  | 19 ++++++++++
-rw-r--r--  absl/container/internal/raw_hash_set.h              | 13 ++++++++
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc        | 25 ++++++++++++++++
-rw-r--r--  absl/strings/internal/cord_rep_btree_reader.h       |  2 +-
6 files changed, 84 insertions(+), 1 deletion(-)
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index ca03d9b6..4b133705 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -68,6 +68,7 @@ void HashtablezInfo::PrepareForSampling() {
   hashes_bitwise_or.store(0, std::memory_order_relaxed);
   hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
   hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+  max_reserve.store(0, std::memory_order_relaxed);
   create_time = absl::Now();
   // The inliner makes hardcoded skip_count difficult (especially when combined
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index d86207f5..812118e3 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -80,6 +80,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_or;
   std::atomic<size_t> hashes_bitwise_and;
   std::atomic<size_t> hashes_bitwise_xor;
+  std::atomic<size_t> max_reserve;

   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions.  They are logically `const` in that sense.
@@ -107,6 +108,18 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
                              std::memory_order_relaxed);
 }

+inline void RecordReservationSlow(HashtablezInfo* info,
+                                  size_t target_capacity) {
+  info->max_reserve.store(
+      (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+                 target_capacity),
+      std::memory_order_relaxed);
+}
+
+inline void RecordClearedReservationSlow(HashtablezInfo* info) {
+  info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
 inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
                                      size_t capacity) {
   info->size.store(size, std::memory_order_relaxed);
@@ -170,6 +183,16 @@ class HashtablezInfoHandle {
     RecordRehashSlow(info_, total_probe_length);
   }

+  inline void RecordReservation(size_t target_capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordReservationSlow(info_, target_capacity);
+  }
+
+  inline void RecordClearedReservation() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordClearedReservationSlow(info_);
+  }
+
   inline void RecordInsert(size_t hash, size_t distance_from_desired) {
     if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
     RecordInsertSlow(info_, hash, distance_from_desired);
@@ -199,6 +222,8 @@ class HashtablezInfoHandle {
   inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
   inline void RecordRehash(size_t /*total_probe_length*/) {}
+  inline void RecordReservation(size_t /*target_capacity*/) {}
+  inline void RecordClearedReservation() {}
   inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
   inline void RecordErase() {}
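The `RecordReservationSlow` added above keeps a high watermark with a relaxed load/`(std::max)`/store sequence rather than a single atomic read-modify-write; that is safe here because each `HashtablezInfo` has a single writer (its table), while the sampler only reads. The parentheses around `(std::max)` prevent a stray `max` macro (e.g. from Windows headers) from expanding. A minimal standalone sketch of the same pattern; the `ReserveWatermark` name is illustrative, not part of Abseil:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    // High-watermark recorder mirroring RecordReservationSlow /
    // RecordClearedReservationSlow: one writer thread updates it, and a
    // sampler thread may read max_reserve at any time.
    struct ReserveWatermark {
      std::atomic<size_t> max_reserve{0};

      void Record(size_t target_capacity) {
        // Relaxed ordering suffices: the value is a statistic read
        // asynchronously, not a flag used for synchronization.
        max_reserve.store(
            (std::max)(max_reserve.load(std::memory_order_relaxed),
                       target_capacity),
            std::memory_order_relaxed);
      }

      void Clear() { max_reserve.store(0, std::memory_order_relaxed); }
    };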
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 53fcfe6f..f053c19b 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -91,6 +91,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+  EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);

   info.capacity.store(1, std::memory_order_relaxed);
@@ -101,6 +102,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
   info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
+  info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);

   info.PrepareForSampling();
@@ -113,6 +115,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+  EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
 }

@@ -187,6 +190,22 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.num_rehashes.load(), 1);
 }

+TEST(HashtablezInfoTest, RecordReservation) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordReservationSlow(&info, 3);
+  EXPECT_EQ(info.max_reserve.load(), 3);
+
+  RecordReservationSlow(&info, 2);
+  // High watermark does not change
+  EXPECT_EQ(info.max_reserve.load(), 3);
+
+  RecordReservationSlow(&info, 10);
+  // High watermark does change
+  EXPECT_EQ(info.max_reserve.load(), 10);
+}
+
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)

 TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezEnabled(true);
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index f4919ce3..212052ea 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1069,6 +1069,8 @@ class raw_hash_set {
     // past that we simply deallocate the array.
     if (capacity_ > 127) {
       destroy_slots();
+
+      infoz().RecordClearedReservation();
     } else if (capacity_) {
       for (size_t i = 0; i != capacity_; ++i) {
         if (IsFull(ctrl_[i])) {
@@ -1382,14 +1384,20 @@ class raw_hash_set {
     if (n == 0 && size_ == 0) {
       destroy_slots();
       infoz().RecordStorageChanged(0, 0);
+      infoz().RecordClearedReservation();
       return;
     }
+
     // bitor is a faster way of doing `max` here. We will round up to the next
     // power-of-2-minus-1, so bitor is good enough.
     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
     // n == 0 unconditionally rehashes as per the standard.
     if (n == 0 || m > capacity_) {
       resize(m);
+
+      // This is after resize, to ensure that we have completed the allocation
+      // and have potentially sampled the hashtable.
+      infoz().RecordReservation(n);
     }
   }

@@ -1397,6 +1405,10 @@ class raw_hash_set {
     if (n > size() + growth_left()) {
       size_t m = GrowthToLowerboundCapacity(n);
       resize(NormalizeCapacity(m));
+
+      // This is after resize, to ensure that we have completed the allocation
+      // and have potentially sampled the hashtable.
+      infoz().RecordReservation(n);
     }
   }

@@ -1638,6 +1650,7 @@ class raw_hash_set {
         PolicyTraits::destroy(&alloc_ref(), slots_ + i);
       }
     }
+
     // Unpoison before returning the memory to the allocator.
     SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
     Deallocate<alignof(slot_type)>(
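Net effect of the `raw_hash_set` hooks above: `reserve()` and `rehash()` record the requested size only when they actually trigger a `resize()`, the record call sits after `resize()` because a table is often first sampled at that allocation, and clearing a large table returns its array and resets the watermark. A hedged usage sketch, assuming this particular table happens to be picked for sampling:

    #include "absl/container/flat_hash_set.h"

    // Illustrative walkthrough of the new bookkeeping on one container.
    void ReservationWatermarkDemo() {
      absl::flat_hash_set<int> table;
      table.reserve(100);  // grows, so max_reserve is recorded as 100
      table.rehash(500);   // grows again; the watermark rises to 500
      table.reserve(50);   // already big enough: no resize, nothing recorded
      table.clear();       // capacity > 127: array freed, watermark reset to 0
    }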
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4012a3aa..b46c4920 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2049,15 +2049,31 @@ TEST(RawHashSamplerTest, Sample) {
   std::vector<IntTable> tables;
   for (int i = 0; i < 1000000; ++i) {
     tables.emplace_back();
+
+    const bool do_reserve = (i % 10 > 5);
+    const bool do_rehash = !do_reserve && (i % 10 > 0);
+
+    if (do_reserve) {
+      // Don't reserve on all tables.
+      tables.back().reserve(10 * (i % 10));
+    }
+
     tables.back().insert(1);
     tables.back().insert(i % 5);
+
+    if (do_rehash) {
+      // Rehash some other tables.
+      tables.back().rehash(10 * (i % 10));
+    }
   }
   size_t end_size = 0;
   std::unordered_map<size_t, int> observed_checksums;
+  std::unordered_map<ssize_t, int> reservations;
   end_size += sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.count(&info) == 0) {
       observed_checksums[info.hashes_bitwise_xor.load(
           std::memory_order_relaxed)]++;
+      reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
     ++end_size;
   });
@@ -2068,6 +2084,15 @@ TEST(RawHashSamplerTest, Sample) {
   for (const auto& [_, count] : observed_checksums) {
     EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
   }
+
+  EXPECT_EQ(reservations.size(), 10);
+  for (const auto& [reservation, count] : reservations) {
+    EXPECT_GE(reservation, 0);
+    EXPECT_LT(reservation, 100);
+
+    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
+        << reservation;
+  }
 }

 #endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
diff --git a/absl/strings/internal/cord_rep_btree_reader.h b/absl/strings/internal/cord_rep_btree_reader.h
index 66e97f5d..7aa79dbf 100644
--- a/absl/strings/internal/cord_rep_btree_reader.h
+++ b/absl/strings/internal/cord_rep_btree_reader.h
@@ -166,7 +166,7 @@ inline size_t CordRepBtreeReader::length() const {
 inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
   assert(tree != nullptr);
   const CordRep* edge = navigator_.InitFirst(tree);
-  remaining_ = tree->length - edge->length;;
+  remaining_ = tree->length - edge->length;
   return CordRepBtree::EdgeData(edge);
 }
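On the distribution asserted in the `RawHashSamplerTest.Sample` change above: each residue of `i % 10` yields exactly one watermark (0 for untouched tables, 10..50 via `rehash`, 60..90 via `reserve`), giving the 10 buckets of roughly 10% each that the test expects; with the roughly 1-in-100 sampling rate the test configures, `(100 * count) / tables.size()` then lands near 0.1 per bucket. A standalone sketch of that arithmetic, copying only the modulo scheme from the test (no Abseil dependency):

    #include <cstdio>
    #include <map>

    int main() {
      std::map<int, int> buckets;
      const int kTables = 1000000;
      for (int i = 0; i < kTables; ++i) {
        const bool do_reserve = (i % 10 > 5);
        const bool do_rehash = !do_reserve && (i % 10 > 0);
        const int watermark = (do_reserve || do_rehash) ? 10 * (i % 10) : 0;
        ++buckets[watermark];
      }
      // Prints 10 buckets (0, 10, ..., 90), each holding 10% of the
      // tables, matching EXPECT_EQ(reservations.size(), 10).
      for (const auto& [watermark, count] : buckets) {
        std::printf("max_reserve=%2d share=%.1f%%\n", watermark,
                    100.0 * count / kTables);
      }
      return 0;
    }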