author    | Evan Brown <ezb@google.com>                   | 2023-07-26 10:33:14 -0700
committer | Copybara-Service <copybara-worker@google.com> | 2023-07-26 10:34:46 -0700
commit    | 7fc3c7fe7283a8b04fe0f79fe6180b6e688a565f (patch)
tree      | e4c8a51e15a7d5d27597b3703f1ba5cb0dbfef52 /absl/container/internal/raw_hash_set_test.cc
parent    | c108cd0382a3659eaf2981b22392b4d5fbc122db (diff)
Change the API constraints of erase(const_iterator, const_iterator) so that calling erase(begin(), end()) resets reserved growth.
PiperOrigin-RevId: 551248712
Change-Id: I34755c63e3ee40da4ba7047e0d24eec567d28173
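
In user-facing terms, this change makes a full-range erase behave like clear() with respect to reserved growth: tombstones are dropped and the reservation stays usable, so a drained table can be refilled without rehashing. A minimal sketch of that effect (the flat_hash_set usage, function name, and sizes here are illustrative, not taken from the commit):

#include <cstdint>

#include "absl/container/flat_hash_set.h"

// Sketch only: after this change, erase(begin(), end()) resets reserved
// growth the same way clear() does.
void DrainAndRefill(absl::flat_hash_set<int64_t>& s) {
  s.reserve(100);
  for (int64_t i = 0; i < 100; ++i) s.insert(i);

  // Full-range erase: now resets reserved growth instead of leaving the
  // old bookkeeping (and its tombstones) in place.
  s.erase(s.begin(), s.end());

  // Refilling up to the reservation should not trigger a rehash or a new
  // allocation.
  for (int64_t i = 0; i < 100; ++i) s.insert(i);
}

The new test in the diff below checks exactly this by freezing the allocator after reserve() and asserting that the tombstone count drops back to zero while capacity stays unchanged.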
Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r-- | absl/container/internal/raw_hash_set_test.cc | 58
1 file changed, 58 insertions, 0 deletions
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 66621cf0..f0947b97 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -60,6 +60,10 @@ struct RawHashSetTestOnlyAccess {
   static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
     return c.slot_array();
   }
+  template <typename C>
+  static size_t CountTombstones(const C& c) {
+    return c.common().TombstonesCount();
+  }
 };
 
 namespace {
@@ -472,6 +476,28 @@ struct MinimumAlignmentUint8Table
   using Base::Base;
 };
 
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+  explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+  template <typename U>
+  explicit FreezableAlloc(const FreezableAlloc<U>& other)
+      : frozen(other.frozen) {}
+
+  template <class U>
+  struct rebind {
+    using other = FreezableAlloc<U>;
+  };
+
+  T* allocate(size_t n) {
+    EXPECT_FALSE(*frozen);
+    return std::allocator<T>::allocate(n);
+  }
+
+  bool* frozen;
+};
+
 struct BadFastHash {
   template <class T>
   size_t operator()(const T&) const {
@@ -479,6 +505,13 @@ struct BadFastHash {
   }
 };
 
+struct BadHashFreezableIntTable
+    : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+                   FreezableAlloc<int64_t>> {
+  using Base = typename BadHashFreezableIntTable::raw_hash_set;
+  using Base::Base;
+};
+
 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                                std::allocator<int>> {
   using Base = typename BadTable::raw_hash_set;
@@ -512,6 +545,7 @@ TEST(Table, EmptyFunctorOptimization) {
 
   struct GenerationData {
     size_t reserved_growth;
+    size_t reservation_size;
     GenerationType* generation;
   };
@@ -2387,6 +2421,30 @@ TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
   EXPECT_EQ(*it, 0);
 }
 
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+  bool frozen = false;
+  BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+  t.reserve(100);
+  const size_t cap = t.capacity();
+  frozen = true;  // no further allocs allowed
+
+  for (int i = 0; i < 10; ++i) {
+    // Create a long run (hash function returns constant).
+    for (int j = 0; j < 100; ++j) t.insert(j);
+    // Erase elements from the middle of the long run, which creates tombstones.
+    for (int j = 30; j < 60; ++j) t.erase(j);
+    EXPECT_EQ(t.size(), 70);
+    EXPECT_EQ(t.capacity(), cap);
+    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
+    t.erase(t.begin(), t.end());
+
+    EXPECT_EQ(t.size(), 0);
+    EXPECT_EQ(t.capacity(), cap);
+    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+  }
+}
+
 TEST(Table, GenerationInfoResetsOnClear) {
   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
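
A note on the testing technique: FreezableAlloc lets the test assert that no allocation happens after a chosen freeze point, which is how it proves the full-range erase reuses the existing backing array rather than reallocating. Here is the same idea as a standalone sketch (MyFreezableAlloc and the std::vector usage are illustrative, not part of the commit):

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

// Standalone version of the FreezableAlloc idea: once *frozen is true,
// any further allocation through this allocator fails an assertion.
template <typename T>
struct MyFreezableAlloc : std::allocator<T> {
  explicit MyFreezableAlloc(bool* f) : frozen(f) {}

  template <typename U>
  explicit MyFreezableAlloc(const MyFreezableAlloc<U>& other)
      : frozen(other.frozen) {}

  template <class U>
  struct rebind {
    using other = MyFreezableAlloc<U>;
  };

  T* allocate(size_t n) {
    assert(!*frozen && "unexpected allocation after freeze");
    return std::allocator<T>::allocate(n);
  }

  bool* frozen;
};

int main() {
  bool frozen = false;
  std::vector<int, MyFreezableAlloc<int>> v(MyFreezableAlloc<int>(&frozen));
  v.reserve(100);  // allocate up front
  frozen = true;   // any allocation from here on trips the assert
  for (int i = 0; i < 100; ++i) v.push_back(i);  // fits in the reservation
  v.clear();  // keeps capacity(); no allocation
}

The test pairs this allocator with BadFastHash, whose constant hash value forces every element into one long probe run, so erasing from the middle of that run reliably produces the tombstones that erase(begin(), end()) must then clear.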