Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 66621cf0..f0947b97 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -60,6 +60,10 @@ struct RawHashSetTestOnlyAccess {
static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
return c.slot_array();
}
+ template <typename C>
+ static size_t CountTombstones(const C& c) {
+ return c.common().TombstonesCount();
+ }
};
namespace {
@@ -472,6 +476,28 @@ struct MinimumAlignmentUint8Table
using Base::Base;
};
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+ explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+ template <typename U>
+ explicit FreezableAlloc(const FreezableAlloc<U>& other)
+ : frozen(other.frozen) {}
+
+ template <class U>
+ struct rebind {
+ using other = FreezableAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
+ EXPECT_FALSE(*frozen);
+ return std::allocator<T>::allocate(n);
+ }
+
+ bool* frozen;
+};
+
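FreezableAlloc follows a simple pattern: the allocator carries a pointer to a bool owned by the test, and once the test flips that flag, any further allocate() call fails the EXPECT_FALSE check. A minimal sketch of the pattern with a standard container (the test name and the use of std::vector here are illustrative only, not part of the patch):

TEST(FreezableAllocSketch, NoAllocationAfterFreeze) {
  bool frozen = false;
  FreezableAlloc<int> alloc(&frozen);
  std::vector<int, FreezableAlloc<int>> v(alloc);
  v.reserve(8);    // allocation is allowed while the flag is false
  frozen = true;   // freeze: any further allocation fails EXPECT_FALSE
  v.push_back(1);  // OK: stays within the already reserved capacity
}

The test added below uses the same idea to prove that repeatedly filling and fully erasing a reserved table never touches the allocator again.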
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
@@ -479,6 +505,13 @@ struct BadFastHash {
}
};
+struct BadHashFreezableIntTable
+ : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+ FreezableAlloc<int64_t>> {
+ using Base = typename BadHashFreezableIntTable::raw_hash_set;
+ using Base::Base;
+};
+
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
@@ -512,6 +545,7 @@ TEST(Table, EmptyFunctorOptimization) {
struct GenerationData {
size_t reserved_growth;
+ size_t reservation_size;
GenerationType* generation;
};
@@ -2387,6 +2421,30 @@ TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
EXPECT_EQ(*it, 0);
}
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+ bool frozen = false;
+ BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+ t.reserve(100);
+ const size_t cap = t.capacity();
+ frozen = true; // no further allocs allowed
+
+ for (int i = 0; i < 10; ++i) {
+ // Create a long run (hash function returns constant).
+ for (int j = 0; j < 100; ++j) t.insert(j);
+ // Erase elements from the middle of the long run, which creates tombstones.
+ for (int j = 30; j < 60; ++j) t.erase(j);
+ EXPECT_EQ(t.size(), 70);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
+ t.erase(t.begin(), t.end());
+
+ EXPECT_EQ(t.size(), 0);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+ }
+}
+
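Because BadFastHash hashes every key to the same value, the 100 inserted keys form one long probe run, and erasing from its middle leaves kDeleted control bytes (tombstones) behind. The assertions above then check that erase(begin(), end()) drops those tombstones and resets reserved growth, so ten fill/erase cycles complete under the frozen allocator without a single new allocation. A hedged user-level sketch of the workload this protects, assuming absl/container/flat_hash_set.h is available in this test (the capacity expectation reflects the intent of this change, not a documented guarantee):

TEST(FlatHashSetSketch, ReserveThenRepeatedFillAndFullErase) {
  absl::flat_hash_set<int64_t> s;
  s.reserve(100);
  const size_t cap = s.capacity();
  for (int round = 0; round < 10; ++round) {
    for (int64_t v = 0; v < 100; ++v) s.insert(v);
    s.erase(s.begin(), s.end());
    // With the full-range erase reset, the table should stay at its
    // reserved capacity across rounds instead of growing or rehashing.
    EXPECT_EQ(s.capacity(), cap);
  }
}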
TEST(Table, GenerationInfoResetsOnClear) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";