author	Vitaly Goldshteyn <goldvitaly@google.com>	2024-06-27 02:09:33 -0700
committer	Copybara-Service <copybara-worker@google.com>	2024-06-27 02:10:24 -0700
commit	0ccc51f9ddbb407d579f8158d5421fbf3eea0524 (patch)
tree	ac234b48655a0d53ce43c827f23dd17865a58b28
parent	16452e1418c1c2a8bcf4a99238e190ba901a20a6 (diff)
Add assertions to detect reentrance in `IterateOverFullSlots` and `absl::erase_if`.
Since we have potential plans to use this function more widely, including in `absl::c_for_each`, we need good error detection.

PiperOrigin-RevId: 647236725
Change-Id: I5035bfb8cef24f80f1bbed83a42380e57d84e428
-rw-r--r--	absl/container/internal/raw_hash_set.h	18
-rw-r--r--	absl/container/internal/raw_hash_set_test.cc	117
2 files changed, 132 insertions(+), 3 deletions(-)
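For context, here is a minimal sketch (not part of this commit) of the reentrant pattern the new assertions are meant to catch. It is written against the public absl::flat_hash_set wrapper over raw_hash_set; the function name and table contents are hypothetical, and absl::erase_if is the public counterpart of the internal EraseIf exercised in the tests below.

#include <cstdint>

#include "absl/container/flat_hash_set.h"

// Hypothetical sketch: the predicate erases an element *other* than the one
// it is currently being called with. This is the reentrance the commit's
// assertions detect; with assertions enabled, the program now dies with
// "hash table was modified unexpectedly" instead of corrupting the walk.
void ReentrantEraseSketch() {
  absl::flat_hash_set<int64_t> t = {1024, 5078, 42, 99};
  int64_t first_value = -1;
  absl::erase_if(t, [&](int64_t x) {
    if (first_value == -1) {
      first_value = x;
      return false;  // Remember the first element, keep it for now.
    }
    t.erase(first_value);  // Reentrant erase of a different element.
    return true;
  });
}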
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 724df193..02e389b9 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1860,8 +1860,8 @@ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
}
// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
-// If kAllowRemoveReentrance is false, no erasure from this table allowed during
-// Callback call. This mode is slightly faster.
+// No insertion into the table is allowed during the callback call.
+// Erasure is allowed only for the element passed to the callback.
template <class SlotType, class Callback>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
const CommonFields& c, SlotType* slot, Callback cb) {
@@ -1890,16 +1890,22 @@ ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
return;
}
size_t remaining = c.size();
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
while (remaining != 0) {
for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
+ assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
cb(ctrl + i, slot + i);
--remaining;
}
ctrl += Group::kWidth;
slot += Group::kWidth;
assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
- "element was erased from hash table unexpectedly");
+ "hash table was modified unexpectedly");
}
+ // NOTE: erasure of the current element is allowed in the callback for the
+ // absl::erase_if specialization, so we use `>=`.
+ assert(original_size_for_assert >= c.size() &&
+ "hash table was modified unexpectedly");
}
template <typename CharAlloc>
@@ -4049,12 +4055,14 @@ struct HashtableFreeFunctionsAccess {
if (c->is_soo()) {
auto it = c->soo_iterator();
if (!pred(*it)) {
+ assert(c->size() == 1 && "hash table was modified unexpectedly");
return 0;
}
c->destroy(it.slot());
c->common().set_empty_soo();
return 1;
}
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
size_t num_deleted = 0;
IterateOverFullSlots(
c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
@@ -4065,6 +4073,10 @@ struct HashtableFreeFunctionsAccess {
++num_deleted;
}
});
+ // NOTE: IterateOverFullSlots allows removal of the current element, so we
+ // additionally verify the size here.
+ assert(original_size_for_assert - num_deleted == c->size() &&
+ "hash table was modified unexpectedly");
return num_deleted;
}
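By contrast, a sketch of usage that stays within the contract documented above: the predicate only inspects each element and never touches the table itself, so only erase_if removes the current slot and the size check holds. Again a hypothetical example against the public absl::flat_hash_set:

#include <cstdint>

#include "absl/container/flat_hash_set.h"

// Supported usage: the predicate performs no table mutation; erase_if itself
// removes the current element, which the `>=` assertion in
// IterateOverFullSlots explicitly permits.
int64_t EraseEvens(absl::flat_hash_set<int64_t>& t) {
  return static_cast<int64_t>(
      absl::erase_if(t, [](int64_t x) { return x % 2 == 0; }));
}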
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index ba603d2b..f1257d4b 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -3173,6 +3173,54 @@ TEST(Table, ForEachMutate) {
}
}
+TYPED_TEST(SooTest, EraseIfReentryDeath) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ auto erase_if_with_removal_reentrance = [](size_t reserve_size) {
+ TypeParam t;
+ t.reserve(reserve_size);
+ int64_t first_value = -1;
+ t.insert(1024);
+ t.insert(5078);
+ auto pred = [&](const auto& x) {
+ if (first_value == -1) {
+ first_value = static_cast<int64_t>(x);
+ return false;
+ }
+ // We erase on the second call to `pred` to reduce the chance that the
+ // assertion will happen in IterateOverFullSlots.
+ t.erase(first_value);
+ return true;
+ };
+ absl::container_internal::EraseIf(pred, &t);
+ };
+ // Removal will likely happen in a different group.
+ EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(1024 * 16),
+ "hash table was modified unexpectedly");
+ // Removal will happen in the same group.
+ EXPECT_DEATH_IF_SUPPORTED(
+ erase_if_with_removal_reentrance(CapacityToGrowth(Group::kWidth - 1)),
+ "hash table was modified unexpectedly");
+}
+
+// This test is useful for testing the SOO branch.
+TYPED_TEST(SooTest, EraseIfReentrySingleElementDeath) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ auto erase_if_with_removal_reentrance = []() {
+ TypeParam t;
+ t.insert(1024);
+ auto pred = [&](const auto& x) {
+ // We erase the current element in order to confuse erase_if.
+ t.erase(static_cast<int64_t>(x));
+ return false;
+ };
+ absl::container_internal::EraseIf(pred, &t);
+ };
+ EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(),
+ "hash table was modified unexpectedly");
+}
+
TEST(Table, EraseBeginEndResetsReservedGrowth) {
bool frozen = false;
BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
@@ -3368,6 +3416,75 @@ TEST(Table, IterateOverFullSlotsFull) {
}
}
+TEST(Table, IterateOverFullSlotsDeathOnRemoval) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ auto iterate_with_reentrant_removal = [](int64_t size,
+ int64_t reserve_size = -1) {
+ if (reserve_size == -1) reserve_size = size;
+ for (int64_t idx = 0; idx < size; ++idx) {
+ NonSooIntTable t;
+ t.reserve(static_cast<size_t>(reserve_size));
+ for (int val = 0; val <= idx; ++val) {
+ t.insert(val);
+ }
+
+ container_internal::IterateOverFullSlots(
+ RawHashSetTestOnlyAccess::GetCommon(t),
+ RawHashSetTestOnlyAccess::GetSlots(t),
+ [&t](const ctrl_t*, auto* i) {
+ int64_t value = **i;
+ // Erase the other element from the 2*k and 2*k+1 pair.
+ t.erase(value ^ 1);
+ });
+ }
+ };
+
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(128),
+ "hash table was modified unexpectedly");
+ // Removal will likely happen in a different group.
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(14, 1024 * 16),
+ "hash table was modified unexpectedly");
+ // Removal will happen in the same group.
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(static_cast<int64_t>(
+ CapacityToGrowth(Group::kWidth - 1))),
+ "hash table was modified unexpectedly");
+}
+
+TEST(Table, IterateOverFullSlotsDeathOnInsert) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ auto iterate_with_reentrant_insert = [](int64_t reserve_size,
+ int64_t size_divisor = 2) {
+ int64_t size = reserve_size / size_divisor;
+ for (int64_t idx = 1; idx <= size; ++idx) {
+ NonSooIntTable t;
+ t.reserve(static_cast<size_t>(reserve_size));
+ for (int val = 1; val <= idx; ++val) {
+ t.insert(val);
+ }
+
+ container_internal::IterateOverFullSlots(
+ RawHashSetTestOnlyAccess::GetCommon(t),
+ RawHashSetTestOnlyAccess::GetSlots(t),
+ [&t](const ctrl_t*, auto* i) {
+ int64_t value = **i;
+ t.insert(-value);
+ });
+ }
+ };
+
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(128),
+ "hash table was modified unexpectedly");
+ // Insert will likely happen in a different group.
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(1024 * 16, 1024 * 2),
+ "hash table was modified unexpectedly");
+ // Insert will happen in the same group.
+ EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(static_cast<int64_t>(
+ CapacityToGrowth(Group::kWidth - 1))),
+ "hash table was modified unexpectedly");
+}
+
template <typename T>
class SooTable : public testing::Test {};
using FreezableSooTableTypes =