Diffstat (limited to 'absl/container/btree_test.cc')
-rw-r--r-- absl/container/btree_test.cc | 174
1 file changed, 157 insertions(+), 17 deletions(-)
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index cc763b29..72f446b2 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -18,6 +18,7 @@
#include <array>
#include <cstdint>
#include <functional>
+#include <iostream>
#include <iterator>
#include <limits>
#include <map>
@@ -46,6 +47,7 @@
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
+#include "absl/types/optional.h"
ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
@@ -1231,8 +1233,10 @@ class BtreeNodePeer {
}
template <typename Btree>
- constexpr static bool UsesGenerations() {
- return Btree::params_type::kEnableGenerations;
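+ // Whether the node's field_type (the integer type used for its
+ // position/count fields) is the same type as slot_type (the stored
+ // values), as happens for e.g. btree_set<uint8_t>.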
+ constexpr static bool FieldTypeEqualsSlotType() {
+ return std::is_same<
+ typename btree_node<typename Btree::params_type>::field_type,
+ typename btree_node<typename Btree::params_type>::slot_type>::value;
}
};
@@ -1461,7 +1465,7 @@ class SizedBtreeSet
using Base = typename SizedBtreeSet::btree_set_container;
public:
- SizedBtreeSet() {}
+ SizedBtreeSet() = default;
using Base::Base;
};
@@ -1479,9 +1483,17 @@ void ExpectOperationCounts(const int expected_moves,
tracker->ResetCopiesMovesSwaps();
}
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+constexpr bool kAsan = true;
+#else
+constexpr bool kAsan = false;
+#endif
+
// Note: when the values in this test change, it is expected to have an impact
// on performance.
TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
+ if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
InstanceTracker tracker;
// Note: this is the minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
@@ -1499,10 +1511,9 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(
- BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- // When we have generations, there is one fewer slot.
- BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeGenerationsEnabled() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1533,6 +1544,8 @@ struct MovableOnlyInstanceThreeWayCompare {
// Note: when the values in this test change, it is expected to have an impact
// on performance.
TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
+ if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
InstanceTracker tracker;
// Note: this is the minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
@@ -1556,10 +1569,9 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(
- BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- // When we have generations, there is one fewer slot.
- BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeGenerationsEnabled() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -3137,27 +3149,104 @@ TEST(Btree, InvalidComparatorsCaught) {
absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
}
+ // Verify that we detect cases of comparators that violate transitivity.
+ // When the comparators below check for the presence of an optional field,
+ // they violate transitivity because instances that have the optional field
+ // compare differently with each other from how they compare with instances
+ // that don't have the optional field.
+ struct ClockTime {
+ absl::optional<int> hour;
+ int minute;
+ };
+ // `comp(a,b) && comp(b,c) && !comp(a,c)` violates transitivity.
+ ClockTime a = {absl::nullopt, 1};
+ ClockTime b = {2, 5};
+ ClockTime c = {6, 0};
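+ // Here cmp(a, b) compares minutes (1 < 5) because `a` has no hour, and
+ // cmp(b, c) compares hours (2 < 6), but cmp(a, c) falls back to minutes
+ // (1 < 0 is false), so transitivity is violated.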
+ {
+ struct NonTransitiveTimeCmp {
+ bool operator()(ClockTime lhs, ClockTime rhs) const {
+ if (lhs.hour.has_value() && rhs.hour.has_value() &&
+ *lhs.hour != *rhs.hour) {
+ return *lhs.hour < *rhs.hour;
+ }
+ return lhs.minute < rhs.minute;
+ }
+ };
+ NonTransitiveTimeCmp cmp;
+ ASSERT_TRUE(cmp(a, b) && cmp(b, c) && !cmp(a, c));
+ absl::btree_set<ClockTime, NonTransitiveTimeCmp> set;
+ EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+ absl::btree_multiset<ClockTime, NonTransitiveTimeCmp> mset;
+ EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+ }
+ {
+ struct ThreeWayNonTransitiveTimeCmp {
+ absl::weak_ordering operator()(ClockTime lhs, ClockTime rhs) const {
+ if (lhs.hour.has_value() && rhs.hour.has_value() &&
+ *lhs.hour != *rhs.hour) {
+ return *lhs.hour < *rhs.hour ? absl::weak_ordering::less
+ : absl::weak_ordering::greater;
+ }
+ return lhs.minute < rhs.minute ? absl::weak_ordering::less
+ : lhs.minute == rhs.minute ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+ };
+ ThreeWayNonTransitiveTimeCmp cmp;
+ ASSERT_TRUE(cmp(a, b) < 0 && cmp(b, c) < 0 && cmp(a, c) > 0);
+ absl::btree_set<ClockTime, ThreeWayNonTransitiveTimeCmp> set;
+ EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+ absl::btree_multiset<ClockTime, ThreeWayNonTransitiveTimeCmp> mset;
+ EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+ }
+}
+
+TEST(Btree, MutatedKeysCaught) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ struct IntPtrCmp {
+ bool operator()(int *lhs, int *rhs) const { return *lhs < *rhs; }
+ };
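+ // The comparator orders by pointee, so mutating arr[0] after it is
+ // inserted reorders keys already in the tree; the next insertion's
+ // is_ordered_correctly check should catch the corruption.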
+ {
+ absl::btree_set<int *, IntPtrCmp> set;
+ int arr[] = {0, 1, 2};
+ set.insert({&arr[0], &arr[1], &arr[2]});
+ arr[0] = 100;
+ EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+ }
+ {
+ absl::btree_multiset<int *, IntPtrCmp> set;
+ int arr[] = {0, 1, 2};
+ set.insert({&arr[0], &arr[0], &arr[1], &arr[1], &arr[2], &arr[2]});
+ arr[0] = 100;
+ EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+ }
}
#ifndef _MSC_VER
// This test crashes on MSVC.
TEST(Btree, InvalidIteratorUse) {
- if (!BtreeNodePeer::UsesGenerations<absl::btree_set<int>>())
+ if (!BtreeGenerationsEnabled())
GTEST_SKIP() << "Generation validation for iterators is disabled.";
+ // Invalid memory use can trigger a heap-use-after-free report in ASan or
+ // an invalidated-iterator assertion.
+ constexpr const char *kInvalidMemoryDeathMessage =
+ "heap-use-after-free|invalidated iterator";
+
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.begin();
set.erase(it++);
- EXPECT_DEATH(set.erase(it++), "invalidated iterator");
+ EXPECT_DEATH(set.erase(it++), kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
- EXPECT_DEATH(*it, "invalidated iterator");
+ EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
@@ -3165,15 +3254,15 @@ TEST(Btree, InvalidIteratorUse) {
auto it = set.find(5000);
ASSERT_NE(it, set.end());
set.erase(1);
- EXPECT_DEATH(*it, "invalidated iterator");
+ EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
- EXPECT_DEATH(void(it == set.begin()), "invalidated iterator");
- EXPECT_DEATH(void(set.begin() == it), "invalidated iterator");
+ EXPECT_DEATH(void(it == set.begin()), kInvalidMemoryDeathMessage);
+ EXPECT_DEATH(void(set.begin() == it), kInvalidMemoryDeathMessage);
}
}
#endif
@@ -3464,6 +3553,57 @@ TEST(Btree, InvalidIteratorComparison) {
EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage);
}
+TEST(Btree, InvalidPointerUse) {
+ if (!kAsan)
+ GTEST_SKIP() << "We only detect invalid pointer use in ASan mode.";
+
+ absl::btree_set<int> set;
+ set.insert(0);
+ const int *ptr = &*set.begin();
+ set.insert(1);
+ EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
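+ // The root leaf node is reallocated as it grows toward full capacity,
+ // leaving pointers into the old allocation dangling; repeat the check
+ // with a fuller node.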
+ size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode<decltype(set)>();
+ for (int i = 2; i < static_cast<int>(slots_per_node) - 1; ++i) set.insert(i);
+ ptr = &*set.begin();
+ set.insert(static_cast<int>(slots_per_node));
+ EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+}
+
+template <typename Set>
+void TestBasicFunctionality(Set set) {
+ using value_type = typename Set::value_type;
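+ // Insert 0..99, erase the top half, then verify that lookup and iteration
+ // agree on each remaining key.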
+ for (int i = 0; i < 100; ++i) { set.insert(value_type(i)); }
+ for (int i = 50; i < 100; ++i) { set.erase(value_type(i)); }
+ auto it = set.begin();
+ for (int i = 0; i < 50; ++i, ++it) {
+ ASSERT_EQ(set.find(value_type(i)), it) << i;
+ }
+}
+
+template <size_t align>
+struct alignas(align) OveralignedKey {
+ explicit OveralignedKey(int i) : key(i) {}
+ bool operator<(const OveralignedKey &other) const { return key < other.key; }
+ int key = 0;
+};
+
+TEST(Btree, OveralignedKey) {
+ // Test basic functionality with both even and odd numbers of slots per node.
+ // The goal here is to detect cases where alignment may be incorrect.
+ TestBasicFunctionality(
+ SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/8>());
+ TestBasicFunctionality(
+ SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/9>());
+}
+
+TEST(Btree, FieldTypeEqualsSlotType) {
+ // This breaks if we try to do layout_type::Pointer<slot_type> because
+ // slot_type is the same as field_type.
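+ // (Presumably Layout::Pointer<T> selects an array by element type, which
+ // is ambiguous when two arrays share the same type.)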
+ using set_type = absl::btree_set<uint8_t>;
+ static_assert(BtreeNodePeer::FieldTypeEqualsSlotType<set_type>(), "");
+ TestBasicFunctionality(set_type());
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END