Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
 absl/container/internal/raw_hash_set_test.cc | 304
 1 file changed, 195 insertions(+), 109 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 302f9758..2783f5c4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -35,7 +35,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
struct RawHashSetTestOnlyAccess {
@@ -49,18 +49,47 @@ namespace {
using ::testing::DoubleNear;
using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(Util, NormalizeCapacity) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+ EXPECT_EQ(1, NormalizeCapacity(0));
+ EXPECT_EQ(1, NormalizeCapacity(1));
+ EXPECT_EQ(3, NormalizeCapacity(2));
+ EXPECT_EQ(3, NormalizeCapacity(3));
+ EXPECT_EQ(7, NormalizeCapacity(4));
+ EXPECT_EQ(7, NormalizeCapacity(7));
+ EXPECT_EQ(15, NormalizeCapacity(8));
+ EXPECT_EQ(15, NormalizeCapacity(15));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
+}
+
+TEST(Util, GrowthAndCapacity) {
+ // Verify that GrowthToCapacity gives the minimum capacity that has enough
+ // growth.
+ for (size_t growth = 0; growth < 10000; ++growth) {
+ SCOPED_TRACE(growth);
+ size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+ // The capacity is large enough for `growth`
+ EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ if (growth != 0 && capacity > 1) {
+ // There is no smaller capacity that works.
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+ }
+ }
+
+ for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+ capacity = 2 * capacity + 1) {
+ SCOPED_TRACE(capacity);
+ size_t growth = CapacityToGrowth(capacity);
+ EXPECT_THAT(growth, Lt(capacity));
+ EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+ EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+ }
}
TEST(Util, probe_seq) {
@@ -107,14 +136,14 @@ TEST(BitMask, WithShift) {
}
TEST(BitMask, LeadingTrailing) {
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
@@ -315,7 +344,25 @@ struct IntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, std::allocator<int64_t>> {
using Base = typename IntTable::raw_hash_set;
- IntTable() {}
+ using Base::Base;
+};
+
+template <typename T>
+struct CustomAlloc : std::allocator<T> {
+ CustomAlloc() {}
+
+ template <typename U>
+ CustomAlloc(const CustomAlloc<U>& other) {}
+
+ template<class U> struct rebind {
+ using other = CustomAlloc<U>;
+ };
+};
+
+struct CustomAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, CustomAlloc<int64_t>> {
+ using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
@@ -343,6 +390,7 @@ TEST(Table, EmptyFunctorOptimization) {
size_t size;
size_t capacity;
size_t growth_left;
+ void* infoz;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -385,10 +433,11 @@ TEST(Table, Prefetch) {
t.prefetch(2);
// Do not run in debug mode, when prefetch is not implemented, or when
- // sanitizers are enabled.
-#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
- !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \
- !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ // sanitizers are enabled, or on WebAssembly.
+#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \
+ !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
+ !defined(__EMSCRIPTEN__)
const auto now = [] { return absl::base_internal::CycleClock::Now(); };
// Make size enough to not fit in L2 cache (16.7 Mb)
@@ -785,7 +834,7 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
TEST(Table, ClearBug) {
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
- constexpr size_t max_size = capacity / 2;
+ constexpr size_t max_size = capacity / 2 + 1;
for (size_t i = 0; i < max_size; ++i) {
t.insert(i);
}
@@ -816,6 +865,25 @@ TEST(Table, Erase) {
EXPECT_TRUE(t.find(0) == t.end());
}
+TEST(Table, EraseMaintainsValidIterator) {
+ IntTable t;
+ const int kNumElements = 100;
+ for (int i = 0; i < kNumElements; i ++) {
+ EXPECT_TRUE(t.emplace(i).second);
+ }
+ EXPECT_EQ(t.size(), kNumElements);
+
+ int num_erase_calls = 0;
+ auto it = t.begin();
+ while (it != t.end()) {
+ t.erase(it++);
+ num_erase_calls++;
+ }
+
+ EXPECT_TRUE(t.empty());
+ EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
// Collect N bad keys by following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1014,7 +1082,7 @@ ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys
ExpectedStats XorSeedExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1051,6 +1119,7 @@ ExpectedStats XorSeedExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1107,7 +1176,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
ExpectedStats LinearTransformExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1144,6 +1213,7 @@ ExpectedStats LinearTransformExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1296,37 +1366,31 @@ TEST(Table, ConstructFromInitList) {
TEST(Table, CopyConstruct) {
IntTable t;
- t.max_load_factor(.321f);
t.emplace(0);
EXPECT_EQ(1, t.size());
{
IntTable u(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u{t};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
}
TEST(Table, CopyConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(t, Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
@@ -1344,94 +1408,75 @@ TEST(Table, AllocWithExplicitCtor) {
TEST(Table, MoveConstruct) {
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t));
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u{std::move(t)};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
}
TEST(Table, MoveConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopyAssign) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopySelfAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
t = *&t;
EXPECT_EQ(1, t.size());
- EXPECT_EQ(lf, t.max_load_factor());
EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
TEST(Table, MoveAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, Equality) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v), std::end(v));
StringTable u = t;
EXPECT_EQ(u, t);
@@ -1439,20 +1484,24 @@ TEST(Table, Equality) {
TEST(Table, Equality2) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
TEST(Table, Equality3) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+ {"bb", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
@@ -1677,7 +1726,7 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node.empty());
StringTable t2;
- auto res = t2.insert(std::move(node));
+ StringTable::insert_return_type res = t2.insert(std::move(node));
EXPECT_TRUE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_FALSE(res.node);
@@ -1707,80 +1756,74 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
-StringTable MakeSimpleTable(size_t size) {
- StringTable t;
- for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), "");
+IntTable MakeSimpleTable(size_t size) {
+ IntTable t;
+ while (t.size() < size) t.insert(t.size());
return t;
}
-std::string OrderOfIteration(const StringTable& t) {
- std::string order;
- for (auto& p : t) order += p.first;
- return order;
+std::vector<int> OrderOfIteration(const IntTable& t) {
+ return {t.begin(), t.end()};
}
+// These IterationOrderChanges tests depend on non-deterministic behavior.
+// We are injecting non-determinism from the pointer of the table, but do so in
+// a way that only the page matters. We have to retry enough times to make sure
+// we are touching different memory pages to cause the ordering to change.
+// We also need to keep the old tables around to avoid getting the same memory
+// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
- // Needs to be more than kWidth elements to be able to affect order.
- const StringTable reference = MakeSimpleTable(20);
-
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- auto trial = MakeSimpleTable(20);
- if (OrderOfIteration(trial) != OrderOfIteration(reference)) {
- // We are done.
- return;
+ for (size_t size : {2, 6, 12, 20}) {
+ const auto reference_table = MakeSimpleTable(size);
+ const auto reference = OrderOfIteration(reference_table);
+
+ std::vector<IntTable> tables;
+ bool found_difference = false;
+ for (int i = 0; !found_difference && i < 5000; ++i) {
+ tables.push_back(MakeSimpleTable(size));
+ found_difference = OrderOfIteration(tables.back()) != reference;
+ }
+ if (!found_difference) {
+ FAIL()
+ << "Iteration order remained the same across many attempts with size "
+ << size;
}
- garbage.push_back(std::move(trial));
}
- FAIL();
}
TEST(Table, IterationOrderChangesOnRehash) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- // Needs to be more than kWidth elements to be able to affect order.
- StringTable t = MakeSimpleTable(20);
- const std::string reference = OrderOfIteration(t);
+ std::vector<IntTable> garbage;
+ for (int i = 0; i < 5000; ++i) {
+ auto t = MakeSimpleTable(20);
+ const auto reference = OrderOfIteration(t);
// Force rehash to the same size.
t.rehash(0);
- std::string trial = OrderOfIteration(t);
+ auto trial = OrderOfIteration(t);
if (trial != reference) {
// We are done.
return;
}
garbage.push_back(std::move(t));
}
- FAIL();
+ FAIL() << "Iteration order remained the same across many attempts.";
}
-TEST(Table, IterationOrderChangesForSmallTables) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- StringTable reference_table = MakeSimpleTable(5);
- const std::string reference = OrderOfIteration(reference_table);
- std::vector<StringTable> garbage;
- for (int i = 0; i < 50; ++i) {
- StringTable t = MakeSimpleTable(5);
- std::string trial = OrderOfIteration(t);
- if (trial != reference) {
- // We are done.
- return;
- }
- garbage.push_back(std::move(t));
- }
- FAIL() << "Iteration order remained the same across many attempts.";
+// Verify that pointers are invalidated as soon as a second element is inserted.
+// This prevents dependency on pointer stability on small tables.
+TEST(Table, UnstablePointers) {
+ IntTable table;
+
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*table.find(i));
+ };
+
+ table.insert(0);
+ const uintptr_t old_ptr = addr(0);
+
+ // This causes a rehash.
+ table.insert(1);
+
+ EXPECT_NE(old_ptr, addr(0));
}
// Confirm that we assert if we try to erase() end().
@@ -1799,9 +1842,52 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
+TEST(RawHashSamplerTest, Sample) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<IntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.01, 0.005);
+}
+
+TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<CustomAllocIntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.00, 0.001);
+}
+
#ifdef ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
+ t.reserve(5);
// Insert something to force an allocation.
int64_t& v1 = *t.insert(0).first;
@@ -1826,5 +1912,5 @@ TEST(Sanitizer, PoisoningOnErase) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
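
As context for the updated NormalizeCapacity expectations in this diff, below is a minimal standalone sketch (not the Abseil implementation) of the invariant those expected values encode: valid capacities have the form 2^k - 1, and a requested capacity is rounded up to the next such value. The helper name is hypothetical and illustrative only.

#include <cassert>
#include <cstddef>

// Illustrative only: round n up to the next value of the form 2^k - 1,
// mirroring the expectations in TEST(Util, NormalizeCapacity).
size_t NormalizeCapacitySketch(size_t n) {
  size_t c = 1;
  while (c < n) c = c * 2 + 1;  // 1, 3, 7, 15, 31, ...
  return c;
}

int main() {
  assert(NormalizeCapacitySketch(0) == 1);
  assert(NormalizeCapacitySketch(4) == 7);
  assert(NormalizeCapacitySketch(8) == 15);
  assert(NormalizeCapacitySketch(16) == 15 * 2 + 1);
  return 0;
}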