Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 172
1 file changed, 142 insertions(+), 30 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index f5ae83c4..7dac65a0 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -14,6 +14,7 @@
#include "absl/container/internal/raw_hash_set.h"
+#include <atomic>
#include <cmath>
#include <cstdint>
#include <deque>
@@ -22,6 +23,8 @@
#include <numeric>
#include <random>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -48,11 +51,10 @@ struct RawHashSetTestOnlyAccess {
namespace {
-using ::testing::DoubleNear;
using ::testing::ElementsAre;
+using ::testing::Eq;
using ::testing::Ge;
using ::testing::Lt;
-using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
@@ -75,8 +77,14 @@ TEST(Util, GrowthAndCapacity) {
for (size_t growth = 0; growth < 10000; ++growth) {
SCOPED_TRACE(growth);
size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
- // The capacity is large enough for `growth`
+ // The capacity is large enough for `growth`.
EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ // For (capacity+1) < kWidth, growth should equal capacity.
+ if (capacity + 1 < Group::kWidth) {
+ EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
+ } else {
+ EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
+ }
if (growth != 0 && capacity > 1) {
// There is no smaller capacity that works.
EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
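The new branch encodes the boundary the growth function must respect: a table smaller than one probe group can be filled completely, while anything at or above a group width must leave slack for the maximum load factor. A minimal sketch of the arithmetic, assuming CapacityToGrowth(c) is roughly c - c / 8 (the 7/8 load factor used by raw_hash_set; capacities are always of the form 2^k - 1, and Group::kWidth is 16 on SSE2 targets):

#include <cassert>
#include <cstddef>

int main() {
  // Hedged stand-in for CapacityToGrowth under the 7/8 load factor.
  auto growth = [](std::size_t c) { return c - c / 8; };
  assert(growth(7) == 7);    // c + 1 == 8  < 16: every slot is usable.
  assert(growth(15) == 14);  // c + 1 == 16 >= 16: one slot of slack.
  assert(growth(31) == 28);  // slack scales with capacity.
  return 0;
}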
@@ -250,25 +258,43 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
}
}
-struct IntPolicy {
- using slot_type = int64_t;
- using key_type = int64_t;
- using init_type = int64_t;
+template <class T>
+struct ValuePolicy {
+ using slot_type = T;
+ using key_type = T;
+ using init_type = T;
- static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
- static void destroy(void*, int64_t*) {}
- static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
- *new_slot = *old_slot;
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ absl::allocator_traits<Allocator>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
}
- static int64_t& element(slot_type* slot) { return *slot; }
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+ }
- template <class F>
- static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
- return std::forward<F>(f)(x, x);
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ static T& element(slot_type* slot) { return *slot; }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
}
};
+using IntPolicy = ValuePolicy<int64_t>;
+
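ValuePolicy generalizes the old IntPolicy: construct/destroy/transfer defer to allocator_traits so any value type works, and apply defers to DecomposeValue, which for set-like tables forwards a single argument as both the lookup key and the construction value; the old policy spelled this out by hand as f(x, x). A sketch of what the table's callback sees, assuming that contract (a fragment, to be read inside any function body):

// The callback receives the argument once as the key (drives hashing and
// equality during the probe) and once as the value (what would be moved
// into the slot on a successful insert).
auto probe_then_construct = [](const int64_t& key, int64_t&& value) {
  return key == value;
};
bool same = IntPolicy::apply(probe_then_construct, int64_t{42});  // true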
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -393,6 +419,13 @@ TEST(Table, EmptyFunctorOptimization) {
size_t growth_left;
void* infoz;
};
+ struct MockTableInfozDisabled {
+ void* ctrl;
+ void* slots;
+ size_t size;
+ size_t capacity;
+ size_t growth_left;
+ };
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
};
@@ -400,17 +433,27 @@ TEST(Table, EmptyFunctorOptimization) {
size_t dummy;
};
- EXPECT_EQ(
- sizeof(MockTable),
- sizeof(
- raw_hash_set<StringPolicy, StatelessHash,
- std::equal_to<absl::string_view>, std::allocator<int>>));
+ if (std::is_empty<HashtablezInfoHandle>::value) {
+ EXPECT_EQ(sizeof(MockTableInfozDisabled),
+ sizeof(raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
- EXPECT_EQ(
- sizeof(MockTable) + sizeof(StatefulHash),
- sizeof(
- raw_hash_set<StringPolicy, StatefulHash,
- std::equal_to<absl::string_view>, std::allocator<int>>));
+ EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash),
+ sizeof(raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+ } else {
+ EXPECT_EQ(sizeof(MockTable),
+ sizeof(raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+
+ EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash),
+ sizeof(raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+ }
}
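When hashtablez sampling is compiled out, HashtablezInfoHandle is an empty class and the table is expected to shed the pointer-sized infoz member entirely, hence the second mock layout guarded by std::is_empty. The saving relies on empty-class compression (the same idea behind CompressedTuple); a standalone sketch of the principle, with sizes that hold on common ABIs:

#include <type_traits>

struct EmptyHandle {};                           // stand-in for the disabled handle
struct AsMember { void* ctrl; EmptyHandle h; };  // an empty member still costs padding
struct AsBase : EmptyHandle { void* ctrl; };     // an empty base can be folded away

static_assert(std::is_empty<EmptyHandle>::value, "");
// Typically: sizeof(AsMember) > sizeof(void*), but sizeof(AsBase) == sizeof(void*).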
TEST(Table, Empty) {
@@ -847,7 +890,8 @@ TEST(Table, EraseMaintainsValidIterator) {
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
static constexpr int kGroupSize = Group::kWidth - 1;
- auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+ auto topk_range = [](size_t b, size_t e,
+ IntTable* t) -> std::vector<int64_t> {
for (size_t i = b; i != e; ++i) {
t->emplace(i);
}
@@ -1001,8 +1045,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
// 1. Create new table and reserve it to keys.size() * 2
// 2. Insert all keys xored with seed
// 3. Collect ProbeStats from final table.
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
- size_t num_iters) {
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+ const std::vector<int64_t>& keys, size_t num_iters) {
const size_t reserve_size = keys.size() * 2;
ProbeStats stats;
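For reference, steps 1 and 2 of the comment above amount to the following sketch (the per-iteration derivation of `seed` is outside this hunk, so it appears here as a free variable):

IntTable t;
t.reserve(keys.size() * 2);   // step 1: reserve to twice the key count
for (int64_t key : keys) {
  t.insert(key ^ seed);       // step 2: perturb keys so each run probes differently
}
// Step 3 then walks the final table and accumulates probe lengths into `stats`.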
@@ -1656,6 +1700,38 @@ TEST(Table, Merge) {
EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
+TEST(Table, IteratorEmplaceConstructibleRequirement) {
+ struct Value {
+ explicit Value(absl::string_view view) : value(view) {}
+ std::string value;
+
+ bool operator==(const Value& other) const { return value == other.value; }
+ };
+ struct H {
+ size_t operator()(const Value& v) const {
+ return absl::Hash<std::string>{}(v.value);
+ }
+ };
+
+ struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
+ std::allocator<Value>> {
+ using Base = typename Table::raw_hash_set;
+ using Base::Base;
+ };
+
+ std::string input[3]{"A", "B", "C"};
+
+ Table t(std::begin(input), std::end(input));
+ EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
+
+ input[0] = "D";
+ input[1] = "E";
+ input[2] = "F";
+ t.insert(std::begin(input), std::end(input));
+ EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
+ Value{"D"}, Value{"E"}, Value{"F"}));
+}
+
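The point of this test: Value is only explicitly constructible from the input strings, so the iterator-pair constructor and insert(first, last) must use direct-initialization (the EmplaceConstructible requirement) rather than copy-initialization. A standalone sketch of the distinction, using a hypothetical analogue of the test's local Value type:

#include <string>
#include <type_traits>
#include "absl/strings/string_view.h"

struct ExplicitFromView {
  explicit ExplicitFromView(absl::string_view) {}
};

// Direct-initialization from the range's value type works...
static_assert(std::is_constructible<ExplicitFromView, std::string&>::value, "");
// ...but copy-initialization (ExplicitFromView v = input[0];) would not compile.
static_assert(!std::is_convertible<std::string&, ExplicitFromView>::value, "");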
TEST(Nodes, EmptyNodeType) {
using node_type = StringTable::node_type;
node_type n;
@@ -1710,6 +1786,26 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
+TEST(Nodes, HintInsert) {
+ IntTable t = {1, 2, 3};
+ auto node = t.extract(1);
+ EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+ auto it = t.insert(t.begin(), std::move(node));
+ EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+ EXPECT_EQ(*it, 1);
+ EXPECT_FALSE(node);
+
+ node = t.extract(2);
+ EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+ // Reinsert 2 to make the next insert fail.

+ t.insert(2);
+ EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+ it = t.insert(t.begin(), std::move(node));
+ EXPECT_EQ(*it, 2);
+ // The node was not emptied by the insert call.
+ EXPECT_TRUE(node);
+}
+
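The second half pins down the node-handle failure contract, mirroring std::unordered_set: on a collision, the hinted insert returns an iterator to the element already in the table and leaves the node full, so the caller can salvage the value. A usage sketch under that assumption (given some IntTable t that already contains 2):

auto node = t.extract(2);
t.insert(2);  // recreate the collision from the test above
auto pos = t.insert(t.begin(), std::move(node));
if (node) {   // insert failed; the node still owns the value
  assert(*pos == 2);                           // pos points at the blocker
  int64_t salvaged = std::move(node.value());  // the value is recoverable
  (void)salvaged;
}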
IntTable MakeSimpleTable(size_t size) {
IntTable t;
while (t.size() < size) t.insert(t.size());
@@ -1804,18 +1900,34 @@ TEST(RawHashSamplerTest, Sample) {
auto& sampler = HashtablezSampler::Global();
size_t start_size = 0;
- start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+ std::unordered_set<const HashtablezInfo*> preexisting_info;
+ start_size += sampler.Iterate([&](const HashtablezInfo& info) {
+ preexisting_info.insert(&info);
+ ++start_size;
+ });
std::vector<IntTable> tables;
for (int i = 0; i < 1000000; ++i) {
tables.emplace_back();
tables.back().insert(1);
+ tables.back().insert(i % 5);
}
size_t end_size = 0;
- end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+ std::unordered_map<size_t, int> observed_checksums;
+ end_size += sampler.Iterate([&](const HashtablezInfo& info) {
+ if (preexisting_info.count(&info) == 0) {
+ observed_checksums[info.hashes_bitwise_xor.load(
+ std::memory_order_relaxed)]++;
+ }
+ ++end_size;
+ });
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
+ EXPECT_EQ(observed_checksums.size(), 5);
+ for (const auto& [_, count] : observed_checksums) {
+ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
+ }
}
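The checksum expectations follow from the insert pattern above. Every table inserts key 1 plus key i % 5, so across tables hashes_bitwise_xor takes exactly five distinct values, one per residue class (for i % 5 == 1 the second insert of 1 is a duplicate and presumably records no new hash, but that class still gets its own checksum, hash(1)). Each class covers 20% of the 1,000,000 tables; combined with the roughly 1% sampling rate asserted just above, the expected sampled count per class is about 0.01 * 200,000 = 2,000, so (100 * count) / tables.size() is about (100 * 2,000) / 1,000,000 = 0.2, which EXPECT_NEAR checks with a 0.05 tolerance.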
#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE