Diffstat (limited to 'absl/container/internal/raw_hash_set.cc')
-rw-r--r--  absl/container/internal/raw_hash_set.cc  |  99
1 file changed, 70 insertions(+), 29 deletions(-)
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 3677ac59..2ff95b61 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -15,34 +15,50 @@
#include "absl/container/internal/raw_hash_set.h"
#include <atomic>
+#include <cassert>
#include <cstddef>
#include <cstring>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/hash/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
-// We have 17 bytes because there may be a generation counter. Any constant is
-// fine for the generation counter.
-alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[17] = {
+// We have space for `growth_left` before a single block of empty control
+// bytes for tables without any slots allocated. This enables removing a
+// branch in the hot path of find(). In order to ensure that the control bytes
+// are aligned to 16, we have 16 bytes before the control bytes even though
+// growth_left only needs 8.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- static_cast<ctrl_t>(0)};
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
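With this layout, the control pointer of an empty table can simply point 16
bytes into kEmptyGroup. A minimal sketch of such an accessor, assuming only the
layout above (the real accessor lives in raw_hash_set.h):

inline ctrl_t* EmptyGroup() {
  // Skip the 16 bytes reserved for `growth_left` plus padding; the result
  // lands on the kSentinel byte, which is itself 16-byte aligned because
  // kEmptyGroup is declared alignas(16).
  return const_cast<ctrl_t*>(kEmptyGroup + 16);
}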
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
+namespace {
+
// Returns "random" seed.
inline size_t RandomSeed() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
+ // On Linux kernels >= 5.4, the MSAN runtime has a false positive when
+ // accessing thread-local storage data from loaded libraries
+ // (https://github.com/google/sanitizers/issues/1265), so `counter` needs to
+ // be annotated as initialized.
+ ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
static std::atomic<size_t> counter(0);
@@ -51,6 +67,32 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
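The seed is deliberately cheap rather than cryptographic: the thread-local
counter varies per call and per thread, and xoring in the counter's own address
picks up per-process entropy from ASLR. A standalone illustration of the same
pattern (toy names, not library code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static std::size_t ToyRandomSeed() {
  static thread_local std::size_t counter = 0;
  std::size_t value = ++counter;
  // Under ASLR, &counter differs from process to process, so two runs of the
  // same binary observe different seed sequences.
  return value ^ static_cast<std::size_t>(
                     reinterpret_cast<std::uintptr_t>(&counter));
}

int main() { std::printf("%zu %zu\n", ToyRandomSeed(), ToyRandomSeed()); }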
+} // namespace
+
+GenerationType* EmptyGeneration() {
+ if (SwisstableGenerationsEnabled()) {
+ constexpr size_t kNumEmptyGenerations = 1024;
+ static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
+ return const_cast<GenerationType*>(
+ &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
+ }
+ return nullptr;
+}
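Returning a random element of kEmptyGenerations, rather than one shared value,
spreads empty tables across 1024 distinct generation addresses, which makes it
likelier that mixing up iterators from different tables is caught instead of
silently passing a pointer-equality check. A sketch of the kind of validity
check generations support (assumed semantics, not the library's exact debug
machinery):

struct ToyIterator {
  const GenerationType* generation_ptr;  // generation of the owning table
  GenerationType generation;             // value captured when created

  void AssertNotInvalidated() const {
    // Mutations that can invalidate iterators bump *generation_ptr, so a
    // stale iterator fails this comparison.
    assert(generation == *generation_ptr);
  }
};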
+
+bool CommonFieldsGenerationInfoEnabled::
+ should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const {
+ if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
+ if (reserved_growth_ > 0) return false;
+ // Note: we can't use the abseil-random library because abseil-random
+ // depends on swisstable. We want to return true with probability
+ // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
+ // we probe based on a random hash and see if the offset is less than
+ // RehashProbabilityConstant().
+ return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+ RehashProbabilityConstant();
+}
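The arithmetic behind the comment: probe offsets are approximately uniform over
the table, so the chance of landing below RehashProbabilityConstant() is about
RehashProbabilityConstant() / capacity. A standalone check of that claim (the
threshold of 16 is an assumption for illustration, not necessarily the
library's constant):

#include <cstddef>
#include <cstdio>
#include <random>

int main() {
  const std::size_t kCapacity = 1023;  // capacities are 2^k - 1
  const std::size_t kThreshold = 16;   // assumed RehashProbabilityConstant()
  std::mt19937_64 rng(42);
  std::uniform_int_distribution<std::size_t> offset(0, kCapacity);
  std::size_t hits = 0;
  const std::size_t kTrials = 1000000;
  for (std::size_t i = 0; i < kTrials; ++i) {
    if (offset(rng) < kThreshold) ++hits;
  }
  std::printf("observed %.4f, expected ~%.4f\n",
              static_cast<double>(hits) / kTrials,
              static_cast<double>(kThreshold) / kCapacity);
}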
+
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
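The body of ShouldInsertBackwards falls outside this hunk. A hypothetical
predicate consistent with the comment above, splitting the residues mod 13 into
two near-equal halves (illustrative only, not the verbatim implementation):

inline bool ToyInsertBackwards(size_t hash) {
  // Residues 7..12 of 13 select "backwards", i.e. roughly 46% of hashes.
  return hash % 13 > 6;
}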
@@ -75,21 +117,22 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
-// Return address of the ith slot in slots where each slot occupies slot_size.
+// Returns the address of the ith slot in slots where each slot occupies
+// slot_size.
static inline void* SlotAddress(void* slot_array, size_t slot,
size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
(slot * slot_size));
}
-// Return the address of the slot just after slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just after slot assuming each slot has the
+// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
}
-// Return the address of the slot just before slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just before slot assuming each slot has the
+// specified size.
static inline void* PrevSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
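Taken together, these helpers are plain byte arithmetic over the untyped slot
array. A small usage sketch (names local to this example):

static void SlotHelpersExample() {
  alignas(16) unsigned char storage[64];  // stand-in slot array, 8-byte slots
  void* s2 = SlotAddress(storage, 2, 8);  // storage + 16 bytes
  void* s3 = NextSlot(s2, 8);             // storage + 24 bytes
  assert(PrevSlot(s3, 8) == s2);
  (void)s2;
  (void)s3;
}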
@@ -97,8 +140,8 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
void DropDeletesWithoutResize(CommonFields& common,
const PolicyFunctions& policy, void* tmp_space) {
void* set = &common;
- void* slot_array = common.slots_;
- const size_t capacity = common.capacity_;
+ void* slot_array = common.slot_array();
+ const size_t capacity = common.capacity();
assert(IsValidCapacity(capacity));
assert(!is_small(capacity));
// Algorithm:
@@ -117,7 +160,7 @@ void DropDeletesWithoutResize(CommonFields& common,
// swap current element with target element
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
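The loop elided between these hunks carries out the algorithm comment above; a
condensed paraphrase in pseudocode (not the verbatim implementation):

// for each slot i marked kDeleted:
//   hash   = (*hasher)(set, SlotAddress(slot_array, i, slot_size));
//   target = find_first_non_full(common, hash).offset;
//   if i and target fall in the same probe group:
//     re-mark i as full (H2(hash)) and move on;
//   else if ctrl[target] is kEmpty:
//     transfer slot i -> target; mark target full and i empty;
//   else:  // ctrl[target] is kDeleted
//     swap slots i and target through tmp_space and reprocess slot i.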
@@ -177,11 +220,11 @@ void DropDeletesWithoutResize(CommonFields& common,
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
assert(IsFull(*it) && "erasing a dangling iterator");
- --c.size_;
- const auto index = static_cast<size_t>(it - c.control_);
- const size_t index_before = (index - Group::kWidth) & c.capacity_;
+ c.set_size(c.size() - 1);
+ const auto index = static_cast<size_t>(it - c.control());
+ const size_t index_before = (index - Group::kWidth) & c.capacity();
const auto empty_after = Group(it).MaskEmpty();
- const auto empty_before = Group(c.control_ + index_before).MaskEmpty();
+ const auto empty_before = Group(c.control() + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -193,26 +236,24 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
slot_size);
- c.growth_left() += (was_never_full ? 1 : 0);
+ c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
c.infoz().RecordErase();
}
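The computation of `was_never_full` sits between the two hunks above. Per the
comment, the slot may be re-marked kEmpty only when the run of non-empty slots
around `it` is shorter than Group::kWidth, so that no probe window over `it`
could ever have appeared entirely full. A hedged sketch of a test with that
meaning (shape assumed, not the verbatim code):

// bool was_never_full =
//     empty_before && empty_after &&
//     empty_after.TrailingZeros() + empty_before.LeadingZeros() <
//         Group::kWidth;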
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
- c.size_ = 0;
+ c.set_size(0);
if (reuse) {
ResetCtrl(c, policy.slot_size);
- c.infoz().RecordStorageChanged(0, c.capacity_);
+ c.infoz().RecordStorageChanged(0, c.capacity());
} else {
- void* set = &c;
- (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
- c.control_ = EmptyGroup();
+ (*policy.dealloc)(c, policy);
+ c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
- c.slots_ = nullptr;
- c.capacity_ = 0;
- c.growth_left() = 0;
+ c.set_slots(nullptr);
+ c.set_capacity(0);
c.infoz().RecordClearedReservation();
- assert(c.size_ == 0);
+ assert(c.size() == 0);
c.infoz().RecordStorageChanged(0, 0);
}
}
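ClearBackingArray's two paths, reset-in-place versus deallocate-and-return-to-
empty, are chosen by the caller. A hypothetical caller sketch (the real
heuristic and its threshold live in raw_hash_set.h; the 128 below is an
assumption for illustration):

void ToyClear(CommonFields& c, const PolicyFunctions& policy) {
  // Assumed policy: keep small backing arrays around for fast refill, free
  // large ones so that clearing actually releases memory.
  const bool reuse = c.capacity() < 128;
  ClearBackingArray(c, policy, reuse);
}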