path: root/absl/container/internal/raw_hash_set.cc
author    Evan Brown <ezb@google.com>    2023-06-30 13:13:07 -0700
committer Copybara-Service <copybara-worker@google.com>    2023-06-30 13:14:03 -0700
commit    40070892631b82c79864ff4d3f7e12105f003157 (patch)
tree      1250cc069914fbb4a8e6931115bfd3a394b12ae8 /absl/container/internal/raw_hash_set.cc
parent    495399deda56d656e3f3cac734734fefeb84f1e6 (diff)
roll forward: Make data members of CommonFields be private so that it's easier to change how we store this information internally.
PiperOrigin-RevId: 544732832
Change-Id: I0c9a30f18edc71b3c7fffe94e2002ff6c52050f8
Diffstat (limited to 'absl/container/internal/raw_hash_set.cc')
-rw-r--r--    absl/container/internal/raw_hash_set.cc    33
1 file changed, 17 insertions(+), 16 deletions(-)
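
The diff below replaces direct access to CommonFields data members (control_, slots_, size_, capacity_, growth_left()) with accessor and setter calls. A minimal sketch of the accessor surface implied by the new call sites follows; member names and types here are placeholders inferred from the diff, not the actual absl::container_internal::CommonFields definition.

// Illustrative sketch only: inferred from the call sites in this diff,
// not the real CommonFields declaration in raw_hash_set.h.
#include <cstddef>

enum class ctrl_t : signed char {};  // placeholder for the real control byte type

class CommonFields {
 public:
  ctrl_t* control() const { return control_; }
  void set_control(ctrl_t* c) { control_ = c; }

  void* slots_ptr() const { return slots_; }
  void set_slots(void* s) { slots_ = s; }

  std::size_t size() const { return size_; }
  void set_size(std::size_t s) { size_ = s; }

  std::size_t capacity() const { return capacity_; }
  void set_capacity(std::size_t c) { capacity_ = c; }

  std::size_t growth_left() const { return growth_left_; }
  void set_growth_left(std::size_t gl) { growth_left_ = gl; }

 private:
  // Previously public; now private so the internal storage layout can change
  // without touching call sites such as the ones in raw_hash_set.cc below.
  ctrl_t* control_ = nullptr;
  void* slots_ = nullptr;
  std::size_t size_ = 0;
  std::size_t capacity_ = 0;
  std::size_t growth_left_ = 0;
};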
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 1ccee1ed..a40e38db 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -15,6 +15,7 @@
#include "absl/container/internal/raw_hash_set.h"
#include <atomic>
+#include <cassert>
#include <cstddef>
#include <cstring>
@@ -130,8 +131,8 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
void DropDeletesWithoutResize(CommonFields& common,
const PolicyFunctions& policy, void* tmp_space) {
void* set = &common;
- void* slot_array = common.slots_;
- const size_t capacity = common.capacity_;
+ void* slot_array = common.slots_ptr();
+ const size_t capacity = common.capacity();
assert(IsValidCapacity(capacity));
assert(!is_small(capacity));
// Algorithm:
@@ -150,7 +151,7 @@ void DropDeletesWithoutResize(CommonFields& common,
// swap current element with target element
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
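
The algorithm comment in this hunk describes an in-place rehash that reclaims tombstones without growing the table: deleted slots become empty, full slots are marked for re-placement, and each marked element is either left where it is, moved into an empty slot, or swapped with another unplaced element. The following is a toy, self-contained sketch of that idea on a simple linear-probing table of ints; it is illustrative only and ignores the group-based probing and SIMD control bytes the real raw_hash_set uses.

// Toy illustration of "drop deletes without resize" on a linear-probing
// table of ints. Names, probing, and layout are simplified stand-ins.
#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

enum class Ctrl { kEmpty, kDeleted, kFull };

struct ToyTable {
  std::vector<Ctrl> ctrl;
  std::vector<int> slot;

  std::size_t Home(int v) const { return std::hash<int>{}(v) % ctrl.size(); }

  void DropDeletesWithoutResize() {
    // Step 1: kDeleted -> kEmpty, kFull -> kDeleted ("needs re-placement").
    for (Ctrl& c : ctrl) c = (c == Ctrl::kFull) ? Ctrl::kDeleted : Ctrl::kEmpty;
    // Step 2: re-place each marked element along its probe sequence.
    for (std::size_t i = 0; i < ctrl.size(); ++i) {
      while (ctrl[i] == Ctrl::kDeleted) {
        std::size_t target = Home(slot[i]);
        while (ctrl[target] == Ctrl::kFull) target = (target + 1) % ctrl.size();
        if (target == i) {
          ctrl[i] = Ctrl::kFull;             // already in its probe position
        } else if (ctrl[target] == Ctrl::kEmpty) {
          slot[target] = slot[i];            // move into the free slot
          ctrl[target] = Ctrl::kFull;
          ctrl[i] = Ctrl::kEmpty;
        } else {                             // target holds an unplaced element:
          std::swap(slot[i], slot[target]);  // swap, mark target as FULL, then
          ctrl[target] = Ctrl::kFull;        // repeat for the element now in slot i
        }
      }
    }
  }
};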
@@ -210,11 +211,11 @@ void DropDeletesWithoutResize(CommonFields& common,
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
assert(IsFull(*it) && "erasing a dangling iterator");
- --c.size_;
- const auto index = static_cast<size_t>(it - c.control_);
- const size_t index_before = (index - Group::kWidth) & c.capacity_;
+ c.set_size(c.size() - 1);
+ const auto index = static_cast<size_t>(it - c.control());
+ const size_t index_before = (index - Group::kWidth) & c.capacity();
const auto empty_after = Group(it).MaskEmpty();
- const auto empty_before = Group(c.control_ + index_before).MaskEmpty();
+ const auto empty_before = Group(c.control() + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -226,26 +227,26 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
slot_size);
- c.growth_left() += (was_never_full ? 1 : 0);
+ c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
c.infoz().RecordErase();
}
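
The comment above the SetCtrl call explains when the erased slot can go back to kEmpty rather than become a kDeleted tombstone: only if no probe window of width Group::kWidth covering the slot could ever have been completely full. The actual condition lives in context elided between the two hunks; a simplified stand-in, using plain 8-bit masks where the real code uses Group::MaskEmpty(), could look like this.

// Simplified stand-ins: 8-wide groups, and bit k set means "slot k in the
// group is empty". Illustrative only, not the abseil types.
#include <cstddef>
#include <cstdint>

constexpr std::size_t kToyWidth = 8;

inline std::size_t TrailingNonEmpties(std::uint8_t empty_mask) {
  // Consecutive non-empty slots at the low end of the group
  // (the erased slot and its neighbors to the right).
  std::size_t n = 0;
  while (n < kToyWidth && ((empty_mask >> n) & 1u) == 0) ++n;
  return n;
}

inline std::size_t LeadingNonEmpties(std::uint8_t empty_mask) {
  // Consecutive non-empty slots at the high end of the group
  // (the neighbors immediately to the left of the erased slot).
  std::size_t n = 0;
  while (n < kToyWidth && ((empty_mask >> (kToyWidth - 1 - n)) & 1u) == 0) ++n;
  return n;
}

// The slot was "never full" only when every width-kToyWidth probe window
// covering it also contains an empty slot, so no probe sequence can ever
// have had to step past it; then it is safe to reset it to kEmpty.
inline bool WasNeverFull(std::uint8_t empty_before, std::uint8_t empty_after) {
  return empty_before != 0 && empty_after != 0 &&
         TrailingNonEmpties(empty_after) + LeadingNonEmpties(empty_before) <
             kToyWidth;
}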
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
- c.size_ = 0;
+ c.set_size(0);
if (reuse) {
ResetCtrl(c, policy.slot_size);
- c.infoz().RecordStorageChanged(0, c.capacity_);
+ c.infoz().RecordStorageChanged(0, c.capacity());
} else {
void* set = &c;
- (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
- c.control_ = EmptyGroup();
+ (*policy.dealloc)(set, policy, c.control(), c.slots_ptr(), c.capacity());
+ c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
- c.slots_ = nullptr;
- c.capacity_ = 0;
- c.growth_left() = 0;
+ c.set_slots(nullptr);
+ c.set_capacity(0);
+ c.set_growth_left(0);
c.infoz().RecordClearedReservation();
- assert(c.size_ == 0);
+ assert(c.size() == 0);
c.infoz().RecordStorageChanged(0, 0);
}
}
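
ClearBackingArray above either resets the control bytes in place (reuse == true) or frees the allocation and returns the table to its empty state. A hypothetical caller, invented here purely for illustration and relying on the declarations in raw_hash_set.h (the real clear() heuristics are not shown in this diff):

// Hypothetical caller, not taken from this diff: a container's clear() might
// keep the backing array for small tables and drop it for large ones.
// The threshold below is invented for illustration.
void ToyClear(CommonFields& c, const PolicyFunctions& policy) {
  constexpr std::size_t kReuseThreshold = 128;  // invented value
  const bool reuse = c.capacity() != 0 && c.capacity() <= kReuseThreshold;
  ClearBackingArray(c, policy, reuse);
}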