// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
//
// find() also supports passing the hash explicitly:
//
//   iterator find(const key_type& key, size_t hash);
//   template <class U>
//   iterator find(const U& key, size_t hash);
//
// In addition the pointer to element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// # Table Layout
//
// A raw_hash_set's backing array consists of control bytes followed by slots
// that may or may not contain objects.
//
// The layout of the backing array, for `capacity` slots, is thus, as a
// pseudo-struct:
//
//   struct BackingArray {
//     // Control bytes for the "real" slots.
//     ctrl_t ctrl[capacity];
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
//     // stop and serves no other purpose.
//     ctrl_t sentinel;
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
//     // that if a probe sequence picks a value near the end of `ctrl`,
//     // `Group` will have valid control bytes to look at.
//     ctrl_t clones[kWidth - 1];
//     // The actual slot data.
//     slot_type slots[capacity];
//   };
//
// The length of this array is computed by `AllocSize()` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
// the slot array. Group manipulation is tightly optimized to be as efficient
// as possible: SSE and friends on x86, clever bit operations on other arches.
//
//      Group 1         Group 2        Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// Each control byte is either a special value marking an empty slot, a deleted
// slot (sometimes called a *tombstone*), or the end-of-table sentinel used by
// iterators, or, if the slot is occupied, seven bits (H2) from the hash of the
// value in the corresponding slot.
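//
// As a concrete, purely illustrative sketch of the sizes involved, assume
// `capacity == 15`, `Group::kWidth == 16`, and a slot type with size and
// alignment 8 (these numbers are not taken from any particular table; they
// only make the arithmetic easy to follow):
//
//   control bytes: capacity + 1 sentinel + (kWidth - 1) clones
//                  = 15 + 1 + 15                    = 31 bytes
//   slot offset:   31 rounded up to alignof(slot)   = 32 bytes
//   total size:    32 + capacity * sizeof(slot)     = 32 + 15 * 8 = 152 bytes
//
// This is the computation performed by `SlotOffset()` and `AllocSize()`
// further below.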
//
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
// objects that cannot possibly be the one we are looking for.
//
// # Table operations.
//
// The key operations are `insert`, `find`, and `erase`.
//
// Since `insert` and `erase` are implemented in terms of `find`, we describe
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
// group of slots in some interesting order.
//
// We now walk through these indices. At each index, we select the entire group
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
//
// The `H2` bits ensure that when we compare a slot to an object with `==`, we
// are likely to have actually found the object. That is, the chance is low
// that `==` is called and returns `false`. Thus, when we search for an object,
// we are unlikely to call `==` many times. This likelihood can be analyzed as
// follows (assuming that H2 is a random enough hash function).
//
// Let's assume that there are `k` "wrong" objects that must be examined in a
// probe sequence. For example, when doing a `find` on an object that is in the
// table, `k` is the number of objects between the start of the probe sequence
// and the final found object (not including the final found object). The
// expected number of objects with an H2 match is then `k/128`. Measurements
// and analysis indicate that even at high load factors, `k` is less than 32,
// meaning that the number of "false positive" comparisons we must perform is
// less than 1/8 per `find`.
//
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
// value presumed to not be in the table (violating this requirement will cause
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
// it, we construct a `probe_seq` once again, and use it to find the first
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
// first such slot in the group and mark it as full with `x`'s H2.
//
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
// perform a `find` to see if it's already present; if it is, we're done. If
// it's not, we may decide the table is getting overcrowded (i.e. the load
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
// each element of the table into the new array (we know that no insertion here
// will insert an already-present value), and discard the old backing array. At
// this point, we may `unchecked_insert` the value `x`.
//
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
// presents a viable, initialized slot pointee to the caller.
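//
// The `find` loop described above can be summarized by the following sketch
// (illustrative pseudo-C++ only; the real implementation is the `find()`
// member function further below, which additionally handles prefetching,
// assertions, and iterator construction):
//
//   size_t h = hash(x);
//   auto seq = probe(ctrl, h, capacity);      // seeded with H1(h)
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(h))) {     // candidates with matching H2
//       if (slots[seq.offset(i)] == x) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;      // an empty slot ends the probe
//     seq.next();                             // advance to the next group
//   }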
//
// `erase` is implemented in terms of `erase_at`, which takes an index to a
// slot. Given an offset, we simply create a tombstone and destroy its contents.
// If we can prove that the slot would not appear in a probe sequence, we can
// mark the slot as empty, instead. We can prove this by observing that if a
// group has any empty slots, it has never been full (assuming we never create
// an empty slot in a group with no empties, which this heuristic guarantees we
// never do) and find would stop at this group anyways (since it does not probe
// beyond groups with empties).
//
// `erase` is `erase_at` composed with `find`: if we have a value `x`, we can
// perform a `find`, and then `erase_at` the resulting slot.
//
// To iterate, we simply traverse the array, skipping empty and deleted slots
// and stopping when we hit a `kSentinel`.

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/prefetch.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;
  swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
               std::false_type /* propagate_on_container_swap */) {}

// The state for a probe sequence.
//
// Currently, the sequence is a triangular progression of the form
//
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
//
// The use of `Width` ensures that each probe step does not overlap groups;
// the sequence effectively outputs the addresses of *groups* (although not
// necessarily aligned to any boundary). The `Group` machinery allows us
// to check an entire group with minimal branching.
//
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
// As described above, the first few entries of the control byte array
// are mirrored at the end of the array, which `Group` will find and use
// for selecting candidates. However, when those candidates' slots are
// actually inspected, there are no corresponding slots for the cloned bytes,
// so we need to make sure we've treated those offsets as "wrapping around".
//
// It turns out that this probe sequence visits every group exactly once if the
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
 public:
  // Creates a new probe sequence using `hash` as the initial value of the
  // sequence and `mask` (usually the capacity of the table) as the mask to
  // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) { assert(((mask + 1) & mask) == 0 && "not a mask"); mask_ = mask; offset_ = hash & mask_; } // The offset within the table, i.e., the value `p(i)` above. size_t offset() const { return offset_; } size_t offset(size_t i) const { return (offset_ + i) & mask_; } void next() { index_ += Width; offset_ += index_; offset_ &= mask_; } // 0-based probe index, a multiple of `Width`. size_t index() const { return index_; } private: size_t mask_; size_t offset_; size_t index_ = 0; }; template struct RequireUsableKey { template std::pair< decltype(std::declval()(std::declval())), decltype(std::declval()(std::declval(), std::declval()))>* operator()(const PassedKey&, const Args&...) const; }; template struct IsDecomposable : std::false_type {}; template struct IsDecomposable< absl::void_t(), std::declval()...))>, Policy, Hash, Eq, Ts...> : std::true_type {}; // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. template constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { using std::swap; return noexcept(swap(std::declval(), std::declval())); } template constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { return false; } template uint32_t TrailingZeros(T x) { ABSL_ASSUME(x != 0); return static_cast(countr_zero(x)); } // An abstract bitmask, such as that emitted by a SIMD instruction. // // Specifically, this type implements a simple bitset whose representation is // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number // of abstract bits in the bitset, while `Shift` is the log-base-two of the // width of an abstract bit in the representation. // This mask provides operations for any number of real bits set in an abstract // bit. To add iteration on top of that, implementation must guarantee no more // than one real bit is set in an abstract bit. template class NonIterableBitMask { public: explicit NonIterableBitMask(T mask) : mask_(mask) {} explicit operator bool() const { return this->mask_ != 0; } // Returns the index of the lowest *abstract* bit set in `self`. uint32_t LowestBitSet() const { return container_internal::TrailingZeros(mask_) >> Shift; } // Returns the index of the highest *abstract* bit set in `self`. uint32_t HighestBitSet() const { return static_cast((bit_width(mask_) - 1) >> Shift); } // Return the number of trailing zero *abstract* bits. uint32_t TrailingZeros() const { return container_internal::TrailingZeros(mask_) >> Shift; } // Return the number of leading zero *abstract* bits. uint32_t LeadingZeros() const { constexpr int total_significant_bits = SignificantBits << Shift; constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; } T mask_; }; // Mask that can be iterable // // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. // // For example: // for (int i : BitMask(0b101)) -> yields 0, 2 // for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 template class BitMask : public NonIterableBitMask { using Base = NonIterableBitMask; static_assert(std::is_unsigned::value, ""); static_assert(Shift == 0 || Shift == 3, ""); public: explicit BitMask(T mask) : Base(mask) {} // BitMask is an iterator over the indices of its abstract bits. 
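  //
  // For example (values taken from the class comment above, shown here only
  // for illustration): with `SignificantBits == 16` and `Shift == 0`,
  //
  //   for (uint32_t i : BitMask<uint32_t, 16>(0b101)) { ... }  // i = 0, 2
  //
  // and with `SignificantBits == 8` and `Shift == 3` (one byte per abstract
  // bit, as used by the portable and NEON groups),
  //
  //   for (uint32_t i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) { ... }
  //   // i = 2, 3
  //
  // operator++ clears the lowest set real bit and operator* reports the index
  // of the lowest abstract bit that is still set.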
using value_type = int; using iterator = BitMask; using const_iterator = BitMask; BitMask& operator++() { this->mask_ &= (this->mask_ - 1); return *this; } uint32_t operator*() const { return Base::LowestBitSet(); } BitMask begin() const { return *this; } BitMask end() const { return BitMask(0); } private: friend bool operator==(const BitMask& a, const BitMask& b) { return a.mask_ == b.mask_; } friend bool operator!=(const BitMask& a, const BitMask& b) { return a.mask_ != b.mask_; } }; using h2_t = uint8_t; // The values here are selected for maximum performance. See the static asserts // below for details. // A `ctrl_t` is a single control byte, which can have one of four // states: empty, deleted, full (which has an associated seven-bit h2_t value) // and the sentinel. They have the following bit patterns: // // empty: 1 0 0 0 0 0 0 0 // deleted: 1 1 1 1 1 1 1 0 // full: 0 h h h h h h h // h represents the hash bits. // sentinel: 1 1 1 1 1 1 1 1 // // These values are specifically tuned for SSE-flavored SIMD. // The static_asserts below detail the source of these choices. // // We use an enum class so that when strict aliasing is enabled, the compiler // knows ctrl_t doesn't alias other types. enum class ctrl_t : int8_t { kEmpty = -128, // 0b10000000 kDeleted = -2, // 0b11111110 kSentinel = -1, // 0b11111111 }; static_assert( (static_cast(ctrl_t::kEmpty) & static_cast(ctrl_t::kDeleted) & static_cast(ctrl_t::kSentinel) & 0x80) != 0, "Special markers need to have the MSB to make checking for them efficient"); static_assert( ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); static_assert( ctrl_t::kSentinel == static_cast(-1), "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " "registers (pcmpeqd xmm, xmm)"); static_assert(ctrl_t::kEmpty == static_cast(-128), "ctrl_t::kEmpty must be -128 to make the SIMD check for its " "existence efficient (psignb xmm, xmm)"); static_assert( (~static_cast(ctrl_t::kEmpty) & ~static_cast(ctrl_t::kDeleted) & static_cast(ctrl_t::kSentinel) & 0x7F) != 0, "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " "shared by ctrl_t::kSentinel to make the scalar test for " "MaskEmptyOrDeleted() efficient"); static_assert(ctrl_t::kDeleted == static_cast(-2), "ctrl_t::kDeleted must be -2 to make the implementation of " "ConvertSpecialToEmptyAndFullToDeleted efficient"); ABSL_DLL extern const ctrl_t kEmptyGroup[16]; // Returns a pointer to a control byte group that can be used by empty tables. inline ctrl_t* EmptyGroup() { // Const must be cast away here; no uses of this function will actually write // to it, because it is only used for empty tables. return const_cast(kEmptyGroup); } // Mixes a randomly generated per-process seed with `hash` and `ctrl` to // randomize insertion order within groups. bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); // Returns a per-table, hash salt, which changes on resize. This gets mixed into // H1 to randomize iteration order per-table. // // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure // non-determinism of iteration order in most cases. inline size_t PerTableSalt(const ctrl_t* ctrl) { // The low bits of the pointer have little or no entropy because of // alignment. We shift the pointer to try to use higher entropy bits. A // good number seems to be 12 bits, because that aligns with page size. 
return reinterpret_cast(ctrl) >> 12; } // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. inline size_t H1(size_t hash, const ctrl_t* ctrl) { return (hash >> 7) ^ PerTableSalt(ctrl); } // Extracts the H2 portion of a hash: the 7 bits not used for H1. // // These are used as an occupied control byte. inline h2_t H2(size_t hash) { return hash & 0x7F; } // Helpers for checking the state of a control byte. inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } #ifdef ABSL_INTERNAL_HAVE_SSE2 // Quick reference guide for intrinsics used below: // // * __m128i: An XMM (128-bit) word. // // * _mm_setzero_si128: Returns a zero vector. // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. // // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. // * _mm_and_si128: Ands two i128s together. // * _mm_or_si128: Ors two i128s together. // * _mm_andnot_si128: And-nots two i128s together. // // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, // filling each lane with 0x00 or 0xff. // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. // // * _mm_loadu_si128: Performs an unaligned load of an i128. // * _mm_storeu_si128: Performs an unaligned store of an i128. // // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first // argument if the corresponding lane of the second // argument is positive, negative, or zero, respectively. // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a // bitmask consisting of those bits. // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low // four bits of each i8 lane in the second argument as // indices. // https://github.com/abseil/abseil-cpp/issues/209 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char // Work around this by using the portable implementation of Group // when using -funsigned-char under GCC. inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { #if defined(__GNUC__) && !defined(__clang__) if (std::is_unsigned::value) { const __m128i mask = _mm_set1_epi8(0x80); const __m128i diff = _mm_subs_epi8(b, a); return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); } #endif return _mm_cmpgt_epi8(a, b); } struct GroupSse2Impl { static constexpr size_t kWidth = 16; // the number of slots per group explicit GroupSse2Impl(const ctrl_t* pos) { ctrl = _mm_loadu_si128(reinterpret_cast(pos)); } // Returns a bitmask representing the positions of slots that match hash. BitMask Match(h2_t hash) const { auto match = _mm_set1_epi8(static_cast(hash)); return BitMask( static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); } // Returns a bitmask representing the positions of empty slots. NonIterableBitMask MaskEmpty() const { #ifdef ABSL_INTERNAL_HAVE_SSSE3 // This only works because ctrl_t::kEmpty is -128. return NonIterableBitMask( static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); #else auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); return NonIterableBitMask( static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); #endif } // Returns a bitmask representing the positions of empty or deleted slots. 
NonIterableBitMask MaskEmptyOrDeleted() const { auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return NonIterableBitMask(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } // Returns the number of trailing empty or deleted elements in the group. uint32_t CountLeadingEmptyOrDeleted() const { auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return TrailingZeros(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { auto msbs = _mm_set1_epi8(static_cast(-128)); auto x126 = _mm_set1_epi8(126); #ifdef ABSL_INTERNAL_HAVE_SSSE3 auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else auto zero = _mm_setzero_si128(); auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); #endif _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); } __m128i ctrl; }; #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) struct GroupAArch64Impl { static constexpr size_t kWidth = 8; explicit GroupAArch64Impl(const ctrl_t* pos) { ctrl = vld1_u8(reinterpret_cast(pos)); } BitMask Match(h2_t hash) const { uint8x8_t dup = vdup_n_u8(hash); auto mask = vceq_u8(ctrl, dup); constexpr uint64_t msbs = 0x8080808080808080ULL; return BitMask( vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); } NonIterableBitMask MaskEmpty() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vceq_s8( vdup_n_s8(static_cast(ctrl_t::kEmpty)), vreinterpret_s8_u8(ctrl))), 0); return NonIterableBitMask(mask); } NonIterableBitMask MaskEmptyOrDeleted() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( vdup_n_s8(static_cast(ctrl_t::kSentinel)), vreinterpret_s8_u8(ctrl))), 0); return NonIterableBitMask(mask); } uint32_t CountLeadingEmptyOrDeleted() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and // kDeleted. We lower all other bits and count number of trailing zeros. // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, // so we should be fine. constexpr uint64_t bits = 0x0101010101010101ULL; return static_cast(countr_zero((mask | ~(mask >> 7)) & bits) >> 3); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = mask & msbs; auto res = (~x + (x >> 7)) & ~lsbs; little_endian::Store64(dst, res); } uint8x8_t ctrl; }; #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN struct GroupPortableImpl { static constexpr size_t kWidth = 8; explicit GroupPortableImpl(const ctrl_t* pos) : ctrl(little_endian::Load64(pos)) {} BitMask Match(h2_t hash) const { // For the technique, see: // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord // (Determine if a word has a byte equal to n). 
// // Caveat: there are false positives but: // - they only occur if there is a real match // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel // - they will be handled gracefully by subsequent checks in code // // Example: // v = 0x1716151413121110 // hash = 0x12 // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = ctrl ^ (lsbs * hash); return BitMask((x - lsbs) & ~x & msbs); } NonIterableBitMask MaskEmpty() const { constexpr uint64_t msbs = 0x8080808080808080ULL; return NonIterableBitMask((ctrl & (~ctrl << 6)) & msbs); } NonIterableBitMask MaskEmptyOrDeleted() const { constexpr uint64_t msbs = 0x8080808080808080ULL; return NonIterableBitMask((ctrl & (~ctrl << 7)) & msbs); } uint32_t CountLeadingEmptyOrDeleted() const { // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and // kDeleted. We lower all other bits and count number of trailing zeros. constexpr uint64_t bits = 0x0101010101010101ULL; return static_cast(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = ctrl & msbs; auto res = (~x + (x >> 7)) & ~lsbs; little_endian::Store64(dst, res); } uint64_t ctrl; }; #ifdef ABSL_INTERNAL_HAVE_SSE2 using Group = GroupSse2Impl; #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) using Group = GroupAArch64Impl; #else using Group = GroupPortableImpl; #endif // Returns he number of "cloned control bytes". // // This is the number of control bytes that are present both at the beginning // of the control byte array and at the end, such that we can create a // `Group::kWidth`-width probe window starting from any control byte. constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } template class raw_hash_set; // Returns whether `n` is a valid capacity (i.e., number of slots). // // A valid capacity is a non-zero integer `2^m - 1`. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } // Applies the following mapping to every byte in the control array: // * kDeleted -> kEmpty // * kEmpty -> kEmpty // * _ -> kDeleted // PRECONDITION: // IsValidCapacity(capacity) // ctrl[capacity] == ctrl_t::kSentinel // ctrl[i] != ctrl_t::kSentinel for all i < capacity void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); // Converts `n` into the next valid capacity, per `IsValidCapacity`. inline size_t NormalizeCapacity(size_t n) { return n ? ~size_t{} >> countl_zero(n) : 1; } // General notes on capacity/growth methods below: // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an // average of two empty slots per group. // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we // never need to probe (the whole table fits in one group) so we don't need a // load factor less than 1. // Given `capacity`, applies the load factor; i.e., it returns the maximum // number of values we should put into the table before a resizing rehash. inline size_t CapacityToGrowth(size_t capacity) { assert(IsValidCapacity(capacity)); // `capacity*7/8` if (Group::kWidth == 8 && capacity == 7) { // x-x/8 does not work when x==7. 
return 6; } return capacity - capacity / 8; } // Given `growth`, "unapplies" the load factor to find how large the capacity // should be to stay within the load factor. // // This might not be a valid capacity and `NormalizeCapacity()` should be // called on this. inline size_t GrowthToLowerboundCapacity(size_t growth) { // `growth*8/7` if (Group::kWidth == 8 && growth == 7) { // x+(x-1)/7 does not work when x==7. return 8; } return growth + static_cast((static_cast(growth) - 1) / 7); } template size_t SelectBucketCountForIterRange(InputIter first, InputIter last, size_t bucket_count) { if (bucket_count != 0) { return bucket_count; } using InputIterCategory = typename std::iterator_traits::iterator_category; if (std::is_base_of::value) { return GrowthToLowerboundCapacity( static_cast(std::distance(first, last))); } return 0; } #define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) inline void AssertIsValid(ctrl_t* ctrl) { ABSL_HARDENING_ASSERT( (ctrl == nullptr || IsFull(*ctrl)) && "Invalid operation on iterator. The element might have " "been erased, the table might have rehashed, or this may " "be an end() iterator."); } struct FindInfo { size_t offset; size_t probe_length; }; // Whether a table is "small". A small table fits entirely into a probing // group, i.e., has a capacity < `Group::kWidth`. // // In small mode we are able to use the whole capacity. The extra control // bytes give us at least one "empty" control byte to stop the iteration. // This is important to make 1 a valid capacity. // // In small mode only the first `capacity` control bytes after the sentinel // are valid. The rest contain dummy ctrl_t::kEmpty values that do not // represent a real slot. This is important to take into account on // `find_first_non_full()`, where we never try // `ShouldInsertBackwards()` for small tables. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } // Begins a probing operation on `ctrl`, using `hash`. inline probe_seq probe(const ctrl_t* ctrl, size_t hash, size_t capacity) { return probe_seq(H1(hash, ctrl), capacity); } // Probes an array of control bits using a probe sequence derived from `hash`, // and returns the offset corresponding to the first deleted or empty slot. // // Behavior when the entire table is full is undefined. // // NOTE: this function must work with tables having both empty and deleted // slots in the same group. Such tables appear during `erase()`. template inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, size_t capacity) { auto seq = probe(ctrl, hash, capacity); while (true) { Group g{ctrl + seq.offset()}; auto mask = g.MaskEmptyOrDeleted(); if (mask) { #if !defined(NDEBUG) // We want to add entropy even when ASLR is not enabled. // In debug build we will randomly insert in either the front or back of // the group. // TODO(kfm,sbenza): revisit after we do unconditional mixing if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { return {seq.offset(mask.HighestBitSet()), seq.index()}; } #endif return {seq.offset(mask.LowestBitSet()), seq.index()}; } seq.next(); assert(seq.index() <= capacity && "full table!"); } } // Extern template for inline function keep possibility of inlining. // When compiler decided to not inline, no symbols will be added to the // corresponding translation unit. 
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire // array as marked as empty. inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { std::memset(ctrl, static_cast(ctrl_t::kEmpty), capacity + 1 + NumClonedBytes()); ctrl[capacity] = ctrl_t::kSentinel; SanitizerPoisonMemoryRegion(slot, slot_size * capacity); } // Sets `ctrl[i]` to `h`. // // Unlike setting it directly, this function will perform bounds checks and // mirror the value to the cloned tail if necessary. inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { assert(i < capacity); auto* slot_i = static_cast(slot) + i * slot_size; if (IsFull(h)) { SanitizerUnpoisonMemoryRegion(slot_i, slot_size); } else { SanitizerPoisonMemoryRegion(slot_i, slot_size); } ctrl[i] = h; ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; } // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); } // Given the capacity of a table, computes the offset (from the start of the // backing allocation) at which the slots begin. inline size_t SlotOffset(size_t capacity, size_t slot_align) { assert(IsValidCapacity(capacity)); const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); return (num_control_bytes + slot_align - 1) & (~slot_align + 1); } // Given the capacity of a table, computes the total size of the backing // array. inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { return SlotOffset(capacity, slot_align) + capacity * slot_size; } // A SwissTable. // // Policy: a policy defines how to perform different operations on // the slots of the hashtable (see hash_policy_traits.h for the full interface // of policy). // // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The // functor should accept a key and return size_t as hash. For best performance // it is important that the hash function provides high entropy across all bits // of the hash. // // Eq: a (possibly polymorphic) functor that compares two keys for equality. It // should accept two (of possibly different type) keys and return a bool: true // if they are equal, false if they are not. If two keys compare equal, then // their hash values as defined by Hash MUST be equal. // // Allocator: an Allocator // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which // the storage of the hashtable will be allocated and the elements will be // constructed and destroyed. template class raw_hash_set { using PolicyTraits = hash_policy_traits; using KeyArgImpl = KeyArg::value && IsTransparent::value>; public: using init_type = typename PolicyTraits::init_type; using key_type = typename PolicyTraits::key_type; // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user // code fixes! 
using slot_type = typename PolicyTraits::slot_type; using allocator_type = Alloc; using size_type = size_t; using difference_type = ptrdiff_t; using hasher = Hash; using key_equal = Eq; using policy_type = Policy; using value_type = typename PolicyTraits::value_type; using reference = value_type&; using const_reference = const value_type&; using pointer = typename absl::allocator_traits< allocator_type>::template rebind_traits::pointer; using const_pointer = typename absl::allocator_traits< allocator_type>::template rebind_traits::const_pointer; // Alias used for heterogeneous lookup functions. // `key_arg` evaluates to `K` when the functors are transparent and to // `key_type` otherwise. It permits template argument deduction on `K` for the // transparent case. template using key_arg = typename KeyArgImpl::template type; private: // Give an early error when key_type is not hashable/eq. auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); using AllocTraits = absl::allocator_traits; using SlotAlloc = typename absl::allocator_traits< allocator_type>::template rebind_alloc; using SlotAllocTraits = typename absl::allocator_traits< allocator_type>::template rebind_traits; static_assert(std::is_lvalue_reference::value, "Policy::element() must return a reference"); template struct SameAsElementReference : std::is_same::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> {}; // An enabler for insert(T&&): T must be convertible to init_type or be the // same as [cv] value_type [ref]. // Note: we separate SameAsElementReference into its own type to avoid using // reference unless we need to. MSVC doesn't seem to like it in some // cases. template using RequiresInsertable = typename std::enable_if< absl::disjunction, SameAsElementReference>::value, int>::type; // RequiresNotInit is a workaround for gcc prior to 7.1. // See https://godbolt.org/g/Y4xsUh. template using RequiresNotInit = typename std::enable_if::value, int>::type; template using IsDecomposable = IsDecomposable; public: static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); class iterator { friend class raw_hash_set; public: using iterator_category = std::forward_iterator_tag; using value_type = typename raw_hash_set::value_type; using reference = absl::conditional_t; using pointer = absl::remove_reference_t*; using difference_type = typename raw_hash_set::difference_type; iterator() {} // PRECONDITION: not an end() iterator. reference operator*() const { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*() called on invalid iterator."); return PolicyTraits::element(slot_); } // PRECONDITION: not an end() iterator. pointer operator->() const { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator-> called on invalid iterator."); return &operator*(); } // PRECONDITION: not an end() iterator. iterator& operator++() { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++ called on invalid iterator."); ++ctrl_; ++slot_; skip_empty_or_deleted(); return *this; } // PRECONDITION: not an end() iterator. 
iterator operator++(int) { auto tmp = *this; ++*this; return tmp; } friend bool operator==(const iterator& a, const iterator& b) { AssertIsValid(a.ctrl_); AssertIsValid(b.ctrl_); return a.ctrl_ == b.ctrl_; } friend bool operator!=(const iterator& a, const iterator& b) { return !(a == b); } private: iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { // This assumption helps the compiler know that any non-end iterator is // not equal to any end iterator. ABSL_ASSUME(ctrl != nullptr); } // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until // they reach one. // // If a sentinel is reached, we null both of them out instead. void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); ctrl_ += shift; slot_ += shift; } if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; } ctrl_t* ctrl_ = nullptr; // To avoid uninitialized member warnings, put slot_ in an anonymous union. // The member is not initialized on singleton and end iterators. union { slot_type* slot_; }; }; class const_iterator { friend class raw_hash_set; public: using iterator_category = typename iterator::iterator_category; using value_type = typename raw_hash_set::value_type; using reference = typename raw_hash_set::const_reference; using pointer = typename raw_hash_set::const_pointer; using difference_type = typename raw_hash_set::difference_type; const_iterator() {} // Implicit construction from iterator. const_iterator(iterator i) : inner_(std::move(i)) {} reference operator*() const { return *inner_; } pointer operator->() const { return inner_.operator->(); } const_iterator& operator++() { ++inner_; return *this; } const_iterator operator++(int) { return inner_++; } friend bool operator==(const const_iterator& a, const const_iterator& b) { return a.inner_ == b.inner_; } friend bool operator!=(const const_iterator& a, const const_iterator& b) { return !(a == b); } private: const_iterator(const ctrl_t* ctrl, const slot_type* slot) : inner_(const_cast(ctrl), const_cast(slot)) {} iterator inner_; }; using node_type = node_handle, Alloc>; using insert_return_type = InsertReturnType; raw_hash_set() noexcept( std::is_nothrow_default_constructible::value&& std::is_nothrow_default_constructible::value&& std::is_nothrow_default_constructible::value) {} explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : ctrl_(EmptyGroup()), settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) { if (bucket_count) { capacity_ = NormalizeCapacity(bucket_count); initialize_slots(); } } raw_hash_set(size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} raw_hash_set(size_t bucket_count, const allocator_type& alloc) : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} explicit raw_hash_set(const allocator_type& alloc) : raw_hash_set(0, hasher(), key_equal(), alloc) {} template raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), hash, eq, alloc) { insert(first, last); } template raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(first, last, bucket_count, hash, key_equal(), 
alloc) {} template raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} template raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} // Instead of accepting std::initializer_list as the first // argument like std::unordered_set does, we have two overloads // that accept std::initializer_list and std::initializer_list. // This is advantageous for performance. // // // Turns {"abc", "def"} into std::initializer_list, then // // copies the strings into the set. // std::unordered_set s = {"abc", "def"}; // // // Turns {"abc", "def"} into std::initializer_list, then // // copies the strings into the set. // absl::flat_hash_set s = {"abc", "def"}; // // The same trick is used in insert(). // // The enabler is necessary to prevent this constructor from triggering where // the copy constructor is meant to be called. // // absl::flat_hash_set a, b{a}; // // RequiresNotInit is a workaround for gcc prior to 7.1. template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, const allocator_type& alloc) : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} raw_hash_set(std::initializer_list init, const allocator_type& alloc) : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} raw_hash_set(const raw_hash_set& that) : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( that.alloc_ref())) {} raw_hash_set(const raw_hash_set& that, const allocator_type& a) : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { reserve(that.size()); // Because the table is guaranteed to be empty, we can do something faster // than a full `insert`. 
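    // In other words: since `*this` is empty and has already been reserved,
    // no value of `that` can be present yet, so each element can be written
    // directly at the first non-full slot of its probe sequence, skipping the
    // find-before-insert step that a regular `insert()` would perform.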
for (const auto& v : that) { const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); auto target = find_first_non_full(ctrl_, hash, capacity_); SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); emplace_at(target.offset, v); infoz().RecordInsert(hash, target.probe_length); } size_ = that.size(); growth_left() -= that.size(); } raw_hash_set(raw_hash_set&& that) noexcept( std::is_nothrow_copy_constructible::value&& std::is_nothrow_copy_constructible::value&& std::is_nothrow_copy_constructible::value) : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), slots_(absl::exchange(that.slots_, nullptr)), size_(absl::exchange(that.size_, size_t{0})), capacity_(absl::exchange(that.capacity_, size_t{0})), // Hash, equality and allocator are copied instead of moved because // `that` must be left valid. If Hash is std::function, moving it // would create a nullptr functor that cannot be called. settings_(absl::exchange(that.growth_left(), size_t{0}), absl::exchange(that.infoz(), HashtablezInfoHandle()), that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} raw_hash_set(raw_hash_set&& that, const allocator_type& a) : ctrl_(EmptyGroup()), slots_(nullptr), size_(0), capacity_(0), settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), a) { if (a == that.alloc_ref()) { std::swap(ctrl_, that.ctrl_); std::swap(slots_, that.slots_); std::swap(size_, that.size_); std::swap(capacity_, that.capacity_); std::swap(growth_left(), that.growth_left()); std::swap(infoz(), that.infoz()); } else { reserve(that.size()); // Note: this will copy elements of dense_set and unordered_set instead of // moving them. This can be fixed if it ever becomes an issue. for (auto& elem : that) insert(std::move(elem)); } } raw_hash_set& operator=(const raw_hash_set& that) { raw_hash_set tmp(that, AllocTraits::propagate_on_container_copy_assignment::value ? that.alloc_ref() : alloc_ref()); swap(tmp); return *this; } raw_hash_set& operator=(raw_hash_set&& that) noexcept( absl::allocator_traits::is_always_equal::value&& std::is_nothrow_move_assignable::value&& std::is_nothrow_move_assignable::value) { // TODO(sbenza): We should only use the operations from the noexcept clause // to make sure we actually adhere to that contract. return move_assign( std::move(that), typename AllocTraits::propagate_on_container_move_assignment()); } ~raw_hash_set() { destroy_slots(); } iterator begin() { auto it = iterator_at(0); it.skip_empty_or_deleted(); return it; } iterator end() { return {}; } const_iterator begin() const { return const_cast(this)->begin(); } const_iterator end() const { return {}; } const_iterator cbegin() const { return begin(); } const_iterator cend() const { return end(); } bool empty() const { return !size(); } size_t size() const { return size_; } size_t capacity() const { return capacity_; } size_t max_size() const { return (std::numeric_limits::max)(); } ABSL_ATTRIBUTE_REINITIALIZES void clear() { // Iterating over this container is O(bucket_count()). When bucket_count() // is much greater than size(), iteration becomes prohibitively expensive. // For clear() it is more important to reuse the allocated array when the // container is small because allocation takes comparatively long time // compared to destruction of the elements of the container. So we pick the // largest bucket_count() threshold for which iteration is still fast and // past that we simply deallocate the array. 
if (capacity_ > 127) { destroy_slots(); infoz().RecordClearedReservation(); } else if (capacity_) { for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { PolicyTraits::destroy(&alloc_ref(), slots_ + i); } } size_ = 0; ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); reset_growth_left(); } assert(empty()); infoz().RecordStorageChanged(0, capacity_); } // This overload kicks in when the argument is an rvalue of insertable and // decomposable type other than init_type. // // flat_hash_map m; // m.insert(std::make_pair("abc", 42)); // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> std::pair insert(T&& value) { return emplace(std::forward(value)); } // This overload kicks in when the argument is a bitfield or an lvalue of // insertable and decomposable type. // // union { int n : 1; }; // flat_hash_set s; // s.insert(n); // // flat_hash_set s; // const char* p = "hello"; // s.insert(p); // // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace // RequiresInsertable with RequiresInsertable. // We are hitting this bug: https://godbolt.org/g/1Vht4f. template < class T, RequiresInsertable = 0, typename std::enable_if::value, int>::type = 0> std::pair insert(const T& value) { return emplace(value); } // This overload kicks in when the argument is an rvalue of init_type. Its // purpose is to handle brace-init-list arguments. // // flat_hash_map s; // s.insert({"abc", 42}); std::pair insert(init_type&& value) { return emplace(std::move(value)); } // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> iterator insert(const_iterator, T&& value) { return insert(std::forward(value)).first; } // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace // RequiresInsertable with RequiresInsertable. // We are hitting this bug: https://godbolt.org/g/1Vht4f. template < class T, RequiresInsertable = 0, typename std::enable_if::value, int>::type = 0> iterator insert(const_iterator, const T& value) { return insert(value).first; } iterator insert(const_iterator, init_type&& value) { return insert(std::move(value)).first; } template void insert(InputIt first, InputIt last) { for (; first != last; ++first) emplace(*first); } template = 0, RequiresInsertable = 0> void insert(std::initializer_list ilist) { insert(ilist.begin(), ilist.end()); } void insert(std::initializer_list ilist) { insert(ilist.begin(), ilist.end()); } insert_return_type insert(node_type&& node) { if (!node) return {end(), false, node_type()}; const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); auto res = PolicyTraits::apply( InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, elem); if (res.second) { CommonAccess::Reset(&node); return {res.first, true, node_type()}; } else { return {res.first, false, std::move(node)}; } } iterator insert(const_iterator, node_type&& node) { auto res = insert(std::move(node)); node = std::move(res.node); return res.position; } // This overload kicks in if we can deduce the key from args. This enables us // to avoid constructing value_type if an entry with the same key already // exists. // // For example: // // flat_hash_map m = {{"abc", "def"}}; // // Creates no std::string copies and makes no heap allocations. // m.emplace("abc", "xyz"); template ::value, int>::type = 0> std::pair emplace(Args&&... 
args) { return PolicyTraits::apply(EmplaceDecomposable{*this}, std::forward(args)...); } // This overload kicks in if we cannot deduce the key from args. It constructs // value_type unconditionally and then either moves it into the table or // destroys. template ::value, int>::type = 0> std::pair emplace(Args&&... args) { alignas(slot_type) unsigned char raw[sizeof(slot_type)]; slot_type* slot = reinterpret_cast(&raw); PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); const auto& elem = PolicyTraits::element(slot); return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); } template iterator emplace_hint(const_iterator, Args&&... args) { return emplace(std::forward(args)...).first; } // Extension API: support for lazy emplace. // // Looks up key in the table. If found, returns the iterator to the element. // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`. // // `f` must abide by several restrictions: // - it MUST call `raw_hash_set::constructor` with arguments as if a // `raw_hash_set::value_type` is constructed, // - it MUST NOT access the container before the call to // `raw_hash_set::constructor`, and // - it MUST NOT erase the lazily emplaced element. // Doing any of these is undefined behavior. // // For example: // // std::unordered_set s; // // Makes ArenaStr even if "abc" is in the map. // s.insert(ArenaString(&arena, "abc")); // // flat_hash_set s; // // Makes ArenaStr only if "abc" is not in the map. // s.lazy_emplace("abc", [&](const constructor& ctor) { // ctor(&arena, "abc"); // }); // // WARNING: This API is currently experimental. If there is a way to implement // the same thing with the rest of the API, prefer that. class constructor { friend class raw_hash_set; public: template void operator()(Args&&... args) const { assert(*slot_); PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); *slot_ = nullptr; } private: constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} allocator_type* alloc_; slot_type** slot_; }; template iterator lazy_emplace(const key_arg& key, F&& f) { auto res = find_or_prepare_insert(key); if (res.second) { slot_type* slot = slots_ + res.first; std::forward(f)(constructor(&alloc_ref(), &slot)); assert(!slot); } return iterator_at(res.first); } // Extension API: support for heterogeneous keys. // // std::unordered_set s; // // Turns "abc" into std::string. // s.erase("abc"); // // flat_hash_set s; // // Uses "abc" directly without copying it into std::string. // s.erase("abc"); template size_type erase(const key_arg& key) { auto it = find(key); if (it == end()) return 0; erase(it); return 1; } // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, // this method returns void to reduce algorithmic complexity to O(1). The // iterator is invalidated, so any increment should be done before calling // erase. In order to erase while iterating across a map, use the following // idiom (which also works for standard containers): // // for (auto it = m.begin(), end = m.end(); it != end;) { // // `erase()` will invalidate `it`, so advance `it` first. // auto copy_it = it++; // if () { // m.erase(copy_it); // } // } void erase(const_iterator cit) { erase(cit.inner_); } // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. 
void erase(iterator it) { ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase() called on invalid iterator."); PolicyTraits::destroy(&alloc_ref(), it.slot_); erase_meta_only(it); } iterator erase(const_iterator first, const_iterator last) { while (first != last) { erase(first++); } return last.inner_; } // Moves elements from `src` into `this`. // If the element already exists in `this`, it is left unmodified in `src`. template void merge(raw_hash_set& src) { // NOLINT assert(this != &src); for (auto it = src.begin(), e = src.end(); it != e;) { auto next = std::next(it); if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, PolicyTraits::element(it.slot_)) .second) { src.erase_meta_only(it); } it = next; } } template void merge(raw_hash_set&& src) { merge(src); } node_type extract(const_iterator position) { ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract() called on invalid iterator."); auto node = CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); erase_meta_only(position); return node; } template < class K = key_type, typename std::enable_if::value, int>::type = 0> node_type extract(const key_arg& key) { auto it = find(key); return it == end() ? node_type() : extract(const_iterator{it}); } void swap(raw_hash_set& that) noexcept( IsNoThrowSwappable() && IsNoThrowSwappable() && IsNoThrowSwappable( typename AllocTraits::propagate_on_container_swap{})) { using std::swap; swap(ctrl_, that.ctrl_); swap(slots_, that.slots_); swap(size_, that.size_); swap(capacity_, that.capacity_); swap(growth_left(), that.growth_left()); swap(hash_ref(), that.hash_ref()); swap(eq_ref(), that.eq_ref()); swap(infoz(), that.infoz()); SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{}); } void rehash(size_t n) { if (n == 0 && capacity_ == 0) return; if (n == 0 && size_ == 0) { destroy_slots(); infoz().RecordStorageChanged(0, 0); infoz().RecordClearedReservation(); return; } // bitor is a faster way of doing `max` here. We will round up to the next // power-of-2-minus-1, so bitor is good enough. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); // n == 0 unconditionally rehashes as per the standard. if (n == 0 || m > capacity_) { resize(m); // This is after resize, to ensure that we have completed the allocation // and have potentially sampled the hashtable. infoz().RecordReservation(n); } } void reserve(size_t n) { if (n > size() + growth_left()) { size_t m = GrowthToLowerboundCapacity(n); resize(NormalizeCapacity(m)); // This is after resize, to ensure that we have completed the allocation // and have potentially sampled the hashtable. infoz().RecordReservation(n); } } // Extension API: support for heterogeneous keys. // // std::unordered_set s; // // Turns "abc" into std::string. // s.count("abc"); // // ch_set s; // // Uses "abc" directly without copying it into std::string. // s.count("abc"); template size_t count(const key_arg& key) const { return find(key) == end() ? 0 : 1; } // Issues CPU prefetch instructions for the memory needed to find or insert // a key. Like all lookup functions, this support heterogeneous keys. // // NOTE: This is a very low level operation and should not be used without // specific benchmarks indicating its importance. template void prefetch(const key_arg& key) const { (void)key; // Avoid probing if we won't be able to prefetch the addresses received. 
#ifdef ABSL_INTERNAL_HAVE_PREFETCH
    prefetch_heap_block();
    auto seq = probe(ctrl_, hash_ref()(key), capacity_);
    base_internal::PrefetchT0(ctrl_ + seq.offset());
    base_internal::PrefetchT0(slots_ + seq.offset());
#endif  // ABSL_INTERNAL_HAVE_PREFETCH
  }

  // The API of find() has two extensions.
  //
  // 1. The hash can be passed by the user. It must be equal to the hash of the
  // key.
  //
  // 2. The type of the key argument doesn't have to be key_type. This is
  // so-called heterogeneous key support.
  template <class K = key_type>
  iterator find(const key_arg<K>& key, size_t hash) {
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return iterator_at(seq.offset(i));
      }
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
  }
  template <class K = key_type>
  iterator find(const key_arg<K>& key) {
    prefetch_heap_block();
    return find(key, hash_ref()(key));
  }

  template <class K = key_type>
  const_iterator find(const key_arg<K>& key, size_t hash) const {
    return const_cast<raw_hash_set*>(this)->find(key, hash);
  }
  template <class K = key_type>
  const_iterator find(const key_arg<K>& key) const {
    prefetch_heap_block();
    return find(key, hash_ref()(key));
  }

  template <class K = key_type>
  bool contains(const key_arg<K>& key) const {
    return find(key) != end();
  }

  template <class K = key_type>
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
    auto it = find(key);
    if (it != end()) return {it, std::next(it)};
    return {it, it};
  }
  template <class K = key_type>
  std::pair<const_iterator, const_iterator> equal_range(
      const key_arg<K>& key) const {
    auto it = find(key);
    if (it != end()) return {it, std::next(it)};
    return {it, it};
  }

  size_t bucket_count() const { return capacity_; }
  float load_factor() const {
    return capacity_ ? static_cast<float>(size()) / capacity_ : 0.0;
  }
  float max_load_factor() const { return 1.0f; }
  void max_load_factor(float) {
    // Does nothing.
  }

  hasher hash_function() const { return hash_ref(); }
  key_equal key_eq() const { return eq_ref(); }
  allocator_type get_allocator() const { return alloc_ref(); }

  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
    if (a.size() != b.size()) return false;
    const raw_hash_set* outer = &a;
    const raw_hash_set* inner = &b;
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
    for (const value_type& elem : *outer)
      if (!inner->has_element(elem)) return false;
    return true;
  }

  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
    return !(a == b);
  }

  template <typename H>
  friend typename std::enable_if<H::template is_hashable<value_type>::value,
                                 H>::type
  AbslHashValue(H h, const raw_hash_set& s) {
    return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
                      s.size());
  }

  friend void swap(raw_hash_set& a,
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
    a.swap(b);
  }

 private:
  template <class Container, typename Enabler>
  friend struct absl::container_internal::hashtable_debug_internal::
      HashtableDebugAccess;

  struct FindElement {
    template <class K, class... Args>
    const_iterator operator()(const K& key, Args&&...) const {
      return s.find(key);
    }
    const raw_hash_set& s;
  };

  struct HashElement {
    template <class K, class... Args>
    size_t operator()(const K& key, Args&&...) const {
      return h(key);
    }
    const hasher& h;
  };

  template <class K1>
  struct EqualElement {
    template <class K2, class... Args>
    bool operator()(const K2& lhs, Args&&...) const {
      return eq(lhs, rhs);
    }
    const K1& rhs;
    const key_equal& eq;
  };

  struct EmplaceDecomposable {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&...
                                         args) const {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        s.emplace_at(res.first, std::forward<Args>(args)...);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
  };

  template <bool do_destroy>
  struct InsertSlot {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
      } else if (do_destroy) {
        PolicyTraits::destroy(&s.alloc_ref(), &slot);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
    // Constructed slot. Either moved into place or destroyed.
    slot_type&& slot;
  };

  // Erases, but does not destroy, the value pointed to by `it`.
  //
  // This merely updates the pertinent control byte. This can be used in
  // conjunction with Policy::transfer to move the object to another place.
  void erase_meta_only(const_iterator it) {
    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
    --size_;
    const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
    const size_t index_before = (index - Group::kWidth) & capacity_;
    const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
    const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();

    // We count how many consecutive non-empties we have to the right and to
    // the left of `it`. If the sum is >= kWidth then there is at least one
    // probe window that might have seen a full group.
    bool was_never_full =
        empty_before && empty_after &&
        static_cast<size_t>(empty_after.TrailingZeros() +
                            empty_before.LeadingZeros()) < Group::kWidth;

    SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
            capacity_, ctrl_, slots_, sizeof(slot_type));
    growth_left() += was_never_full;
    infoz().RecordErase();
  }

  // Allocates a backing array for `self` and initializes its control bytes.
  // This reads `capacity_` and updates all other fields based on the result of
  // the allocation.
  //
  // This does not free the currently held array; `capacity_` must be nonzero.
  void initialize_slots() {
    assert(capacity_);
    // Folks with custom allocators often make unwarranted assumptions about
    // the behavior of their classes vis-a-vis trivial destructibility and what
    // calls they will or won't make. Avoid sampling for people with custom
    // allocators to get us out of this mess. This is not a hard guarantee but
    // a workaround while we plan the exact guarantee we want to provide.
    //
    // People are often sloppy with the exact type of their allocator
    // (sometimes it has an extra const or is missing the pair, but rebinds
    // made it work anyway). To avoid the ambiguity, we work off SlotAlloc
    // which we have bound more carefully.
    if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
        slots_ == nullptr) {
      infoz() = Sample(sizeof(slot_type));
    }

    char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
        &alloc_ref(),
        AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
    ctrl_ = reinterpret_cast<ctrl_t*>(mem);
    slots_ = reinterpret_cast<slot_type*>(
        mem + SlotOffset(capacity_, alignof(slot_type)));
    ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
    reset_growth_left();
    infoz().RecordStorageChanged(size_, capacity_);
  }

  // Destroys all slots in the backing array, frees the backing array, and
  // clears all top-level book-keeping data.
  //
  // This essentially implements `map = raw_hash_set();`.
  void destroy_slots() {
    if (!capacity_) return;
    for (size_t i = 0; i != capacity_; ++i) {
      if (IsFull(ctrl_[i])) {
        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
      }
    }

    // Unpoison before returning the memory to the allocator.
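    // (Added note, assuming the usual sanitizer contract: SetCtrl keeps
    // unoccupied slots poisoned, so the whole slot region has to be unpoisoned
    // here before the allocator reclaims and reuses the memory.)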
    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
    Deallocate<alignof(slot_type)>(
        &alloc_ref(), ctrl_,
        AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
    ctrl_ = EmptyGroup();
    slots_ = nullptr;
    size_ = 0;
    capacity_ = 0;
    growth_left() = 0;
  }

  void resize(size_t new_capacity) {
    assert(IsValidCapacity(new_capacity));
    auto* old_ctrl = ctrl_;
    auto* old_slots = slots_;
    const size_t old_capacity = capacity_;
    capacity_ = new_capacity;
    initialize_slots();

    size_t total_probe_length = 0;
    for (size_t i = 0; i != old_capacity; ++i) {
      if (IsFull(old_ctrl[i])) {
        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                          PolicyTraits::element(old_slots + i));
        auto target = find_first_non_full(ctrl_, hash, capacity_);
        size_t new_i = target.offset;
        total_probe_length += target.probe_length;
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
      }
    }
    if (old_capacity) {
      SanitizerUnpoisonMemoryRegion(old_slots,
                                    sizeof(slot_type) * old_capacity);
      Deallocate<alignof(slot_type)>(
          &alloc_ref(), old_ctrl,
          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
    }
    infoz().RecordRehash(total_probe_length);
  }

  // Prunes control bytes to remove as many tombstones as possible.
  //
  // See the comment on `rehash_and_grow_if_necessary()`.
  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
    assert(IsValidCapacity(capacity_));
    assert(!is_small(capacity_));
    // Algorithm:
    // - mark all DELETED slots as EMPTY
    // - mark all FULL slots as DELETED
    // - for each slot marked as DELETED
    //     hash = Hash(element)
    //     target = find_first_non_full(hash)
    //     if target is in the same group
    //       mark slot as FULL
    //     else if target is EMPTY
    //       transfer element to target
    //       mark slot as EMPTY
    //       mark target as FULL
    //     else if target is DELETED
    //       swap current element with target element
    //       mark target as FULL
    //       repeat procedure for current slot with moved from element (target)
    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
    size_t total_probe_length = 0;
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
    for (size_t i = 0; i != capacity_; ++i) {
      if (!IsDeleted(ctrl_[i])) continue;
      const size_t hash = PolicyTraits::apply(
          HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
      const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
      const size_t new_i = target.offset;
      total_probe_length += target.probe_length;

      // Check whether the old and new positions fall within the same probe
      // group relative to the hash. If they do, we don't need to move the
      // object: it already sits in the best group its probe sequence can
      // reach.
      const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
      const auto probe_index = [probe_offset, this](size_t pos) {
        return ((pos - probe_offset) & capacity_) / Group::kWidth;
      };

      // Element doesn't move.
      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
        SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        continue;
      }
      if (IsEmpty(ctrl_[new_i])) {
        // Transfer element to the empty spot.
        // SetCtrl poisons/unpoisons the slots so we have to call it at the
        // right time.
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
        SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_,
                sizeof(slot_type));
      } else {
        assert(IsDeleted(ctrl_[new_i]));
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        // Until we are done rehashing, DELETED marks previously FULL slots.
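        // (Added note: the three transfers below perform a swap through the
        // stack-allocated `slot` temporary, since slot_type objects are moved
        // via Policy::transfer rather than swapped directly.)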
        // Swap i and new_i elements.
        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
        --i;  // repeat
      }
    }
    reset_growth_left();
    infoz().RecordRehash(total_probe_length);
  }

  // Called whenever the table *might* need to conditionally grow.
  //
  // This function is an optimization opportunity to perform a rehash even when
  // growth is unnecessary, because vacating tombstones is beneficial for
  // performance in the long-run.
  void rehash_and_grow_if_necessary() {
    if (capacity_ == 0) {
      resize(1);
    } else if (capacity_ > Group::kWidth &&
               // Do these calculations in 64-bit to avoid overflow.
               size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
      // Squash DELETED without growing if there is enough capacity.
      //
      // Rehash in place if the current size is <= 25/32 of capacity_.
      // Rationale for such a high factor: 1) drop_deletes_without_resize() is
      // faster than resize, and 2) it takes quite a bit of work to add
      // tombstones. In the worst case, it seems to take approximately 4
      // insert/erase pairs to create a single tombstone, so if we are
      // rehashing because of tombstones, we can afford to rehash-in-place as
      // long as we are reclaiming at least 1/8 the capacity without doing more
      // than 2X the work. (Where "work" is defined to be size() for rehashing
      // or rehashing in place, and 1 for an insert or erase.) But rehashing in
      // place is faster per operation than inserting or even doubling the size
      // of the table, so we can actually afford to reclaim even less space
      // from a resize-in-place. The decision is to rehash in place if we can
      // reclaim about 1/8th of the usable capacity (specifically 3/28 of the
      // capacity), which means that the total cost of rehashing will be a
      // small fraction of the total work.
      //
      // Here is output of an experiment using the BM_CacheInSteadyState
      // benchmark running the old case (where we rehash-in-place only if we
      // can reclaim at least 7/16*capacity_) vs. this code (which rehashes in
      // place if we can recover 3/32*capacity_).
      //
      // Note that although in the worst case the number of rehashes jumped up
      // from 15 to 190, the number of operations per second is almost the
      // same.
      //
      // Abridged output of running BM_CacheInSteadyState benchmark from
      // raw_hash_set_benchmark. N is the number of insert/erase operations.
      //
      //      | OLD (recover >= 7/16)       | NEW (recover >= 3/32)
      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
      //  448 | 145284       0.44        18 | 140118       0.44        19
      //  493 | 152546       0.24        11 | 151417       0.48        28
      //  538 | 151439       0.26        11 | 151152       0.53        38
      //  583 | 151765       0.28        11 | 150572       0.57        50
      //  628 | 150241       0.31        11 | 150853       0.61        66
      //  672 | 149602       0.33        12 | 150110       0.66        90
      //  717 | 149998       0.35        12 | 149531       0.70       129
      //  762 | 149836       0.37        13 | 148559       0.74       190
      //  807 | 149736       0.39        14 | 151107       0.39        14
      //  852 | 150204       0.42        15 | 151019       0.42        15
      drop_deletes_without_resize();
    } else {
      // Otherwise grow the container.
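      // (Added note: valid capacities have the form 2^k - 1, so doubling is
      // written as `capacity_ * 2 + 1`, which maps 2^k - 1 to 2^(k+1) - 1.)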
      resize(capacity_ * 2 + 1);
    }
  }

  bool has_element(const value_type& elem) const {
    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
                              elem))
          return true;
      }
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
    return false;
  }

  // TODO(alkis): Optimize this assuming *this and that don't overlap.
  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
    raw_hash_set tmp(std::move(that));
    swap(tmp);
    return *this;
  }
  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
    raw_hash_set tmp(std::move(that), alloc_ref());
    swap(tmp);
    return *this;
  }

 protected:
  // Attempts to find `key` in the table; if it isn't found, returns a slot
  // that the value can be inserted into, with the control byte already set to
  // `key`'s H2.
  template <class K>
  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
    prefetch_heap_block();
    auto hash = hash_ref()(key);
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (uint32_t i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return {seq.offset(i), false};
      }
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
    return {prepare_insert(hash), true};
  }

  // Given the hash of a value not currently in the table, finds the next
  // viable slot index to insert it at.
  //
  // REQUIRES: At least one non-full slot available.
  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
    auto target = find_first_non_full(ctrl_, hash, capacity_);
    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
                           !IsDeleted(ctrl_[target.offset]))) {
      rehash_and_grow_if_necessary();
      target = find_first_non_full(ctrl_, hash, capacity_);
    }
    ++size_;
    growth_left() -= IsEmpty(ctrl_[target.offset]);
    SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
            sizeof(slot_type));
    infoz().RecordInsert(hash, target.probe_length);
    return target.offset;
  }

  // Constructs the value in the space pointed to by the iterator. This only
  // works after an unsuccessful find_or_prepare_insert() and before any other
  // modifications happen in the raw_hash_set.
  //
  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
  // k is the key decomposed from `forward<Args>(args)...`, and the bool
  // returned by find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(size_t i, Args&&... args) {
    PolicyTraits::construct(&alloc_ref(), slots_ + i,
                            std::forward<Args>(args)...);

    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
               iterator_at(i) &&
           "constructed value does not match the lookup key");
  }

  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }

 private:
  friend struct RawHashSetTestOnlyAccess;

  void reset_growth_left() {
    growth_left() = CapacityToGrowth(capacity()) - size_;
  }

  // The number of slots we can still fill without needing to rehash.
  //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity, because we'd like to rehash when the table is
  // otherwise filled with tombstones: otherwise, probe sequences might get
  // unacceptably long without triggering a rehash. Callers can also force a
  // rehash via the standard `rehash(0)`, which will recompute this value as a
  // side-effect.
  //
  // See `CapacityToGrowth()`.
  size_t& growth_left() { return settings_.template get<0>(); }

  // Prefetch the heap-allocated memory region to resolve potential TLB misses.
  // This is intended to overlap with the computation of the hash for a key.
  void prefetch_heap_block() const { base_internal::PrefetchT2(ctrl_); }

  HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }

  hasher& hash_ref() { return settings_.template get<2>(); }
  const hasher& hash_ref() const { return settings_.template get<2>(); }
  key_equal& eq_ref() { return settings_.template get<3>(); }
  const key_equal& eq_ref() const { return settings_.template get<3>(); }
  allocator_type& alloc_ref() { return settings_.template get<4>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<4>();
  }

  // TODO(alkis): Investigate removing some of these fields:
  // - ctrl/slots can be derived from each other
  // - size can be moved into the slot array

  // The control bytes (and, also, a pointer to the base of the backing array).
  //
  // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
  // when the table is empty (hence EmptyGroup).
  ctrl_t* ctrl_ = EmptyGroup();
  // The beginning of the slots, located at `SlotOffset()` bytes after
  // `ctrl_`. May be null for empty tables.
  slot_type* slots_ = nullptr;
  // The number of filled slots.
  size_t size_ = 0;
  // The total number of available slots.
  size_t capacity_ = 0;
  absl::container_internal::CompressedTuple<size_t /* growth_left */,
                                            HashtablezInfoHandle, hasher,
                                            key_equal, allocator_type>
      settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
                allocator_type{}};
};

// Erases all elements that satisfy the predicate `pred` from the container
// `c`.
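// For example (a hypothetical sketch; `flat_hash_set` is one of the containers
// built on this table, and `absl::erase_if` is the usual entry point):
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   container_internal::EraseIf(is_odd, &s);  // s now holds {2, 4}.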
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  const auto initial_size = c->size();
  for (auto it = c->begin(), last = c->end(); it != last;) {
    if (pred(*it)) {
      c->erase(it++);
    } else {
      ++it;
    }
  }
  return initial_size - c->size();
}

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.ctrl_, hash, set.capacity_);
    while (true) {
      container_internal::Group g{set.ctrl_ + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slots_ + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity_;
    if (capacity == 0) return 0;
    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));

    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (size_t i = 0; i != capacity; ++i) {
        if (container_internal::IsFull(c.ctrl_[i])) {
          m += Traits::space_used(c.slots_ + i);
        }
      }
    }
    return m;
  }

  static size_t LowerBoundAllocatedByteSize(size_t size) {
    size_t capacity = GrowthToLowerboundCapacity(size);
    if (capacity == 0) return 0;
    size_t m =
        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * size;
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_INTERNAL_ASSERT_IS_FULL

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_