author    | Connal de Souza <connaldesouza@google.com>    | 2023-08-30 12:22:55 -0700
committer | Copybara-Service <copybara-worker@google.com> | 2023-08-30 12:23:48 -0700
commit    | 37770938fc0fc9b25f458434031cf3d8f6c65e95 (patch)
tree      | b9876595368f1f1c7e451920e5782073e98282af /absl/container/internal/raw_hash_set.h
parent    | 99a3a6ae4af45fef96b5279ddaad6b9eb9e44462 (diff)
Optimize Resize and Iteration on Arm
There are a few cycles of overhead when transferring data between GPRs and Neon registers. We pay this cost in GroupAArch64Impl, largely because the speedup we get in Match() makes it profitable. After a Match() call, subsequent Group operations do not have to pay the full GPR <-> Neon cost again, so it makes sense to do them with Neon instructions as well.
However, iteration and find_first_non_full() are not preceded by a Match(), so their MaskEmptyOrDeleted() and CountLeadingEmptyOrDeleted() calls pay the full GPR <-> Neon cost. We can avoid this by using the GPR-based versions of these functions from the portable Group implementation instead. We also slightly change the order of operations in these functions (functionally a no-op) to take advantage of Arm's free flexible-second-operand shifts on logical operations.
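The reordering is safe because `(~ctrl << 6)` and `~(ctrl << 6)` differ only in the low bits introduced by the shift, and those bits are cleared by the `msbs` mask anyway; on AArch64 the new form can typically be lowered to a single BIC with a shifted second operand. A minimal standalone check of that equivalence (illustrative only, not part of this commit) could look like this:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative check only (not part of the commit). (~ctrl << 6) and
// ~(ctrl << 6) differ only in bits 0..5, which the 0x80.. MSB mask clears,
// so the masked results are identical for every control word.
int main() {
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  for (uint64_t byte = 0; byte < 256; ++byte) {
    // Replicate the byte across all eight control slots of the group.
    const uint64_t ctrl = byte * 0x0101010101010101ULL;
    const uint64_t old_form = (ctrl & (~ctrl << 6)) & msbs;  // before
    const uint64_t new_form = (ctrl & ~(ctrl << 6)) & msbs;  // after
    assert(old_form == new_form);
  }
  return 0;
}
```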
Iteration and Resize are roughly 8% and 12.6% faster, respectively.
This is not profitable on x86 because the GPR <-> xmm register transfer latency is much lower there and the Group is 16 slots wide.
PiperOrigin-RevId: 561415183
Change-Id: I660b5bb84afedb05a12dcdf04d5b2e1514902760
Diffstat (limited to 'absl/container/internal/raw_hash_set.h')
-rw-r--r-- | absl/container/internal/raw_hash_set.h | 20 |
1 file changed, 16 insertions, 4 deletions
```diff
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 4bbb85f4..9d789b48 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -740,13 +740,13 @@ struct GroupPortableImpl {
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
     constexpr uint64_t msbs = 0x8080808080808080ULL;
-    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
                                                    msbs);
   }
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
     constexpr uint64_t msbs = 0x8080808080808080ULL;
-    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
                                                    msbs);
   }
 
@@ -771,10 +771,21 @@ struct GroupPortableImpl {
 
 #ifdef ABSL_INTERNAL_HAVE_SSE2
 using Group = GroupSse2Impl;
+using GroupEmptyOrDeleted = GroupSse2Impl;
 #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
 using Group = GroupAArch64Impl;
+// For Aarch64, we use the portable implementation for counting and masking
+// empty or deleted group elements. This is to avoid the latency of moving
+// between data GPRs and Neon registers when it does not provide a benefit.
+// Using Neon is profitable when we call Match(), but is not when we don't,
+// which is the case when we do *EmptyOrDeleted operations. It is difficult to
+// make a similar approach beneficial on other architectures such as x86 since
+// they have much lower GPR <-> vector register transfer latency and 16-wide
+// Groups.
+using GroupEmptyOrDeleted = GroupPortableImpl;
 #else
 using Group = GroupPortableImpl;
+using GroupEmptyOrDeleted = GroupPortableImpl;
 #endif
 
 // When there is an insertion with no reserved growth, we rehash with
@@ -1360,7 +1371,7 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
   auto seq = probe(common, hash);
   const ctrl_t* ctrl = common.control();
   while (true) {
-    Group g{ctrl + seq.offset()};
+    GroupEmptyOrDeleted g{ctrl + seq.offset()};
     auto mask = g.MaskEmptyOrDeleted();
     if (mask) {
 #if !defined(NDEBUG)
@@ -1699,7 +1710,8 @@ class raw_hash_set {
     // If a sentinel is reached, we null `ctrl_` out instead.
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
-        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+        uint32_t shift =
+            GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
         ctrl_ += shift;
         slot_ += shift;
       }
```
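For a sense of how the portable group keeps these operations in general-purpose registers, the sketch below (C++20, little-endian, with hypothetical names rather than Abseil's exact internals) counts leading empty-or-deleted control bytes using the same shift-and-mask idea as the portable implementation; no vector registers are involved:

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Self-contained sketch; names and helpers are illustrative, not Abseil's
// exact internals. Control bytes: 0x80 = empty, 0xFE = deleted,
// 0xFF = sentinel, and full slots have the high bit clear.
namespace demo {

constexpr int8_t kEmpty = -128;   // 0x80
constexpr int8_t kDeleted = -2;   // 0xFE
constexpr int8_t kSentinel = -1;  // 0xFF

// Counts how many of the first 8 control bytes are empty or deleted, using
// only 64-bit GPR arithmetic -- no Neon registers are touched, which is why
// this path avoids the GPR <-> Neon transfer cost on AArch64. Assumes a
// little-endian load, as the real Neon path does.
uint32_t CountLeadingEmptyOrDeleted(const int8_t* pos) {
  uint64_t ctrl;
  std::memcpy(&ctrl, pos, sizeof(ctrl));  // 8-byte group load
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  // A byte is empty or deleted iff its MSB is set and its LSB is clear;
  // sentinel (0xFF) and full slots fail this test.
  const uint64_t match = (ctrl & ~(ctrl << 7)) & msbs;
  const uint64_t non_match = ~match & msbs;  // MSBs of non-matching bytes
  if (non_match == 0) return 8;              // whole group matches
  return static_cast<uint32_t>(std::countr_zero(non_match)) >> 3;
}

}  // namespace demo

int main() {
  const int8_t group[8] = {demo::kEmpty, demo::kDeleted, 42, demo::kEmpty,
                           7, demo::kSentinel, 1, 2};
  // The first two bytes are empty/deleted; the third is a full slot.
  std::printf("%u\n", demo::CountLeadingEmptyOrDeleted(group));  // prints 2
  return 0;
}
```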