Diffstat (limited to 'absl/hash/internal/low_level_hash.cc')
-rw-r--r--  absl/hash/internal/low_level_hash.cc | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 60 insertions(+), 28 deletions(-)
diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc
index b5db0b89..6dc71cf7 100644
--- a/absl/hash/internal/low_level_hash.cc
+++ b/absl/hash/internal/low_level_hash.cc
@@ -14,6 +14,9 @@
#include "absl/hash/internal/low_level_hash.h"
+#include <cstddef>
+#include <cstdint>
+
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
@@ -28,19 +31,22 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
}
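
For reference, the Mix primitive that every path below leans on is a 64x64-to-128-bit multiply whose halves are folded together. A minimal sketch, reconstructed from the context lines shown above (the hunk header carries the signature and the context carries the return statement):

static uint64_t Mix(uint64_t v0, uint64_t v1) {
  // Widen to 128 bits so the full product is kept.
  absl::uint128 p = v0;
  p *= v1;
  // XOR the high half into the low half so both inputs influence
  // all 64 output bits.
  return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
}
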
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
- const uint64_t salt[5]) {
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]) {
// Prefetch the cacheline that data resides in.
PrefetchToLocalCache(data);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
uint64_t starting_length = static_cast<uint64_t>(len);
+ const uint8_t* last_16_ptr = ptr + starting_length - 16;
uint64_t current_state = seed ^ salt[0];
if (len > 64) {
// If we have more than 64 bytes, we're going to handle chunks of 64
// bytes at a time. We're going to build up four separate hash states
// which we will then hash together.
- uint64_t duplicated_state = current_state;
+ uint64_t duplicated_state0 = current_state;
+ uint64_t duplicated_state1 = current_state;
+ uint64_t duplicated_state2 = current_state;
do {
// Always prefetch the next cacheline.
@@ -55,40 +61,72 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
- uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
- uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
- current_state = (cs0 ^ cs1);
+ current_state = Mix(a ^ salt[1], b ^ current_state);
+ duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
- uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
- uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
- duplicated_state = (ds0 ^ ds1);
+ duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
+ duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
ptr += 64;
len -= 64;
} while (len > 64);
- current_state = current_state ^ duplicated_state;
+ current_state = (current_state ^ duplicated_state0) ^
+ (duplicated_state1 + duplicated_state2);
}
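
Assembled from the + lines above, the steady-state loop after this change reads roughly as follows (a sketch; the prefetch offset is an assumption, since that statement is elided from the hunk). The payoff is that each of the four Mix calls feeds its own state, so the 128-bit multiplies form four short, independent dependency chains instead of two XOR-coupled pairs, and out-of-order cores can overlap them:

uint64_t duplicated_state0 = current_state;
uint64_t duplicated_state1 = current_state;
uint64_t duplicated_state2 = current_state;
do {
  // Always prefetch the next cacheline (ABSL_CACHELINE_SIZE assumed here;
  // the exact expression is outside the hunk).
  PrefetchToLocalCache(ptr + ABSL_CACHELINE_SIZE);

  uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
  uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
  uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
  uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
  uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32);
  uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40);
  uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
  uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);

  // Four independent chains: no Mix result feeds another Mix in the
  // same iteration.
  current_state = Mix(a ^ salt[1], b ^ current_state);
  duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
  duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
  duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);

  ptr += 64;
  len -= 64;
} while (len > 64);

// Fold the four lanes back into one 64-bit state; using + alongside ^
// breaks the symmetry a pure XOR combine would have.
current_state = (current_state ^ duplicated_state0) ^
                (duplicated_state1 + duplicated_state2);
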
// We now have a data `ptr` with at most 64 bytes and the current state
// of the hashing state machine stored in current_state.
- while (len > 16) {
+ if (len > 32) {
uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+ uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+ uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
- current_state = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+ uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+ current_state = cs0 ^ cs1;
+
+ ptr += 32;
+ len -= 32;
+ }
- ptr += 16;
- len -= 16;
+ // We now have a data `ptr` with at most 32 bytes and the current state
+ // of the hashing state machine stored in current_state.
+ if (len > 16) {
+ uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+
+ current_state = Mix(a ^ salt[1], b ^ current_state);
}
- // We now have a data `ptr` with at most 16 bytes.
+ // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
+ // safely read from `ptr + len - 16`.
+ uint64_t a = absl::base_internal::UnalignedLoad64(last_16_ptr);
+ uint64_t b = absl::base_internal::UnalignedLoad64(last_16_ptr + 8);
+
+ return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
+}
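
The unconditional 16-byte tail load is what lets the 32- and 16-byte blocks above fall through without a remainder loop: LowLevelHashLenGt16 is only entered when len > 16, so the window ending at ptr + starting_length is always in bounds, and re-hashing bytes an earlier load already covered is harmless for a hash. A hypothetical call, just to make the bounds concrete (the salt values are placeholders, not Abseil's):

const uint64_t salt[5] = {1, 2, 3, 4, 5};  // placeholder salts, not Abseil's
char buf[20] = "0123456789abcdefghi";      // 19 chars + NUL = 20 bytes
// len == 20: the loop and the `len > 32` block are skipped, the `len > 16`
// block mixes bytes 0..15, and the final load reads from
// last_16_ptr = buf + 4, i.e. bytes 4..19 -- re-reading bytes 4..15.
uint64_t h = LowLevelHashLenGt16(buf, sizeof(buf), /*seed=*/0, salt);
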
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+ const uint64_t salt[5]) {
+ if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
+
+ // Prefetch the cacheline that data resides in.
+ PrefetchToLocalCache(data);
+ const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ uint64_t starting_length = static_cast<uint64_t>(len);
+ uint64_t current_state = seed ^ salt[0];
+ if (len == 0) return current_state;
+
uint64_t a = 0;
uint64_t b = 0;
+
+ // We now have a data `ptr` with at least 1 and at most 16 bytes.
if (len > 8) {
// When we have at least 9 and at most 16 bytes, set A to the first 64
- // bits of the input and B to the last 64 bits of the input. Yes, they will
- // overlap in the middle if we are working with less than the full 16
+ // bits of the input and B to the last 64 bits of the input. Yes, they
+ // will overlap in the middle if we are working with less than the full 16
// bytes.
a = absl::base_internal::UnalignedLoad64(ptr);
b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
@@ -97,20 +135,14 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
// bits and B to the last 32 bits.
a = absl::base_internal::UnalignedLoad32(ptr);
b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
- } else if (len > 0) {
- // If we have at least 1 and at most 3 bytes, read all of the provided
- // bits into A, with some adjustments.
- a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
- ptr[len - 1]);
- b = 0;
} else {
- a = 0;
- b = 0;
+ // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
+ // other byte into B, with some adjustments.
+ a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
+ b = static_cast<uint64_t>(ptr[len >> 1]);
}
- uint64_t w = Mix(a ^ salt[1], b ^ current_state);
- uint64_t z = salt[1] ^ starting_length;
- return Mix(w, z);
+ return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
}
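
The 1-to-3-byte case is the subtlest rewrite: the old code packed the first, middle, and last bytes into `a` alone and left `b` at zero, while the new code spreads them across both Mix operands. A standalone sketch of the new packing (PackShort is a hypothetical name, used only for illustration):

#include <cstddef>
#include <cstdint>

// Hypothetical helper mirroring the new 1..3-byte packing above.
inline void PackShort(const uint8_t* ptr, size_t len, uint64_t& a,
                      uint64_t& b) {
  a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);  // first | last
  b = static_cast<uint64_t>(ptr[len >> 1]);                 // middle byte
}

// For {0xAA, 0xBB, 0xCC} (len 3): a == 0xAACC, b == 0xBB.
// For {0xAA, 0xBB}       (len 2): a == 0xAABB, b == 0xBB.
// For {0xAA}             (len 1): a == 0xAAAA, b == 0xAA (all indices are 0).

With both operands carrying input bytes and the length folded into the salt side of the final Mix, the previous two-Mix finisher (w, then z) collapses into a single Mix.
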
} // namespace hash_internal