Diffstat (limited to 'absl/hash/internal/hash.h')
-rw-r--r--  absl/hash/internal/hash.h  348
1 file changed, 297 insertions(+), 51 deletions(-)
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index 7fb0af0b..45dfdd46 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -21,7 +21,9 @@
#include <algorithm>
#include <array>
+#include <bitset>
#include <cmath>
+#include <cstddef>
#include <cstring>
#include <deque>
#include <forward_list>
@@ -35,6 +37,8 @@
#include <string>
#include <tuple>
#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -42,17 +46,20 @@
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
-#include "absl/hash/internal/wyhash.h"
+#include "absl/hash/internal/city.h"
+#include "absl/hash/internal/low_level_hash.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
-#include "absl/hash/internal/city.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
+
+class HashState;
+
namespace hash_internal {
// Internal detail: Large buffers are hashed in smaller chunks. This function
@@ -114,24 +121,66 @@ class PiecewiseCombiner {
size_t position_;
};
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
+template <typename T>
+struct is_hashable;
+
// HashStateBase
//
-// A hash state object represents an intermediate state in the computation
-// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
-// base class for hash state implementations. Developers adding type support
-// for `absl::Hash` should not rely on any parts of the state object other than
-// the following member functions:
+// An internal implementation detail containing the functionality common to
+// all of the "hash state objects" generated by Abseil. This is not a public
+// API; users should not create classes that inherit from this.
+//
+// A hash state object is the template argument `H` passed to `AbslHashValue`.
+// It represents an intermediate state in the computation of an unspecified hash
+// algorithm. `HashStateBase` provides a CRTP style base class for hash state
+// implementations. Developers adding type support for `absl::Hash` should not
+// rely on any parts of the state object other than the following member
+// functions:
//
// * HashStateBase::combine()
// * HashStateBase::combine_contiguous()
+// * HashStateBase::combine_unordered()
//
-// A derived hash state class of type `H` must provide a static member function
+// A derived hash state class of type `H` must provide a public member function
// with a signature similar to the following:
//
// `static H combine_contiguous(H state, const unsigned char*, size_t)`.
//
+// It must also provide a private template method named RunCombineUnordered.
+//
+// A "consumer" is a 1-arg functor returning void. Its argument is a reference
+// to an inner hash state object, and it may be called multiple times. When
+// called, the functor consumes the entropy from the provided state object,
+// and resets that object to its empty state.
+//
+// A "combiner" is a stateless 2-arg functor returning void. Its arguments are
+// an inner hash state object and an ElementStateConsumer functor. A combiner
+// uses the provided inner hash state object to hash each element of the
+// container, passing the inner hash state object to the consumer after hashing
+// each element.
+//
+// Given these definitions, a derived hash state class of type H
+// must provide a private template method with a signature similar to the
+// following:
+//
+// `template <typename CombinerT>`
+// `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
+//
+// This function is responsible for constructing the inner state object and
+// providing a consumer to the combiner. It uses side effects of the consumer
+// and combiner to mix the state of each element in an order-independent manner,
+// and uses this to return an updated value of `outer_state`.
+//
+// This inside-out approach generates efficient object code in the normal case,
+// but allows us to use stack storage to implement the absl::HashState type
+// erasure mechanism (avoiding heap allocations while hashing).
+//
// `HashStateBase` will provide a complete implementation for a hash state
-// object in terms of this method.
+// object in terms of these two methods.
//
// Example:
//
@@ -140,6 +189,10 @@ class PiecewiseCombiner {
// static H combine_contiguous(H state, const unsigned char*, size_t);
// using MyHashState::HashStateBase::combine;
// using MyHashState::HashStateBase::combine_contiguous;
+// using MyHashState::HashStateBase::combine_unordered;
+// private:
+// template <typename CombinerT>
+// static H RunCombineUnordered(H state, CombinerT combiner);
// };
template <typename H>
class HashStateBase {
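
To make the protocol above concrete, here is a hypothetical toy hash state
showing the shape of the two required hooks. It targets the internal
HashStateBase API only to illustrate the calling convention documented above;
the mixing is deliberately simplistic and is not how Abseil's real states work.

  class ToyHashState
      : public absl::hash_internal::HashStateBase<ToyHashState> {
   public:
    ToyHashState(ToyHashState&&) = default;
    ToyHashState& operator=(ToyHashState&&) = default;

    // Fundamental base case: mix raw bytes into the state.
    static ToyHashState combine_contiguous(ToyHashState state,
                                           const unsigned char* p, size_t n) {
      for (size_t i = 0; i < n; ++i) state.state_ = state.state_ * 131 + p[i];
      return state;
    }
    using ToyHashState::HashStateBase::combine;
    using ToyHashState::HashStateBase::combine_contiguous;
    using ToyHashState::HashStateBase::combine_unordered;

   private:
    ToyHashState() = default;
    friend class ToyHashState::HashStateBase;

    // Order-independent hook: the combiner hashes each element into the inner
    // state and hands it to our consumer, which accumulates and resets it.
    template <typename CombinerT>
    static ToyHashState RunCombineUnordered(ToyHashState outer,
                                            CombinerT combiner) {
      uint64_t unordered = 0;
      combiner(ToyHashState{}, [&](ToyHashState& inner) {
        unordered += inner.state_;  // commutative, so order does not matter
        inner = ToyHashState{};     // reset for the next element
      });
      return combine(std::move(outer), unordered);
    }

    uint64_t state_ = 0;
  };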
@@ -180,7 +233,30 @@ class HashStateBase {
template <typename T>
static H combine_contiguous(H state, const T* data, size_t size);
+ template <typename I>
+ static H combine_unordered(H state, I begin, I end);
+
using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+
+ template <typename T>
+ using is_hashable = absl::hash_internal::is_hashable<T>;
+
+ private:
+ // Common implementation of the iteration step of a "combiner", as described
+ // above.
+ template <typename I>
+ struct CombineUnorderedCallback {
+ I begin;
+ I end;
+
+ template <typename InnerH, typename ElementStateConsumer>
+ void operator()(InnerH inner_state, ElementStateConsumer cb) {
+ for (; begin != end; ++begin) {
+ inner_state = H::combine(std::move(inner_state), *begin);
+ cb(inner_state);
+ }
+ }
+ };
};
// is_uniquely_represented
@@ -345,17 +421,43 @@ H AbslHashValue(H hash_state, std::nullptr_t) {
return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
}
+// AbslHashValue() for hashing pointers-to-member
+template <typename H, typename T, typename C>
+H AbslHashValue(H hash_state, T C::* ptr) {
+ auto salient_ptm_size = [](std::size_t n) -> std::size_t {
+#if defined(_MSC_VER)
+ // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
+ // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
+ // padding (namely when they have 1 or 3 ints). The value below is a lower
+ // bound on the number of salient, non-padding bytes that we use for
+ // hashing.
+ if (alignof(T C::*) == alignof(int)) {
+ // No padding when all subobjects have the same size as the total
+ // alignment. This happens in 32-bit mode.
+ return n;
+ } else {
+ // Padding for 1 int (size 16) or 3 ints (size 24).
+      // With 2 ints the size is also 16 but has no padding; we pessimize it.
+ return n == 24 ? 20 : n == 16 ? 12 : n;
+ }
+#else
+ // On other platforms, we assume that pointers-to-members do not have
+ // padding.
+#ifdef __cpp_lib_has_unique_object_representations
+ static_assert(std::has_unique_object_representations_v<T C::*>);
+#endif // __cpp_lib_has_unique_object_representations
+ return n;
+#endif
+ };
+ return H::combine_contiguous(std::move(hash_state),
+ reinterpret_cast<unsigned char*>(&ptr),
+ salient_ptm_size(sizeof ptr));
+}
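
A brief usage sketch for the new pointer-to-member overload above, assuming
the public absl::Hash API and a hypothetical struct S (hash values are seeded
per process and are not stable across runs):

  struct S {
    int x;
    void f() {}
  };
  // Pointers to data members and to member functions are both hashable now.
  size_t h1 = absl::Hash<int S::*>{}(&S::x);
  size_t h2 = absl::Hash<void (S::*)()>{}(&S::f);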
+
// -----------------------------------------------------------------------------
// AbslHashValue for Composite Types
// -----------------------------------------------------------------------------
-// is_hashable()
-//
-// Trait class which returns true if T is hashable by the absl::Hash framework.
-// Used for the AbslHashValue implementations for composite types below.
-template <typename T>
-struct is_hashable;
-
// AbslHashValue() for hashing pairs
template <typename H, typename T1, typename T2>
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
@@ -379,7 +481,7 @@ template <typename H, typename... Ts>
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
// for now.
H
-#else // _MSC_VER
+#else // _MSC_VER
typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
#endif // _MSC_VER
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
@@ -489,8 +591,9 @@ typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
// AbslHashValue for hashing std::vector
//
-// Do not use this for vector<bool>. It does not have a .data(), and a fallback
-// for std::hash<> is most likely faster.
+// Do not use this for vector<bool> on platforms that have a working
+// implementation of std::hash. It does not have a .data(), and a fallback for
+// std::hash<> is most likely faster.
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
H>::type
@@ -500,6 +603,44 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
vector.size());
}
+// AbslHashValue special cases for hashing std::vector<bool>
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+ (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+
+// std::hash in libstdc++ does not work correctly with vector<bool> on Big
+// Endian platforms, so we need to implement a custom AbslHashValue for it.
+// More details on the bug:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ typename H::AbslInternalPiecewiseCombiner combiner;
+ for (const auto& i : vector) {
+ unsigned char c = static_cast<unsigned char>(i);
+ hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+ }
+ return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
+}
+#else
+// When not working around the libstdc++ bug above, we still have to contend
+// with the fact that std::hash<vector<bool>> is often poor quality, hashing
+// directly on the internal words and on no other state. On these platforms,
+// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
+//
+// Mixing in the size (as we do in our other vector<> implementations) on top
+// of the library-provided hash implementation avoids this QOI issue.
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ return H::combine(std::move(hash_state),
+ std::hash<std::vector<T, Allocator>>{}(vector),
+ vector.size());
+}
+#endif
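
A quick illustration of the QOI point above, assuming the public absl::Hash
API; mixing in the size makes these two values differ in practice (collisions
remain possible in principle, as with any hash):

  std::vector<bool> a = {true, true};
  std::vector<bool> b = {true, true, false};
  // Without the size, a poor std::hash<vector<bool>> could give these the
  // same hash; with the size mixed in they differ.
  assert(absl::Hash<std::vector<bool>>{}(a) !=
         absl::Hash<std::vector<bool>>{}(b));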
+
// -----------------------------------------------------------------------------
// AbslHashValue for Ordered Associative Containers
// -----------------------------------------------------------------------------
@@ -550,6 +691,55 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
}
// -----------------------------------------------------------------------------
+// AbslHashValue for Unordered Associative Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::unordered_set
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multiset
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state,
+ const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_map
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multimap
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
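
A usage sketch for the unordered-container overloads above, assuming the
public absl::Hash API. Because combine_unordered() mixes element hashes
order-independently, containers that compare equal hash equal regardless of
insertion or bucket order:

  std::unordered_set<int> a = {1, 2, 3};
  std::unordered_set<int> b = {3, 2, 1};
  // a == b, so their hashes must match even if their bucket layouts differ.
  assert(absl::Hash<std::unordered_set<int>>{}(a) ==
         absl::Hash<std::unordered_set<int>>{}(b));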
+
+// -----------------------------------------------------------------------------
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------
@@ -592,9 +782,28 @@ AbslHashValue(H hash_state, const absl::variant<T...>& v) {
// AbslHashValue for Other Types
// -----------------------------------------------------------------------------
-// AbslHashValue for hashing std::bitset is not defined, for the same reason as
-// for vector<bool> (see std::vector above): It does not expose the raw bytes,
-// and a fallback to std::hash<> is most likely faster.
+// AbslHashValue for hashing std::bitset is not defined on Little Endian
+// platforms, for the same reason as for vector<bool> (see std::vector above):
+// It does not expose the raw bytes, and a fallback to std::hash<> is most
+// likely faster.
+
+#if defined(ABSL_IS_BIG_ENDIAN) && \
+ (defined(__GLIBCXX__) || defined(__GLIBCPP__))
+// AbslHashValue for hashing std::bitset
+//
+// std::hash in libstdc++ does not work correctly with std::bitset on Big
+// Endian platforms, so we need to implement a custom AbslHashValue for it.
+// More details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
+template <typename H, size_t N>
+H AbslHashValue(H hash_state, const std::bitset<N>& set) {
+ typename H::AbslInternalPiecewiseCombiner combiner;
+  for (size_t i = 0; i < N; ++i) {
+ unsigned char c = static_cast<unsigned char>(set[i]);
+ hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
+ }
+ return H::combine(combiner.finalize(std::move(hash_state)), N);
+}
+#endif
// -----------------------------------------------------------------------------
@@ -714,8 +923,8 @@ template <typename T>
struct is_hashable
: std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
-// HashState
-class ABSL_DLL HashState : public HashStateBase<HashState> {
+// MixingHashState
+class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// absl::uint128 is not an alias or a thin wrapper around the intrinsic.
// We use the intrinsic when available to improve performance.
#ifdef ABSL_HAVE_INTRINSIC_INT128
@@ -734,22 +943,23 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
public:
// Move only
- HashState(HashState&&) = default;
- HashState& operator=(HashState&&) = default;
+ MixingHashState(MixingHashState&&) = default;
+ MixingHashState& operator=(MixingHashState&&) = default;
- // HashState::combine_contiguous()
+ // MixingHashState::combine_contiguous()
//
// Fundamental base case for hash recursion: mixes the given range of bytes
// into the hash state.
- static HashState combine_contiguous(HashState hash_state,
- const unsigned char* first, size_t size) {
- return HashState(
+ static MixingHashState combine_contiguous(MixingHashState hash_state,
+ const unsigned char* first,
+ size_t size) {
+ return MixingHashState(
CombineContiguousImpl(hash_state.state_, first, size,
std::integral_constant<int, sizeof(size_t)>{}));
}
- using HashState::HashStateBase::combine_contiguous;
+ using MixingHashState::HashStateBase::combine_contiguous;
- // HashState::hash()
+ // MixingHashState::hash()
//
// For performance reasons in non-opt mode, we specialize this for
// integral types.
@@ -761,24 +971,49 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
}
- // Overload of HashState::hash()
+ // Overload of MixingHashState::hash()
template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
static size_t hash(const T& value) {
- return static_cast<size_t>(combine(HashState{}, value).state_);
+ return static_cast<size_t>(combine(MixingHashState{}, value).state_);
}
private:
// Invoked only once for a given argument; that plus the fact that this is
// move-only ensures that there is only one non-moved-from object.
- HashState() : state_(Seed()) {}
+ MixingHashState() : state_(Seed()) {}
+
+ friend class MixingHashState::HashStateBase;
+
+ template <typename CombinerT>
+ static MixingHashState RunCombineUnordered(MixingHashState state,
+ CombinerT combiner) {
+ uint64_t unordered_state = 0;
+ combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
+ // Add the hash state of the element to the running total, but mix the
+      // carry bit back into the low bit. This is intended to avoid losing
+ // entropy to overflow, especially when unordered_multisets contain
+ // multiple copies of the same value.
+ auto element_state = inner_state.state_;
+ unordered_state += element_state;
+ if (unordered_state < element_state) {
+ ++unordered_state;
+ }
+ inner_state = MixingHashState{};
+ });
+ return MixingHashState::combine(std::move(state), unordered_state);
+ }
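
A standalone sketch of the add-with-carry accumulation used above, with
illustrative element hashes; folding the carry back into the low bit keeps the
sum from silently discarding entropy on overflow, e.g. when an
unordered_multiset holds many copies of one value:

  uint64_t unordered_state = 0;
  const uint64_t element_hashes[] = {0xffffffffffffff00ull, 0x200ull};
  for (uint64_t element_state : element_hashes) {
    unordered_state += element_state;
    if (unordered_state < element_state) ++unordered_state;  // fold the carry
  }
  // The second addition overflows; the carry is folded back in rather than lost.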
+
+ // Allow the HashState type-erasure implementation to invoke
+  // RunCombineUnordered() directly.
+ friend class absl::HashState;
// Workaround for MSVC bug.
// We make the type copyable to fix the calling convention, even though we
// never actually copy it. Keep it private to not affect the public API of the
// type.
- HashState(const HashState&) = default;
+ MixingHashState(const MixingHashState&) = default;
- explicit HashState(uint64_t state) : state_(state) {}
+ explicit MixingHashState(uint64_t state) : state_(state) {}
// Implementation of the base case for combine_contiguous where we actually
// mix the bytes into the state.
@@ -793,7 +1028,6 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
std::integral_constant<int, 8>
/* sizeof_size_t */);
-
// Slow dispatch path for calls to CombineContiguousImpl with a size argument
// larger than PiecewiseChunkSize(). Has the same effect as calling
// CombineContiguousImpl() repeatedly with the chunk stride size.
@@ -856,6 +1090,8 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
+ // Though the 128-bit product on AArch64 needs two instructions, it is
+ // still a good balance between speed and hash quality.
using MultType =
absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
// We do the addition in 64-bit space to make sure the 128-bit
@@ -867,16 +1103,16 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
}
- // An extern to avoid bloat on a direct call to Wyhash() with fixed values for
- // both the seed and salt parameters.
- static uint64_t WyhashImpl(const unsigned char* data, size_t len);
+ // An extern to avoid bloat on a direct call to LowLevelHash() with fixed
+ // values for both the seed and salt parameters.
+ static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
size_t len) {
#ifdef ABSL_HAVE_INTRINSIC_INT128
- return WyhashImpl(data, len);
+ return LowLevelHashImpl(data, len);
#else
- return absl::hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
+ return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
#endif
}
@@ -911,8 +1147,8 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
uint64_t state_;
};
-// HashState::CombineContiguousImpl()
-inline uint64_t HashState::CombineContiguousImpl(
+// MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 4> /* sizeof_size_t */) {
// For large values we use CityHash, for small ones we just use a
@@ -922,7 +1158,7 @@ inline uint64_t HashState::CombineContiguousImpl(
if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
return CombineLargeContiguousImpl32(state, first, len);
}
- v = absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
+ v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
} else if (len >= 4) {
v = Read4To8(first, len);
} else if (len > 0) {
@@ -934,12 +1170,12 @@ inline uint64_t HashState::CombineContiguousImpl(
return Mix(state, v);
}
-// Overload of HashState::CombineContiguousImpl()
-inline uint64_t HashState::CombineContiguousImpl(
+// Overload of MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 8> /* sizeof_size_t */) {
- // For large values we use Wyhash or CityHash depending on the platform, for
- // small ones we just use a multiplicative hash.
+ // For large values we use LowLevelHash or CityHash depending on the platform,
+ // for small ones we just use a multiplicative hash.
uint64_t v;
if (len > 16) {
if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
@@ -976,7 +1212,9 @@ struct PoisonedHash : private AggregateBarrier {
template <typename T>
struct HashImpl {
- size_t operator()(const T& value) const { return HashState::hash(value); }
+ size_t operator()(const T& value) const {
+ return MixingHashState::hash(value);
+ }
};
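
For reference, this is where the public absl::Hash<T> functor bottoms out
after the rename; a trivial use, with the caveat that results are seeded per
process and are not stable across runs or binaries:

  // Internally this routes through MixingHashState::hash().
  size_t h = absl::Hash<std::pair<int, std::string>>{}({42, "abc"});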
template <typename T>
@@ -998,6 +1236,14 @@ H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
return hash_internal::hash_range_or_bytes(std::move(state), data, size);
}
+// HashStateBase::combine_unordered()
+template <typename H>
+template <typename I>
+H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
+ return H::RunCombineUnordered(std::move(state),
+ CombineUnorderedCallback<I>{begin, end});
+}
+
// HashStateBase::PiecewiseCombiner::add_buffer()
template <typename H>
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,