55 files changed, 18696 insertions, 0 deletions
diff --git a/absl/CMakeLists.txt b/absl/CMakeLists.txt index 689f64e2..1d09b193 100644 --- a/absl/CMakeLists.txt +++ b/absl/CMakeLists.txt @@ -20,6 +20,7 @@ add_subdirectory(base) add_subdirectory(algorithm) add_subdirectory(container) add_subdirectory(debugging) +add_subdirectory(hash) add_subdirectory(memory) add_subdirectory(meta) add_subdirectory(numeric) diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 7b5f52bc..265c5ec9 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -185,3 +185,459 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) + +NOTEST_TAGS_NONMOBILE = [ + "no_test_darwin_x86_64", + "no_test_loonix", +] + +NOTEST_TAGS_MOBILE = [ + "no_test_android_arm", + "no_test_android_arm64", + "no_test_android_x86", + "no_test_ios_x86_64", +] + +NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE + +cc_library( + name = "flat_hash_map", + hdrs = ["flat_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_map", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_map_test", + srcs = ["flat_hash_map_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":flat_hash_map", + ":hash_generator_testing", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "//absl/types:any", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "flat_hash_set", + hdrs = ["flat_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_set", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_set_test", + srcs = ["flat_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":flat_hash_set", + ":hash_generator_testing", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_map", + hdrs = ["node_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_map", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_map_test", + srcs = ["node_hash_map_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":hash_generator_testing", + ":node_hash_map", + ":tracked", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_set", + hdrs = ["node_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_set", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_set_test", + srcs = ["node_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":hash_generator_testing", + ":node_hash_set", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "container_memory", + hdrs = ["internal/container_memory.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/memory", + "//absl/utility", + ], +) 
+ +cc_test( + name = "container_memory_test", + srcs = ["internal/container_memory_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":container_memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_function_defaults", + hdrs = ["internal/hash_function_defaults.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base:config", + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_function_defaults_test", + srcs = ["internal/hash_function_defaults_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS, + deps = [ + ":hash_function_defaults", + "//absl/hash", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_generator_testing", + testonly = 1, + srcs = ["internal/hash_generator_testing.cc"], + hdrs = ["internal/hash_generator_testing.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_testing", + "//absl/meta:type_traits", + "//absl/strings", + ], +) + +cc_library( + name = "hash_policy_testing", + testonly = 1, + hdrs = ["internal/hash_policy_testing.h"], + copts = ABSL_TEST_COPTS, + deps = [ + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_policy_testing_test", + srcs = ["internal/hash_policy_testing_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_testing", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_policy_traits", + hdrs = ["internal/hash_policy_traits.h"], + copts = ABSL_DEFAULT_COPTS, + deps = ["//absl/meta:type_traits"], +) + +cc_test( + name = "hash_policy_traits_test", + srcs = ["internal/hash_policy_traits_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_traits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hashtable_debug", + hdrs = ["internal/hashtable_debug.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":hashtable_debug_hooks", + ], +) + +cc_library( + name = "hashtable_debug_hooks", + hdrs = ["internal/hashtable_debug_hooks.h"], + copts = ABSL_DEFAULT_COPTS, +) + +cc_library( + name = "node_hash_policy", + hdrs = ["internal/node_hash_policy.h"], + copts = ABSL_DEFAULT_COPTS, +) + +cc_test( + name = "node_hash_policy_test", + srcs = ["internal/node_hash_policy_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_policy_traits", + ":node_hash_policy", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "raw_hash_map", + hdrs = ["internal/raw_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":container_memory", + ":raw_hash_set", + ], +) + +cc_library( + name = "raw_hash_set", + srcs = ["internal/raw_hash_set.cc"], + hdrs = ["internal/raw_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":compressed_tuple", + ":container_memory", + ":hash_policy_traits", + ":hashtable_debug_hooks", + ":layout", + "//absl/base:bits", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:endian", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/types:optional", + "//absl/utility", + ], +) + +cc_test( + name = "raw_hash_set_test", + srcs = ["internal/raw_hash_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkstatic = 1, + tags = NOTEST_TAGS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":hash_policy_testing", + ":hashtable_debug", + ":raw_hash_set", + "//absl/base", + "//absl/base:core_headers", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_hash_set_allocator_test", + size = "small", + 
srcs = ["internal/raw_hash_set_allocator_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":raw_hash_set", + ":tracked", + "//absl/base:core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "layout", + hdrs = ["internal/layout.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base:core_headers", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:span", + "//absl/utility", + ], +) + +cc_test( + name = "layout_test", + size = "small", + srcs = ["internal/layout_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS, + visibility = ["//visibility:private"], + deps = [ + ":layout", + "//absl/base", + "//absl/base:core_headers", + "//absl/types:span", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "tracked", + testonly = 1, + hdrs = ["internal/tracked.h"], + copts = ABSL_TEST_COPTS, +) + +cc_library( + name = "unordered_map_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_map_constructor_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_map_lookup_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_map_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_set_constructor_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_set_lookup_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_set_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "unordered_set_test", + srcs = ["internal/unordered_set_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unordered_map_test", + srcs = ["internal/unordered_map_test.cc"], + copts = ABSL_TEST_COPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index 123e4c48..710bace7 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -17,12 +17,34 @@ list(APPEND CONTAINER_PUBLIC_HEADERS "fixed_array.h" + "flat_hash_map.h" + "flat_hash_set.h" "inlined_vector.h" + "node_hash_map.h" + "node_hash_set.h" ) list(APPEND CONTAINER_INTERNAL_HEADERS + "internal/compressed_tuple.h" + "internal/container_memory.h" + 
"internal/hash_function_defaults.h" + "internal/hash_generator_testing.h" + "internal/hash_policy_testing.h" + "internal/hash_policy_traits.h" + "internal/hashtable_debug.h" + "internal/layout.h" + "internal/node_hash_policy.h" + "internal/raw_hash_map.h" + "internal/raw_hash_set.h" "internal/test_instance_tracker.h" + "internal/tracked.h" + "internal/unordered_map_constructor_test.h" + "internal/unordered_map_lookup_test.h" + "internal/unordered_map_modifiers_test.h" + "internal/unordered_set_constructor_test.h" + "internal/unordered_set_lookup_test.h" + "internal/unordered_set_modifiers_test.h" ) diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h new file mode 100644 index 00000000..13fbfba5 --- /dev/null +++ b/absl/container/flat_hash_map.h @@ -0,0 +1,528 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map<K, V>` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `flat_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. + +#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_ +#define ABSL_CONTAINER_FLAT_HASH_MAP_H_ + +#include <cstddef> +#include <new> +#include <type_traits> +#include <utility> + +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +namespace container_internal { +template <class K, class V> +struct FlatHashMapPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::flat_hash_map +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map<K, V>` is an unordered associative container which +// has been optimized for both speed and memory footprint in most common use +// cases. Its interface is similar to that of `std::unordered_map<K, V>` with +// the following notable differences: +// +// * Requires keys that are CopyConstructible +// * Requires values that are MoveConstructible +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the map is provided a compatible heterogeneous +// hashing function and equality operator. 
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `flat_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or your values are large,
+// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `absl::node_hash_map`.
+//
+// Example:
+//
+//   // Create a flat hash map of three strings (that map to strings)
+//   absl::flat_hash_map<std::string, std::string> ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the flat hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template <class K, class V,
+          class Hash = absl::container_internal::hash_default_hash<K>,
+          class Eq = absl::container_internal::hash_default_eq<K>,
+          class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public absl::container_internal::raw_hash_map<
+                          absl::container_internal::FlatHashMapPolicy<K, V>,
+                          Hash, Eq, Allocator> {
+  using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+  flat_hash_map() {}
+  using Base::Base;
+
+  // flat_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_map`.
+  using Base::begin;
+
+  // flat_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_map`.
+  using Base::cbegin;
+
+  // flat_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_map`.
+  using Base::cend;
+
+  // flat_hash_map::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_map`.
+  using Base::end;
+
+  // flat_hash_map::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_map`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_map` and is
+  // not provided in the `std::unordered_map` API.
+  using Base::capacity;
+
+  // flat_hash_map::empty()
+  //
+  // Returns whether or not the `flat_hash_map` is empty.
+  using Base::empty;
+
+  // flat_hash_map::max_size()
+  //
+  // Returns the largest theoretically possible number of elements within a
+  // `flat_hash_map` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_map<K, V>`.
+ using Base::max_size; + + // flat_hash_map::size() + // + // Returns the number of elements currently within the `flat_hash_map`. + using Base::size; + + // flat_hash_map::clear() + // + // Removes all elements from the `flat_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_map` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists. + using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair<iterator,bool> insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair<iterator,bool> insert(T&& value): + // std::pair<iterator,bool> insert(init_type&& value ): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value ); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last ): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list<init_type> ilist ): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. 
+  using Base::insert;
+
+  // flat_hash_map::insert_or_assign()
+  //
+  // Inserts an element of the specified value into the `flat_hash_map` provided
+  // that a value with the given key does not already exist, or replaces it with
+  // the element value if a key for that value already exists, returning an
+  // iterator pointing to the newly inserted element. If rehashing occurs due
+  // to the insertion, all existing iterators are invalidated. Overloads are
+  // listed below.
+  //
+  // pair<iterator, bool> insert_or_assign(const key_type& k, T&& obj):
+  // pair<iterator, bool> insert_or_assign(key_type&& k, T&& obj):
+  //
+  //   Inserts/Assigns (or moves) the element of the specified key into the
+  //   `flat_hash_map`.
+  //
+  // iterator insert_or_assign(const_iterator hint,
+  //                           const key_type& k, T&& obj):
+  // iterator insert_or_assign(const_iterator hint, key_type&& k, T&& obj):
+  //
+  //   Inserts/Assigns (or moves) the element of the specified key into the
+  //   `flat_hash_map` using the position of `hint` as a non-binding suggestion
+  //   for where to begin the insertion search.
+  using Base::insert_or_assign;
+
+  // flat_hash_map::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_map`, provided that no element with the given key
+  // already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately. Prefer `try_emplace()` unless your key is not
+  // copyable or moveable.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace;
+
+  // flat_hash_map::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_map`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search, and only inserts
+  // provided that no element with the given key already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately. Prefer `try_emplace()` unless your key is not
+  // copyable or moveable.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace_hint;
+
+  // flat_hash_map::try_emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_map`, provided that no element with the given key
+  // already exists. Unlike `emplace()`, if an element with the given key
+  // already exists, we guarantee that no element is constructed.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  // Overloads are listed below.
+  //
+  // pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+  // pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+  //
+  //   Inserts (via copy or move) the element of the specified key into the
+  //   `flat_hash_map`.
+  //
+  // iterator try_emplace(const_iterator hint,
+  //                      const key_type& k, Args&&... args):
+  // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
+  //
+  //   Inserts (via copy or move) the element of the specified key into the
+  //   `flat_hash_map` using the position of `hint` as a non-binding suggestion
+  //   for where to begin the insertion search.
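+  //
+  // A sketch of the difference from `emplace()`, using a move-only mapped
+  // type (illustrative only; the behavior follows from the guarantee above,
+  // and the snippet is kept C++11-compatible, hence the explicit `new`):
+  //
+  //   absl::flat_hash_map<std::string, std::unique_ptr<int>> m;
+  //   m.try_emplace("k", std::unique_ptr<int>(new int(1)));
+  //
+  //   std::unique_ptr<int> p(new int(7));
+  //   // "k" already exists: nothing is constructed, and `p` is not moved from.
+  //   m.try_emplace("k", std::move(p));
+  //   assert(p != nullptr && *m.at("k") == 1);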
+  using Base::try_emplace;
+
+  // flat_hash_map::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  //   Extracts the key,value pair of the element at the indicated position and
+  //   returns a node handle owning that extracted data.
+  //
+  // node_type extract(const key_type& x):
+  //
+  //   Extracts the key,value pair of the element with a key matching the passed
+  //   key value and returns a node handle owning that extracted data. If the
+  //   `flat_hash_map` does not contain an element with a matching key, this
+  //   function returns an empty node handle.
+  using Base::extract;
+
+  // flat_hash_map::merge()
+  //
+  // Extracts elements from a given `source` flat hash map into this
+  // `flat_hash_map`. If the destination `flat_hash_map` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_map::swap(flat_hash_map& other)
+  //
+  // Exchanges the contents of this `flat_hash_map` with those of the `other`
+  // flat hash map, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `flat_hash_map` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash map's hashing and key equivalence
+  // functions be Swappable, and they are exchanged using unqualified calls to
+  // non-member `swap()`. If the map's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_map::rehash(count)
+  //
+  // Rehashes the `flat_hash_map`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_map`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_map::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_map` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_map::at()
+  //
+  // Returns a reference to the mapped value of the element with key equivalent
+  // to the passed key.
+  using Base::at;
+
+  // flat_hash_map::contains()
+  //
+  // Determines whether an element with a key comparing equal to the given `key`
+  // exists within the `flat_hash_map`, returning `true` if so or `false`
+  // otherwise.
+  using Base::contains;
+
+  // flat_hash_map::count(const Key& key) const
+  //
+  // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `flat_hash_map`. Note that this function will return
+  // either `1` or `0` since duplicate keys are not allowed within a
+  // `flat_hash_map`.
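+  //
+  // Heterogeneous lookup (advertised in the class comment) applies to the
+  // lookup members above: with the transparent string functors supplied by
+  // internal/hash_function_defaults.h in this change, a std::string-keyed map
+  // can be probed with an absl::string_view. A sketch (illustrative only):
+  //
+  //   absl::flat_hash_map<std::string, int> m = {{"key", 1}};
+  //   absl::string_view k = "key";
+  //   assert(m.contains(k));        // no temporary std::string is created
+  //   assert(m.count(k) == 1);
+  //   assert(m.find(k) != m.end());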
+  using Base::count;
+
+  // flat_hash_map::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_map`.
+  using Base::equal_range;
+
+  // flat_hash_map::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_map`.
+  using Base::find;
+
+  // flat_hash_map::operator[]()
+  //
+  // Returns a reference to the value mapped to the passed key within the
+  // `flat_hash_map`, performing an `insert()` if the key does not already
+  // exist.
+  //
+  // If an insertion occurs and results in a rehashing of the container, all
+  // iterators are invalidated. Otherwise iterators are not affected and
+  // references are not invalidated. Overloads are listed below.
+  //
+  // T& operator[](const Key& key):
+  //
+  //   Inserts an init_type object constructed in-place if the element with the
+  //   given key does not exist.
+  //
+  // T& operator[](Key&& key):
+  //
+  //   Inserts an init_type object constructed in-place provided that an element
+  //   with the given key does not exist.
+  using Base::operator[];
+
+  // flat_hash_map::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_map`. Note that
+  // because a flat hash map contains all elements within its internal storage,
+  // this value simply equals the current capacity of the `flat_hash_map`.
+  using Base::bucket_count;
+
+  // flat_hash_map::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_map` (the average number
+  // of slots occupied with a value within the hash map).
+  using Base::load_factor;
+
+  // flat_hash_map::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_map`. Overloads are
+  // listed below.
+  //
+  // float flat_hash_map::max_load_factor()
+  //
+  //   Returns the current maximum load factor of the `flat_hash_map`.
+  //
+  // void flat_hash_map::max_load_factor(float ml)
+  //
+  //   Sets the maximum load factor of the `flat_hash_map` to the passed value.
+  //
+  //   NOTE: This overload is provided only for API compatibility with the STL;
+  //   `flat_hash_map` will ignore any set load factor and manage its rehashing
+  //   internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_map::get_allocator()
+  //
+  // Returns the allocator associated with this `flat_hash_map`.
+  using Base::get_allocator;
+
+  // flat_hash_map::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_map`.
+  using Base::hash_function;
+
+  // flat_hash_map::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+};
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+  using slot_type = container_internal::slot_type<K, V>;
+  using key_type = K;
+  using mapped_type = V;
+  using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    slot_type::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    slot_type::destroy(alloc, slot);
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    slot_type::transfer(alloc, new_slot, old_slot);
+  }
+
+  template <class F, class...
Args> + static decltype(absl::container_internal::DecomposePair( + std::declval<F>(), std::declval<Args>()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposePair(std::forward<F>(f), + std::forward<Args>(args)...); + } + + static size_t space_used(const slot_type*) { return 0; } + + static std::pair<const K, V>& element(slot_type* slot) { return slot->value; } + + static V& value(std::pair<const K, V>* kv) { return kv->second; } + static const V& value(const std::pair<const K, V>* kv) { return kv->second; } +}; + +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc new file mode 100644 index 00000000..10a781ff --- /dev/null +++ b/absl/container/flat_hash_map_test.cc @@ -0,0 +1,241 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/flat_hash_map.h" + +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/unordered_map_constructor_test.h" +#include "absl/container/internal/unordered_map_lookup_test.h" +#include "absl/container/internal/unordered_map_modifiers_test.h" +#include "absl/types/any.h" + +namespace absl { +namespace container_internal { +namespace { +using ::absl::container_internal::hash_internal::Enum; +using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::_; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +template <class K, class V> +using Map = + flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual, Alloc<>>; + +static_assert(!std::is_standard_layout<NonStandardLayout>(), ""); + +using MapTypes = + ::testing::Types<Map<int, int>, Map<std::string, int>, Map<Enum, std::string>, + Map<EnumClass, int>, Map<int, NonStandardLayout>, + Map<NonStandardLayout, int>>; + +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ConstructorTest, MapTypes); +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, LookupTest, MapTypes); +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ModifiersTest, MapTypes); + +TEST(FlatHashMap, StandardLayout) { + struct Int { + explicit Int(size_t value) : value(value) {} + Int() : value(0) { ADD_FAILURE(); } + Int(const Int& other) : value(other.value) { ADD_FAILURE(); } + Int(Int&&) = default; + bool operator==(const Int& other) const { return value == other.value; } + size_t value; + }; + static_assert(std::is_standard_layout<Int>(), ""); + + struct Hash { + size_t operator()(const Int& obj) const { return obj.value; } + }; + + // Verify that neither the key nor the value get default-constructed or + // copy-constructed. 
+ { + flat_hash_map<Int, Int, Hash> m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.rehash(2 * m.bucket_count()); + } + { + flat_hash_map<Int, Int, Hash> m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.clear(); + } +} + +// gcc becomes unhappy if this is inside the method, so pull it out here. +struct balast {}; + +TEST(FlatHashMap, IteratesMsan) { + // Because SwissTable randomizes on pointer addresses, we keep old tables + // around to ensure we don't reuse old memory. + std::vector<absl::flat_hash_map<int, balast>> garbage; + for (int i = 0; i < 100; ++i) { + absl::flat_hash_map<int, balast> t; + for (int j = 0; j < 100; ++j) { + t[j]; + for (const auto& p : t) EXPECT_THAT(p, Pair(_, _)); + } + garbage.push_back(std::move(t)); + } +} + +// Demonstration of the "Lazy Key" pattern. This uses heterogenous insert to +// avoid creating expensive key elements when the item is already present in the +// map. +struct LazyInt { + explicit LazyInt(size_t value, int* tracker) + : value(value), tracker(tracker) {} + + explicit operator size_t() const { + ++*tracker; + return value; + } + + size_t value; + int* tracker; +}; + +struct Hash { + using is_transparent = void; + int* tracker; + size_t operator()(size_t obj) const { + ++*tracker; + return obj; + } + size_t operator()(const LazyInt& obj) const { + ++*tracker; + return obj.value; + } +}; + +struct Eq { + using is_transparent = void; + bool operator()(size_t lhs, size_t rhs) const { + return lhs == rhs; + } + bool operator()(size_t lhs, const LazyInt& rhs) const { + return lhs == rhs.value; + } +}; + +TEST(FlatHashMap, LazyKeyPattern) { + // hashes are only guaranteed in opt mode, we use assertions to track internal + // state that can cause extra calls to hash. + int conversions = 0; + int hashes = 0; + flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes}); + + m[LazyInt(1, &conversions)] = 1; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 1); +#endif + + m[LazyInt(1, &conversions)] = 2; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 2); +#endif + + m.try_emplace(LazyInt(2, &conversions), 3); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 3); +#endif + + m.try_emplace(LazyInt(2, &conversions), 4); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 4); +#endif +} + +TEST(FlatHashMap, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + flat_hash_map<int, int> m; + m.erase(n); + m.count(n); + m.prefetch(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +TEST(FlatHashMap, MergeExtractInsert) { + // We can't test mutable keys, or non-copyable keys with flat_hash_map. + // Test that the nodes have the proper API. 
+ absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}}; + auto node = m.extract(1); + EXPECT_TRUE(node); + EXPECT_EQ(node.key(), 1); + EXPECT_EQ(node.mapped(), 7); + EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9))); + + node.mapped() = 17; + m.insert(std::move(node)); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9))); +} +#if !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__) +TEST(FlatHashMap, Any) { + absl::flat_hash_map<int, absl::any> m; + m.emplace(1, 7); + auto it = m.find(1); + ASSERT_NE(it, m.end()); + EXPECT_EQ(7, absl::any_cast<int>(it->second)); + + m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8)); + it = m.find(2); + ASSERT_NE(it, m.end()); + EXPECT_EQ(8, absl::any_cast<int>(it->second)); + + m.emplace(std::piecewise_construct, std::make_tuple(3), + std::make_tuple(absl::any(9))); + it = m.find(3); + ASSERT_NE(it, m.end()); + EXPECT_EQ(9, absl::any_cast<int>(it->second)); + + struct H { + size_t operator()(const absl::any&) const { return 0; } + }; + struct E { + bool operator()(const absl::any&, const absl::any&) const { return true; } + }; + absl::flat_hash_map<absl::any, int, H, E> m2; + m2.emplace(1, 7); + auto it2 = m2.find(1); + ASSERT_NE(it2, m2.end()); + EXPECT_EQ(7, it2->second); +} +#endif // __ANDROID__ + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h new file mode 100644 index 00000000..ccd03a4a --- /dev/null +++ b/absl/container/flat_hash_set.h @@ -0,0 +1,439 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set<T>` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `flat_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash set should be a set of type +// `flat_hash_set`. 
+
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
+#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+namespace container_internal {
+template <typename T>
+struct FlatHashSetPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set<T>` is an unordered associative container which has
+// been optimized for both speed and memory footprint in most common use cases.
+// Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is provided a compatible heterogeneous hashing function and
+//   equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
+// fundamental and Abseil types that support the `absl::Hash` framework have a
+// compatible equality operator for comparing insertions into `flat_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
+// array to avoid memory indirection. Because a `flat_hash_set` is designed to
+// move data when rehashed, set keys will not retain pointer stability. If you
+// require pointer stability, consider using
+// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
+// you require pointer stability, consider `absl::node_hash_set` instead.
+//
+// Example:
+//
+//   // Create a flat hash set of three strings
+//   absl::flat_hash_set<std::string> ducks =
+//     {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the flat hash set
+//   ducks.insert("donald");
+//
+//   // Force a rehash of the flat hash set
+//   ducks.rehash(0);
+//
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!" << std::endl;
+//   }
+template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
+          class Eq = absl::container_internal::hash_default_eq<T>,
+          class Allocator = std::allocator<T>>
+class flat_hash_set
+    : public absl::container_internal::raw_hash_set<
+          absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
+  using Base = typename flat_hash_set::raw_hash_set;
+
+ public:
+  flat_hash_set() {}
+  using Base::Base;
+
+  // flat_hash_set::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_set`.
+  using Base::begin;
+
+  // flat_hash_set::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_set`.
+  using Base::cbegin;
+
+  // flat_hash_set::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_set`.
+  using Base::cend;
+
+  // flat_hash_set::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_set`.
+  using Base::end;
+
+  // flat_hash_set::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_set`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_set` and is
+  // not provided in the `std::unordered_set` API.
+  using Base::capacity;
+
+  // flat_hash_set::empty()
+  //
+  // Returns whether or not the `flat_hash_set` is empty.
+  using Base::empty;
+
+  // flat_hash_set::max_size()
+  //
+  // Returns the largest theoretically possible number of elements within a
+  // `flat_hash_set` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_set<T>`.
+  using Base::max_size;
+
+  // flat_hash_set::size()
+  //
+  // Returns the number of elements currently within the `flat_hash_set`.
+  using Base::size;
+
+  // flat_hash_set::clear()
+  //
+  // Removes all elements from the `flat_hash_set`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer call `erase(begin(), end())`.
+  using Base::clear;
+
+  // flat_hash_set::erase()
+  //
+  // Erases elements within the `flat_hash_set`. Erasing does not trigger a
+  // rehash. Overloads are listed below.
+  //
+  // void erase(const_iterator pos):
+  //
+  //   Erases the element at `pos` of the `flat_hash_set`, returning
+  //   `void`.
+  //
+  //   NOTE: this return behavior is different than that of STL containers in
+  //   general and `std::unordered_set` in particular.
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  //   Erases the elements in the open interval [`first`, `last`), returning an
+  //   iterator pointing to `last`.
+  //
+  // size_type erase(const key_type& key):
+  //
+  //   Erases the element with the matching key, if it exists.
+  using Base::erase;
+
+  // flat_hash_set::insert()
+  //
+  // Inserts an element of the specified value into the `flat_hash_set`,
+  // returning an iterator pointing to the newly inserted element, provided that
+  // an element with the given key does not already exist. If rehashing occurs
+  // due to the insertion, all iterators are invalidated. Overloads are listed
+  // below.
+  //
+  // std::pair<iterator,bool> insert(const T& value):
+  //
+  //   Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
+  //   iterator to the inserted element (or to the element that prevented the
+  //   insertion) and a bool denoting whether the insertion took place.
+  //
+  // std::pair<iterator,bool> insert(T&& value):
+  //
+  //   Inserts a moveable value into the `flat_hash_set`. Returns a pair
+  //   consisting of an iterator to the inserted element (or to the element that
+  //   prevented the insertion) and a bool denoting whether the insertion took
+  //   place.
+  //
+  // iterator insert(const_iterator hint, const T& value):
+  // iterator insert(const_iterator hint, T&& value):
+  //
+  //   Inserts a value, using the position of `hint` as a non-binding suggestion
+  //   for where to begin the insertion search. Returns an iterator to the
+  //   inserted element, or to the existing element that prevented the
+  //   insertion.
+  //
+  // void insert(InputIterator first, InputIterator last):
+  //
+  //   Inserts a range of values [`first`, `last`).
+  //
+  //   NOTE: Although the STL does not specify which element may be inserted if
+  //   multiple keys compare equivalently, for `flat_hash_set` we guarantee the
+  //   first match is inserted.
+  //
+  // void insert(std::initializer_list<T> ilist):
+  //
+  //   Inserts the elements within the initializer list `ilist`.
+  //
+  //   NOTE: Although the STL does not specify which element may be inserted if
+  //   multiple keys compare equivalently within the initializer list, for
+  //   `flat_hash_set` we guarantee the first match is inserted.
+  using Base::insert;
+
+  // flat_hash_set::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_set`, provided that no element with the given key
+  // already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace;
+
+  // flat_hash_set::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `flat_hash_set`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search, and only inserts
+  // provided that no element with the given key already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace_hint;
+
+  // flat_hash_set::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  //   Extracts the element at the indicated position and returns a node handle
+  //   owning that extracted data.
+  //
+  // node_type extract(const key_type& x):
+  //
+  //   Extracts the element with the key matching the passed key value and
+  //   returns a node handle owning that extracted data. If the `flat_hash_set`
+  //   does not contain an element with a matching key, this function returns an
+  //   empty node handle.
+  using Base::extract;
+
+  // flat_hash_set::merge()
+  //
+  // Extracts elements from a given `source` flat hash set into this
+  // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_set::swap(flat_hash_set& other)
+  //
+  // Exchanges the contents of this `flat_hash_set` with those of the `other`
+  // flat hash set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `flat_hash_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash set's hashing and key equivalence
+  // functions be Swappable, and they are exchanged using unqualified calls to
+  // non-member `swap()`.
+  // If the set's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_set::rehash(count)
+  //
+  // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_set`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_set::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_set` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_set::contains()
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+  using Base::contains;
+
+  // flat_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `flat_hash_set`.
+  using Base::count;
+
+  // flat_hash_set::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_set`.
+  using Base::equal_range;
+
+  // flat_hash_set::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_set`.
+  using Base::find;
+
+  // flat_hash_set::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_set`. Note that
+  // because a flat hash set contains all elements within its internal storage,
+  // this value simply equals the current capacity of the `flat_hash_set`.
+  using Base::bucket_count;
+
+  // flat_hash_set::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_set` (the average number
+  // of slots occupied with a value within the hash set).
+  using Base::load_factor;
+
+  // flat_hash_set::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_set`. Overloads are
+  // listed below.
+  //
+  // float flat_hash_set::max_load_factor()
+  //
+  //   Returns the current maximum load factor of the `flat_hash_set`.
+  //
+  // void flat_hash_set::max_load_factor(float ml)
+  //
+  //   Sets the maximum load factor of the `flat_hash_set` to the passed value.
+  //
+  //   NOTE: This overload is provided only for API compatibility with the STL;
+  //   `flat_hash_set` will ignore any set load factor and manage its rehashing
+  //   internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_set::get_allocator()
+  //
+  // Returns the allocator associated with this `flat_hash_set`.
+  using Base::get_allocator;
+
+  // flat_hash_set::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_set`.
+  using Base::hash_function;
+
+  // flat_hash_set::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+ using Base::key_eq; +}; + +namespace container_internal { + +template <class T> +struct FlatHashSetPolicy { + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template <class Allocator, class... Args> + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + absl::allocator_traits<Allocator>::construct(*alloc, slot, + std::forward<Args>(args)...); + } + + template <class Allocator> + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits<Allocator>::destroy(*alloc, slot); + } + + template <class Allocator> + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template <class F, class... Args> + static decltype(absl::container_internal::DecomposeValue( + std::declval<F>(), std::declval<Args>()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposeValue( + std::forward<F>(f), std::forward<Args>(args)...); + } + + static size_t space_used(const T*) { return 0; } +}; +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc new file mode 100644 index 00000000..e52fd532 --- /dev/null +++ b/absl/container/flat_hash_set_test.cc @@ -0,0 +1,126 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
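+
+// For readers following the policy layer: `FlatHashSetPolicy` above is one
+// instance of the policy contract consumed by raw_hash_set (slot_type,
+// construct/destroy/transfer, element, apply). A stripped-down sketch of that
+// contract, modeled on the IntPolicy helper used by internal/raw_hash_set_test.cc
+// in this same change -- an internal, unstable interface, shown for
+// illustration only:
+//
+//   struct IntPolicy {
+//     using slot_type = int64_t;   // what each table slot stores
+//     using key_type = int64_t;    // what lookups compare against
+//     using init_type = int64_t;   // what insert() accepts
+//
+//     // Trivial slots: construct/destroy/transfer are simple assignments.
+//     static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+//     static void destroy(void*, int64_t*) {}
+//     static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+//       *new_slot = *old_slot;
+//     }
+//
+//     // Maps a slot to the element exposed through iterators.
+//     static int64_t& element(slot_type* slot) { return *slot; }
+//
+//     // Lets the table split constructor arguments into key and value; for a
+//     // set of ints, the value is its own key.
+//     template <class F>
+//     static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+//       return std::forward<F>(f)(x, x);
+//     }
+//   };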
+ +#include "absl/container/flat_hash_set.h" + +#include <vector> + +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/unordered_set_constructor_test.h" +#include "absl/container/internal/unordered_set_lookup_test.h" +#include "absl/container/internal/unordered_set_modifiers_test.h" +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::absl::container_internal::hash_internal::Enum; +using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::Pointee; +using ::testing::UnorderedElementsAre; +using ::testing::UnorderedElementsAreArray; + +template <class T> +using Set = + absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>; + +using SetTypes = + ::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>; + +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ConstructorTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, LookupTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ModifiersTest, SetTypes); + +TEST(FlatHashSet, EmplaceString) { + std::vector<std::string> v = {"a", "b"}; + absl::flat_hash_set<absl::string_view> hs(v.begin(), v.end()); + EXPECT_THAT(hs, UnorderedElementsAreArray(v)); +} + +TEST(FlatHashSet, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::flat_hash_set<int> s = {n}; + s.insert(n); + s.insert(s.end(), n); + s.insert({n}); + s.erase(n); + s.count(n); + s.prefetch(n); + s.find(n); + s.contains(n); + s.equal_range(n); +} + +TEST(FlatHashSet, MergeExtractInsert) { + struct Hash { + size_t operator()(const std::unique_ptr<int>& p) const { return *p; } + }; + struct Eq { + bool operator()(const std::unique_ptr<int>& a, + const std::unique_ptr<int>& b) const { + return *a == *b; + } + }; + absl::flat_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2; + set1.insert(absl::make_unique<int>(7)); + set1.insert(absl::make_unique<int>(17)); + + set2.insert(absl::make_unique<int>(7)); + set2.insert(absl::make_unique<int>(19)); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19))); + + set1.merge(set2); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + auto node = set1.extract(absl::make_unique<int>(7)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(7)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19))); + + auto insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_FALSE(insert_result.inserted); + EXPECT_TRUE(insert_result.node); + EXPECT_THAT(insert_result.node.value(), Pointee(7)); + EXPECT_EQ(**insert_result.position, 7); + EXPECT_NE(insert_result.position->get(), insert_result.node.value().get()); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + node = set1.extract(absl::make_unique<int>(17)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(17)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19))); + + node.value() = absl::make_unique<int>(23); + + insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_TRUE(insert_result.inserted); + EXPECT_FALSE(insert_result.node); + EXPECT_EQ(**insert_result.position, 23); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git 
a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
new file mode 100644
index 00000000..56c5d2df
--- /dev/null
+++ b/absl/container/internal/container_memory.h
@@ -0,0 +1,405 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+namespace container_internal {
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above a
+// certain threshold (usually either alignof(std::max_align_t) or
+// alignof(void*)). Allocate() doesn't apply alignment corrections. If the
+// underlying allocator returns an insufficiently aligned pointer, that's what
+// you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  struct alignas(Alignment) M {};
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  A mem_alloc(*alloc);
+  void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+  assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+         "allocator does not respect alignment");
+  return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  struct alignas(Alignment) M {};
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  A mem_alloc(*alloc);
+  AT::deallocate(mem_alloc, static_cast<M*>(p),
+                 (n + sizeof(M) - 1) / sizeof(M));
+}
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+                            absl::index_sequence<I...>) {
+  absl::allocator_traits<Alloc>::construct(
+      *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+  template <class... Args>
+  decltype(std::declval<F>()(std::declval<T>())) operator()(
+      Args&&... args) const {
+    return std::forward<F>(f)(T(std::forward<Args>(args)...));
+  }
+  F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+    Tuple&& t, absl::index_sequence<Is...>, F&& f) {
+  return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+      std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
+    -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+  return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be
+// a tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+    TupleRefImpl(std::forward<T>(t),
+                 absl::make_index_sequence<
+                     std::tuple_size<typename std::decay<T>::type>::value>())) {
+  return TupleRefImpl(
+      std::forward<T>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+                           std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+  const auto& key = std::get<0>(p.first);
+  return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+                            std::move(p.second));
+}
+
+} // namespace memory_internal
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+  memory_internal::ConstructFromTupleImpl(
+      alloc, ptr, std::forward<Tuple>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+    Tuple&& t, F&& f) {
+  return memory_internal::WithConstructedImpl<T>(
+      std::forward<Tuple>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+      std::forward<F>(f));
+}
+
+// Given arguments of a std::pair's constructor, PairArgs() returns a pair of
+// tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+//    std::pair<F, S> p(std::piecewise_construct,
+//                      std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+  return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+          std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+    const std::pair<F, S>& p) {
+  return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+  return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+    -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+                               memory_internal::TupleRef(std::forward<S>(s)))) {
+  return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+                        memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+    -> decltype(memory_internal::DecomposePairImpl(
+        std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+  return memory_internal::DecomposePairImpl(
+      std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+  const auto& key = arg;
+  return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
+
+// Helper functions for ASan and MSan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+  ASAN_POISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+  __msan_poison(m, s);
+#endif
+  (void)m;
+  (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+  ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+  __msan_unpoison(m, s);
+#endif
+  (void)m;
+  (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+  SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+  SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on a
+// non-standard-layout type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
+  static constexpr size_t kFirst = -1;
+  static constexpr size_t kSecond = -1;
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+  static constexpr size_t kFirst = offsetof(Pair, first);
+  static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+  struct Pair {
+    K first;
+    V second;
+  };
+
+  // Is P layout-compatible with Pair?
+ template <class P> + static constexpr bool LayoutCompatible() { + return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) && + alignof(P) == alignof(Pair) && + memory_internal::OffsetOf<P>::kFirst == + memory_internal::OffsetOf<Pair>::kFirst && + memory_internal::OffsetOf<P>::kSecond == + memory_internal::OffsetOf<Pair>::kSecond; + } + + public: + // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout<K>() && + std::is_standard_layout<Pair>() && + memory_internal::OffsetOf<Pair>::kFirst == 0 && + LayoutCompatible<std::pair<K, V>>() && + LayoutCompatible<std::pair<const K, V>>(); +}; + +} // namespace memory_internal + +// If kMutableKeys is false, only the value member is accessed. +// +// If kMutableKeys is true, key is accessed through all slots while value and +// mutable_value are accessed only via INITIALIZED slots. Slots are created and +// destroyed via mutable_value so that the key can be moved later. +template <class K, class V> +union slot_type { + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = + std::integral_constant<bool, + memory_internal::IsLayoutCompatible<K, V>::value>; + + public: + slot_type() {} + ~slot_type() = delete; + using value_type = std::pair<const K, V>; + using mutable_value_type = std::pair<K, V>; + + value_type value; + mutable_value_type mutable_value; + K key; + + template <class Allocator, class... Args> + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value, + std::forward<Args>(args)...); + } else { + absl::allocator_traits<Allocator>::construct(*alloc, &slot->value, + std::forward<Args>(args)...); + } + } + + // Construct this slot by moving from another slot. 
+ template <class Allocator> + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits<Allocator>::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits<Allocator>::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + template <class Allocator> + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value); + } + } + + template <class Allocator> + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + emplace(new_slot); + if (kMutableKeys::value) { + absl::allocator_traits<Allocator>::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + + template <class Allocator> + static void swap(Allocator* alloc, slot_type* a, slot_type* b) { + if (kMutableKeys::value) { + using std::swap; + swap(a->mutable_value, b->mutable_value); + } else { + value_type tmp = std::move(a->value); + absl::allocator_traits<Allocator>::destroy(*alloc, &a->value); + absl::allocator_traits<Allocator>::construct(*alloc, &a->value, + std::move(b->value)); + absl::allocator_traits<Allocator>::destroy(*alloc, &b->value); + absl::allocator_traits<Allocator>::construct(*alloc, &b->value, + std::move(tmp)); + } + } + + template <class Allocator> + static void move(Allocator* alloc, slot_type* src, slot_type* dest) { + if (kMutableKeys::value) { + dest->mutable_value = std::move(src->mutable_value); + } else { + absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value); + absl::allocator_traits<Allocator>::construct(*alloc, &dest->value, + std::move(src->value)); + } + } + + template <class Allocator> + static void move(Allocator* alloc, slot_type* first, slot_type* last, + slot_type* result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc new file mode 100644 index 00000000..f1c40582 --- /dev/null +++ b/absl/container/internal/container_memory_test.cc @@ -0,0 +1,188 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
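+//
+// Sketch of the aligned-allocation helpers under test (illustrative only):
+// Allocate<A>() rebinds the allocator to a dummy struct declared with
+// alignas(A), so the alignment request is made through the allocator itself;
+// no pointer fix-up happens afterwards.
+//
+//   std::allocator<int8_t> alloc;
+//   void* p = absl::container_internal::Allocate<2>(&alloc, 3);
+//   // ... p points to 3 usable bytes, 2-byte aligned ...
+//   absl::container_internal::Deallocate<2>(&alloc, p, 3);  // same args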
+ +#include "absl/container/internal/container_memory.h" + +#include <cstdint> +#include <tuple> +#include <utility> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator<int8_t> alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator<int64_t> alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator<std::string>; + + public: + Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits<Alloc>::destroy(*alloc(), ptr_); + std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed<absl::string_view>( + std::make_tuple(std::string("a")), + [](absl::string_view str) { return str.size(); })); +} + +template <class F, class Arg> +decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg)); +} + +template <class F, class Arg> +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template <class F, class Arg> +decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template <class F, class... Args> +decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...)) +DecomposePairImpl(int, F&& f, Args&&... 
args) { + return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...); +} + +template <class F, class... Args> +const char* DecomposePairImpl(char, F&& f, Args&&... args) { + return "not decomposable"; +} + +template <class F, class... Args> +decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k, + std::tuple<double>&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", + TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h new file mode 100644 index 00000000..dd6cd8f5 --- /dev/null +++ b/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,148 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. +// +// std::hash<T> and std::equal_to<T> are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. That is: +// +// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then +// hash_default_hash<T>(a) must equal hash_default_hash<T>(b) +// +// For SwissTable containers this requirement is relaxed to allow a and b of +// any and possibly different types. Note that like the standard the hash and +// equal functions are still bound to T. 
This is important because some type U
+// can be hashed by/tested for equality differently depending on T. A notable
+// example is `const char*`. `const char*` is treated as a C-style string when
+// the hash function is hash<string> but as a pointer when the hash function is
+// hash<void*>.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+  using Hash = absl::Hash<T>;
+  using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+  using is_transparent = void;
+
+  size_t operator()(absl::string_view v) const {
+    return absl::Hash<absl::string_view>{}(v);
+  }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+  using Hash = StringHash;
+  struct Eq {
+    using is_transparent = void;
+    bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+      return lhs == rhs;
+    }
+  };
+};
+
+// When HAS_GLOBAL_STRING is defined, the global `string` type is distinct
+// from std::string, so it needs its own specialization.
+#if defined(HAS_GLOBAL_STRING)
+template <>
+struct HashEq<string> : StringHashEq {};
+#endif
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+  struct Hash {
+    using is_transparent = void;
+    template <class U>
+    size_t operator()(const U& ptr) const {
+      return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+    }
+  };
+  struct Eq {
+    using is_transparent = void;
+    template <class A, class B>
+    bool operator()(const A& a, const B& b) const {
+      return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+    }
+  };
+
+ private:
+  static const T* ToPtr(const T* ptr) { return ptr; }
+  template <class U, class D>
+  static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+    return ptr.get();
+  }
+  template <class U>
+  static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+    return ptr.get();
+  }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+
+// This header's visibility is restricted. If you need to access the default
+// hasher, please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal, please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+} // namespace container_internal
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
new file mode 100644
index 00000000..464baae0
--- /dev/null
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -0,0 +1,299 @@
+// Copyright 2018 The Abseil Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_function_defaults.h" + +#include <functional> +#include <type_traits> +#include <utility> + +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq<int32_t> eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash<int32_t> hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq<MyEnum> eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash<MyEnum> hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types<std::string, absl::string_view>; + +template <class T> +struct EqString : ::testing::Test { + hash_default_eq<T> key_eq; +}; + +TYPED_TEST_CASE(EqString, StringTypes); + +template <class T> +struct HashString : ::testing::Test { + hash_default_hash<T> hasher; +}; + +TYPED_TEST_CASE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +struct NoDeleter { + template <class T> + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types<const int*, int*, std::unique_ptr<const int>, + std::unique_ptr<const int, NoDeleter>, + std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>, + std::shared_ptr<const int>, std::shared_ptr<int>>; + +template <class T> +struct EqPointer : ::testing::Test { + hash_default_eq<T> key_eq; +}; + +TYPED_TEST_CASE(EqPointer, PointerTypes); + +template <class T> +struct HashPointer : ::testing::Test { + hash_default_hash<T> hasher; +}; + +TYPED_TEST_CASE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared<int>(); + std::shared_ptr<const int> csptr = sptr; + int* 
ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr<int, NoDeleter> uptr(ptr); + std::unique_ptr<const int, NoDeleter> cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash<Base*> hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived)); + + auto dp = std::make_shared<Derived>(); + EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + hash_default_hash<Func> hasher; + hash_default_eq<Func> eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared<int>(); + std::shared_ptr<const int> csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr<int, NoDeleter> uptr(ptr); + std::unique_ptr<const int, NoDeleter> cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +// Cartesian product of (string, std::string, absl::string_view) +// with (string, std::string, absl::string_view, const char*). +using StringTypesCartesianProduct = Types< + // clang-format off + + std::pair<std::string, std::string>, + std::pair<std::string, absl::string_view>, + std::pair<std::string, const char*>, + + std::pair<absl::string_view, std::string>, + std::pair<absl::string_view, absl::string_view>, + std::pair<absl::string_view, const char*>>; +// clang-format on + +constexpr char kFirstString[] = "abc123"; +constexpr char kSecondString[] = "ijk456"; + +template <typename T> +struct StringLikeTest : public ::testing::Test { + typename T::first_type a1{kFirstString}; + typename T::second_type b1{kFirstString}; + typename T::first_type a2{kSecondString}; + typename T::second_type b2{kSecondString}; + hash_default_eq<typename T::first_type> eq; + hash_default_hash<typename T::first_type> hash; +}; + +TYPED_TEST_CASE_P(StringLikeTest); + +TYPED_TEST_P(StringLikeTest, Eq) { + EXPECT_TRUE(this->eq(this->a1, this->b1)); + EXPECT_TRUE(this->eq(this->b1, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, NotEq) { + EXPECT_FALSE(this->eq(this->a1, this->b2)); + EXPECT_FALSE(this->eq(this->b2, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, HashEq) { + EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); + EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); + // It would be a poor hash function which collides on these strings. 
+ EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); +} + +TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct); + +} // namespace +} // namespace container_internal +} // namespace absl + +enum Hash : size_t { + kStd = 0x2, // std::hash +#ifdef _MSC_VER + kExtension = kStd, // In MSVC, std::hash == ::hash +#else // _MSC_VER + kExtension = 0x4, // ::hash (GCC extension) +#endif // _MSC_VER +}; + +// H is a bitmask of Hash enumerations. +// Hashable<H> is hashable via all means specified in H. +template <int H> +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template <int H> +struct hash<Hashable<H>> { + template <class E = Hashable<H>, + class = typename std::enable_if<E::HashableBy(kStd)>::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +namespace container_internal { +namespace { + +template <class T> +size_t Hash(const T& v) { + return hash_default_hash<T>()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>())); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc new file mode 100644 index 00000000..0d6a9df1 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.cc @@ -0,0 +1,72 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
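+//
+// Implementation note (illustrative): GetThreadLocalRng() below seeds one
+// std::mt19937_64 per thread from std::random_device, so generators may be
+// called concurrently from multiple threads without locking:
+//
+//   auto n = hash_internal::Generator<int>()();          // random int
+//   auto s = hash_internal::Generator<std::string>()();  // 32 random chars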
+ +#include "absl/container/internal/hash_generator_testing.h" + +#include <deque> + +namespace absl { +namespace container_internal { +namespace hash_internal { +namespace { + +class RandomDeviceSeedSeq { + public: + using result_type = typename std::random_device::result_type; + + template <class Iterator> + void generate(Iterator start, Iterator end) { + while (start != end) { + *start = gen_(); + ++start; + } + } + + private: + std::random_device gen_; +}; + +} // namespace + +std::mt19937_64* GetThreadLocalRng() { + RandomDeviceSeedSeq seed_seq; + thread_local auto* rng = new std::mt19937_64(seed_seq); + return rng; +} + +std::string Generator<std::string>::operator()() const { + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution<short> chars(0x20, 0x7E); + std::string res; + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetThreadLocalRng()); }); + return res; +} + +absl::string_view Generator<absl::string_view>::operator()() const { + static auto* arena = new std::deque<std::string>(); + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution<short> chars(0x20, 0x7E); + arena->emplace_back(); + auto& res = arena->back(); + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetThreadLocalRng()); }); + return res; +} + +} // namespace hash_internal +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h new file mode 100644 index 00000000..50d77102 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,150 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include <stdint.h> +#include <algorithm> +#include <iosfwd> +#include <random> +#include <tuple> +#include <type_traits> +#include <utility> + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template <class Container, class = void> +struct IsMap : std::false_type {}; + +template <class Map> +struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetThreadLocalRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast<uint64_t>(ec); +} + +template <class T, class E = void> +struct Generator; + +template <class T> +struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> { + T operator()() const { + std::uniform_int_distribution<T> dist; + return dist(*GetThreadLocalRng()); + } +}; + +template <> +struct Generator<Enum> { + Enum operator()() const { + std::uniform_int_distribution<typename std::underlying_type<Enum>::type> + dist; + while (true) { + auto variate = dist(*GetThreadLocalRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast<Enum>(variate); + } + } +}; + +template <> +struct Generator<EnumClass> { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type<EnumClass>::type> + dist; + while (true) { + EnumClass variate = static_cast<EnumClass>(dist(*GetThreadLocalRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast<EnumClass>(variate); + } + } +}; + +template <> +struct Generator<std::string> { + std::string operator()() const; +}; + +template <> +struct Generator<absl::string_view> { + absl::string_view operator()() const; +}; + +template <> +struct Generator<NonStandardLayout> { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator<std::string>()()); + } +}; + +template <class K, class V> +struct Generator<std::pair<K, V>> { + std::pair<K, V> operator()() const { + return std::pair<K, V>(Generator<typename std::decay<K>::type>()(), + Generator<typename std::decay<V>::type>()()); + } +}; + +template <class... 
Ts> +struct Generator<std::tuple<Ts...>> { + std::tuple<Ts...> operator()() const { + return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...); + } +}; + +template <class U> +struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()), + decltype(std::declval<U&>().value())>> + : Generator<std::pair< + typename std::decay<decltype(std::declval<U&>().key())>::type, + typename std::decay<decltype(std::declval<U&>().value())>::type>> {}; + +template <class Container> +using GeneratedType = decltype( + std::declval<const Generator< + typename std::conditional<generator_internal::IsMap<Container>::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +} // namespace hash_internal +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h new file mode 100644 index 00000000..ffc76ead --- /dev/null +++ b/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,178 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include <cstdlib> +#include <limits> +#include <memory> +#include <ostream> +#include <type_traits> +#include <utility> +#include <vector> + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +namespace container_internal { +namespace hash_testing_internal { + +template <class Derived> +struct WithId { + WithId() : id_(next_id<Derived>()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template <class T> + static size_t next_id() { + // 0 is reserved for moved from state. 
+ static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template <typename H> + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template <class T> + size_t operator()(const T& t) const { + return absl::Hash<T>{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template <class T, class U> + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template <class T = int> +struct Alloc : std::allocator<T> { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. + explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template <class U> + Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {} + + template <class U> + struct rebind { + using other = Alloc<U>; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = std::numeric_limits<size_t>::max(); +}; + +template <class Map> +auto items(const Map& m) -> std::vector< + std::pair<typename Map::key_type, typename Map::mapped_type>> { + using std::get; + std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template <class Set> +auto keys(const Set& s) + -> std::vector<typename std::decay<typename Set::key_type>::type> { + std::vector<typename std::decay<typename Set::key_type>::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +#if defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc new file mode 100644 index 00000000..c215c423 --- /dev/null +++ b/absl/container/internal/hash_policy_testing_test.cc @@ -0,0 +1,43 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_policy_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +namespace container_internal { +namespace { + +TEST(_, Hash) { + StatefulTestingHash h1; + EXPECT_EQ(1, h1.id()); + StatefulTestingHash h2; + EXPECT_EQ(2, h2.id()); + StatefulTestingHash h1c(h1); + EXPECT_EQ(1, h1c.id()); + StatefulTestingHash h2m(std::move(h2)); + EXPECT_EQ(2, h2m.id()); + EXPECT_EQ(0, h2.id()); + StatefulTestingHash h3; + EXPECT_EQ(3, h3.id()); + h3 = StatefulTestingHash(); + EXPECT_EQ(4, h3.id()); + h3 = std::move(h1); + EXPECT_EQ(1, h3.id()); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h new file mode 100644 index 00000000..029e47e1 --- /dev/null +++ b/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,189 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include <cstddef> +#include <memory> +#include <type_traits> +#include <utility> + +#include "absl/meta/type_traits.h" + +namespace absl { +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template <class Policy, class = void> +struct hash_policy_traits { + private: + struct ReturnKey { + // We return `Key` here. + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template <class Key, class... Args> + Key operator()(Key&& k, const Args&...) const { + return std::forward<Key>(k); + } + }; + + template <class P = Policy, class = void> + struct ConstantIteratorsImpl : std::false_type {}; + + template <class P> + struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. 
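+  // For example (illustrative): a map policy typically uses
+  // init_type = std::pair<K, V> even though value_type is
+  // std::pair<const K, V>, so an inserted pair's key can still be moved
+  // into the slot.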
+  using init_type = typename Policy::init_type;
+
+  using reference = decltype(Policy::element(std::declval<slot_type*>()));
+  using pointer = typename std::remove_reference<reference>::type*;
+  using value_type = typename std::remove_reference<reference>::type;
+
+  // Policies can set this variable to tell raw_hash_set that all iterators
+  // should be constant, even `iterator`. This is useful for set-like
+  // containers.
+  // Defaults to false if not provided by the policy.
+  using constant_iterators = ConstantIteratorsImpl<>;
+
+  // PRECONDITION: `slot` is UNINITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    Policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is UNINITIALIZED
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::destroy(alloc, slot);
+  }
+
+  // Transfers the `old_slot` to `new_slot`. Any memory allocated by the
+  // allocator inside `old_slot` may be transferred to `new_slot`.
+  //
+  // OPTIONAL: defaults to:
+  //
+  //     clone(new_slot, std::move(*old_slot));
+  //     destroy(old_slot);
+  //
+  // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+  // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+  // UNINITIALIZED
+  template <class Alloc>
+  static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+    transfer_impl(alloc, new_slot, old_slot, 0);
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  template <class P = Policy>
+  static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+    return P::element(slot);
+  }
+
+  // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+  //
+  // If `slot` is nullptr, returns the constant amount of memory owned by any
+  // full slot or -1 if slots own variable amounts of memory.
+  //
+  // PRECONDITION: `slot` is INITIALIZED or nullptr
+  template <class P = Policy>
+  static size_t space_used(const slot_type* slot) {
+    return P::space_used(slot);
+  }
+
+  // Provides generalized access to the key for elements, both for elements in
+  // the table and for elements that have not yet been inserted (or even
+  // constructed). We would like an API that allows us to say: `key(args...)`
+  // but we cannot do that for all cases, so we use this more general API that
+  // can be used for many things, including the following:
+  //
+  //   - Given an element in a table, get its key.
+  //   - Given an element initializer, get its key.
+  //   - Given `emplace()` arguments, get the element key.
+  //
+  // Implementations of this must adhere to a very strict technical
+  // specification around aliasing and consuming arguments:
+  //
+  // Let `value_type` be the result type of `element()` without ref- and
+  // cv-qualifiers. The first argument is a functor, the rest are constructor
+  // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+  // `k` is the element key, and `xs...` are the new constructor arguments for
+  // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+  // `ts...`. The key won't be touched once `xs...` are used to construct an
+  // element; `ts...` won't be touched at all, which allows `apply()` to consume
+  // any rvalues among them.
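+  //
+  // For example (illustrative): with a set policy built on DecomposeValue(),
+  // `apply(f, v)` invokes `f(key, v)` where `key` aliases `v`. With a map
+  // policy built on DecomposePair(), `apply(f, k, v)` invokes
+  // `f(key, std::piecewise_construct, <tuple of key args>, <tuple of value
+  // args>)`, where `key` aliases the key argument.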
+ // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template <class F, class... Ts, class P = Policy> + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) { + return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. + template <class P = Policy> + static auto key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template <class T, class P = Policy> + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. + template <class Alloc, class P = Policy> + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template <class Alloc> + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc new file mode 100644 index 00000000..423f1548 --- /dev/null +++ b/absl/container/internal/hash_policy_traits_test.cc @@ -0,0 +1,142 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
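+//
+// For reference, a minimal policy satisfying hash_policy_traits (illustrative
+// sketch; the tests below use mock-backed policies instead). transfer() is
+// deliberately omitted, so the traits fall back to construct + destroy, which
+// the without_transfer test verifies:
+//
+//   struct IntPolicy {
+//     using slot_type = int;
+//     using key_type = int;
+//     using init_type = int;
+//     template <class A, class... Args>
+//     static void construct(A* a, int* slot, Args&&... args) {
+//       std::allocator_traits<A>::construct(*a, slot,
+//                                           std::forward<Args>(args)...);
+//     }
+//     template <class A>
+//     static void destroy(A* a, int* slot) {
+//       std::allocator_traits<A>::destroy(*a, slot);
+//     }
+//     static int& element(int* slot) { return *slot; }
+//     template <class F, class... Args>
+//     static auto apply(F&& f, Args&&... args)
+//         -> decltype(absl::container_internal::DecomposeValue(
+//             std::forward<F>(f), std::forward<Args>(args)...)) {
+//       return absl::container_internal::DecomposeValue(
+//           std::forward<F>(f), std::forward<Args>(args)...);
+//     }
+//   };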
+ +#include "absl/container/internal/hash_policy_traits.h" + +#include <functional> +#include <memory> +#include <new> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::Return; +using ::testing::ReturnRef; + +using Alloc = std::allocator<int>; +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function<void(void*, Slot*, Slot)> construct; + static std::function<void(void*, Slot*)> destroy; + + static std::function<Slot&(Slot*)> element; + static int apply(int v) { return apply_impl(v); } + static std::function<int(int)> apply_impl; + static std::function<Slot&(Slot*)> value; +}; + +std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct; +std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy; + +std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element; +std::function<int(int)> PolicyWithoutOptionalOps::apply_impl; +std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function<void(void*, Slot*, Slot*)> transfer; +}; + +std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { + return apply.Call(a1); + }; + PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { + return value.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator<int> alloc; + int a = 53; + + MockFunction<void(void*, Slot*, Slot)> construct; + MockFunction<void(void*, Slot*)> destroy; + + MockFunction<Slot&(Slot*)> element; + MockFunction<int(int)> apply; + MockFunction<Slot&(Slot*)> value; + + MockFunction<void(void*, Slot*, Slot*)> transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a)); +} + +TEST_F(Test, apply) { + EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); + EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42))); +} + +TEST_F(Test, value) { + int b = 0; + EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a)); +} + +TEST_F(Test, without_transfer) { + int b = 42; + EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)); + EXPECT_CALL(destroy, Call(&alloc, &b)); + hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + 
hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h
new file mode 100644
index 00000000..c3bd65c9
--- /dev/null
+++ b/absl/container/internal/hashtable_debug.h
@@ -0,0 +1,108 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse into the actual behavior of the probing algorithms
+// in these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+namespace container_internal {
+
+// Returns the number of probes required to look up `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+    const C& c, const typename C::key_type& key) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+  std::vector<size_t> v;
+  for (auto it = container.begin(); it != container.end(); ++it) {
+    size_t num_probes = GetHashtableDebugNumProbes(
+        container,
+        absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+    v.resize(std::max(v.size(), num_probes + 1));
+    v[num_probes]++;
+  }
+  return v;
+}
+
+struct HashtableDebugProbeSummary {
+  size_t total_elements;
+  size_t total_num_probes;
+  double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
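+//
+// Example (hypothetical usage; any container supported by this header works):
+//
+//   std::unordered_set<int> s = {1, 2, 3};
+//   HashtableDebugProbeSummary summary = GetHashtableDebugProbeSummary(s);
+//   // summary.mean == double(total_num_probes) / total_elements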
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+  auto probes = GetHashtableDebugNumProbesHistogram(container);
+  HashtableDebugProbeSummary summary = {};
+  for (size_t i = 0; i < probes.size(); ++i) {
+    summary.total_elements += probes[i];
+    summary.total_num_probes += probes[i] * i;
+  }
+  summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+  return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+}  // namespace container_internal
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h
new file mode 100644
index 00000000..8f219726
--- /dev/null
+++ b/absl/container/internal/hashtable_debug_hooks.h
@@ -0,0 +1,81 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+namespace absl {
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If it is a map, call get<0>().
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+  return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+  return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+  // Returns the number of probes required to find `key` in `c`. The "number of
+  // probes" is a concept that can vary by container. Implementations should
+  // return 0 when `key` was found in the minimum number of operations and
+  // should increment the result for each non-trivial operation required to find
+  // `key`.
+  //
+  // The default implementation uses the bucket API from the standard and thus
+  // works for `std::unordered_*` containers.
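+  //
+  // A container can opt out of this default by specializing
+  // HashtableDebugAccess for itself. A sketch, assuming a hypothetical
+  // container with a `probe_length` accessor:
+  //
+  //   template <class K>
+  //   struct HashtableDebugAccess<MyTable<K>> {
+  //     static size_t GetNumProbes(const MyTable<K>& c, const K& key) {
+  //       return c.probe_length(key);  // hypothetical API
+  //     }
+  //   };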
+  static size_t GetNumProbes(const Container& c,
+                             const typename Container::key_type& key) {
+    if (!c.bucket_count()) return {};
+    size_t num_probes = 0;
+    size_t bucket = c.bucket(key);
+    for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+      if (it == e) return num_probes;
+      if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+    }
+  }
+
+  // Returns the number of bytes requested from the allocator by the container
+  // and not freed.
+  //
+  // static size_t AllocatedByteSize(const Container& c);
+
+  // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+  // `Container` and `c.size()` is equal to `num_elements`.
+  //
+  // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
+
+}  // namespace hashtable_debug_internal
+}  // namespace container_internal
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
new file mode 100644
index 00000000..0c239fe8
--- /dev/null
+++ b/absl/container/internal/layout.h
@@ -0,0 +1,732 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+//   struct S {
+//     double a[N];
+//     int b[M];
+//   };
+//
+//   S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to
+// the rescue! It's a portable generalization of the technique known as the
+// struct hack.
+//
+//   // This object will tell us everything we need to know about the memory
+//   // layout of double[N] followed by int[M]. It's structurally identical to
+//   // size_t[2] that stores N and M. It's very cheap to create.
+//   const Layout<double, int> layout(N, M);
+//
+//   // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+//   // memory is needed. We are free to use any allocation function we want as
+//   // long as it returns aligned memory.
+//   std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+//   // Obtain the pointer to the array of doubles.
+//   // Equivalent to `reinterpret_cast<double*>(p.get())`.
+//   //
+//   // We could have written layout.Pointer<0>(p.get()) instead. If all the
+//   // types are unique you can use either form, but if some types are
+//   // repeated you must use the index form.
+//   double* a = layout.Pointer<double>(p.get());
+//
+//   // Obtain the pointer to the array of ints.
+//   // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+//   int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify the sizes of all fields, we can pass as many
+// sizes as we can to `Partial()`. In return, it'll allow us to access the
+// fields whose locations and sizes can be computed from the provided
+// information. `Partial()` comes in handy when the array sizes are embedded
+// into the allocation.
+// +// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// using L = Layout<size_t, size_t, double, int>; +// +// unsigned char* Allocate(size_t n, size_t m) { +// const L layout(1, 1, n, m); +// unsigned char* p = new unsigned char[layout.AllocSize()]; +// *layout.Pointer<0>(p) = n; +// *layout.Pointer<1>(p) = m; +// return p; +// } +// +// void Use(unsigned char* p) { +// // First, extract N and M. +// // Specify that the first array has only one element. Using `prefix` we +// // can access the first two arrays but not more. +// constexpr auto prefix = L::Partial(1); +// size_t n = *prefix.Pointer<0>(p); +// size_t m = *prefix.Pointer<1>(p); +// +// // Now we can get pointers to the payload. +// const L layout(1, 1, n, m); +// double* a = layout.Pointer<double>(p); +// int* b = layout.Pointer<int>(p); +// } +// +// The layout we used above combines fixed-size with dynamically-sized fields. +// This is quite common. Layout is optimized for this use case and generates +// optimal code. All computations that can be performed at compile time are +// indeed performed at compile time. +// +// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to +// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no +// padding in between arrays. +// +// You can manually override the alignment of an array by wrapping the type in +// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). `N` cannot be less than `alignof(T)`. +// +// `AllocSize()` and `Pointer()` are the most basic methods for dealing with +// memory layouts. Check out the reference or code below to discover more. +// +// EXAMPLE +// +// // Immutable move-only string with sizeof equal to sizeof(void*). The +// // string size and the characters are kept in the same heap allocation. +// class CompactString { +// public: +// CompactString(const char* s = "") { +// const size_t size = strlen(s); +// // size_t[1] followed by char[size + 1]. +// const L layout(1, size + 1); +// p_.reset(new unsigned char[layout.AllocSize()]); +// // If running under ASAN, mark the padding bytes, if any, to catch +// // memory errors. +// layout.PoisonPadding(p_.get()); +// // Store the size in the allocation. +// *layout.Pointer<size_t>(p_.get()) = size; +// // Store the characters in the allocation. +// memcpy(layout.Pointer<char>(p_.get()), s, size + 1); +// } +// +// size_t size() const { +// // Equivalent to reinterpret_cast<size_t&>(*p). +// return *L::Partial().Pointer<size_t>(p_.get()); +// } +// +// const char* c_str() const { +// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)). +// // The argument in Partial(1) specifies that we have size_t[1] in front +// // of the characters. +// return L::Partial(1).Pointer<char>(p_.get()); +// } +// +// private: +// // Our heap allocation contains a size_t followed by an array of chars. +// using L = Layout<size_t, char>; +// std::unique_ptr<unsigned char[]> p_; +// }; +// +// int main() { +// CompactString s = "hello"; +// assert(s.size() == 5); +// assert(strcmp(s.c_str(), "hello") == 0); +// } +// +// DOCUMENTATION +// +// The interface exported by this file consists of: +// - class `Layout<>` and its public members. +// - The public members of class `internal_layout::LayoutImpl<>`. 
That class +// isn't intended to be used directly, and its name and template parameter +// list are internal implementation details, but the class itself provides +// most of the functionality in this file. See comments on its members for +// detailed documentation. +// +// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a +// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)` +// creates a `Layout` object, which exposes the same functionality by inheriting +// from `LayoutImpl<>`. + +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include <assert.h> +#include <stddef.h> +#include <stdint.h> +#include <ostream> +#include <string> +#include <tuple> +#include <type_traits> +#include <typeinfo> +#include <utility> + +#ifdef ADDRESS_SANITIZER +#include <sanitizer/asan_interface.h> +#endif + +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include <cxxabi.h> +#endif + +namespace absl { +namespace container_internal { + +// A type wrapper that instructs `Layout` to use the specific alignment for the +// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). +// +// Requires: `N >= alignof(T)` and `N` is a power of 2. +template <class T, size_t N> +struct Aligned; + +namespace internal_layout { + +template <class T> +struct NotAligned {}; + +template <class T, size_t N> +struct NotAligned<const Aligned<T, N>> { + static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified"); +}; + +template <size_t> +using IntToSize = size_t; + +template <class> +using TypeToSize = size_t; + +template <class T> +struct Type : NotAligned<T> { + using type = T; +}; + +template <class T, size_t N> +struct Type<Aligned<T, N>> { + using type = T; +}; + +template <class T> +struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {}; + +template <class T, size_t N> +struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {}; + +template <class T> +struct AlignOf : NotAligned<T>, std::integral_constant<size_t, alignof(T)> {}; + +template <class T, size_t N> +struct AlignOf<Aligned<T, N>> : std::integral_constant<size_t, N> { + static_assert(N % alignof(T) == 0, + "Custom alignment can't be lower than the type's alignment"); +}; + +// Does `Ts...` contain `T`? +template <class T, class... Ts> +using Contains = absl::disjunction<std::is_same<T, Ts>...>; + +template <class From, class To> +using CopyConst = + typename std::conditional<std::is_const<From>::value, const To, To>::type; + +template <class T> +using SliceType = absl::Span<T>; + +// This namespace contains no types. It prevents functions defined in it from +// being found by ADL. +namespace adl_barrier { + +template <class Needle, class... Ts> +constexpr size_t Find(Needle, Needle, Ts...) { + static_assert(!Contains<Needle, Ts...>(), "Duplicate element type"); + return 0; +} + +template <class Needle, class T, class... Ts> +constexpr size_t Find(Needle, T, Ts...) { + return adl_barrier::Find(Needle(), Ts()...) + 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. 
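+// E.g. Align(13, 8) == 16 and Align(16, 8) == 16.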
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+  return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+std::string TypeName() {
+  std::string out;
+  int status = 0;
+  char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+  demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+  if (status == 0 && demangled != nullptr) {  // Demangling succeeded.
+    absl::StrAppend(&out, "<", demangled, ">");
+    free(demangled);
+  } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+    absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+  }
+  return out;
+}
+
+}  // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
+template <class T>
+using IsLegalElementType = std::integral_constant<
+    bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+              !std::is_reference<typename Type<T>::type>::value &&
+              !std::is_volatile<typename Type<T>::type>::value &&
+              adl_barrier::IsPow2(AlignOf<T>::value)>;
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
+                 absl::index_sequence<OffsetSeq...>> {
+ private:
+  static_assert(sizeof...(Elements) > 0, "At least one field is required");
+  static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
+                "Invalid element type (see IsLegalElementType)");
+
+  enum {
+    NumTypes = sizeof...(Elements),
+    NumSizes = sizeof...(SizeSeq),
+    NumOffsets = sizeof...(OffsetSeq),
+  };
+
+  // These are guaranteed by `Layout`.
+  static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+                "Internal error");
+  static_assert(NumTypes > 0, "Internal error");
+
+  // Returns the index of `T` in `Elements...`. Results in a compilation error
+  // if `Elements...` doesn't contain exactly one instance of `T`.
+  template <class T>
+  static constexpr size_t ElementIndex() {
+    static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+                  "Type not found");
+    return adl_barrier::Find(Type<T>(),
+                             Type<typename Type<Elements>::type>()...);
+  }
+
+  template <size_t N>
+  using ElementAlignment =
+      AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+  // Element types of all arrays packed in a tuple.
+  using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+  // Element type of the Nth array.
+  template <size_t N>
+  using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+  constexpr explicit LayoutImpl(IntToSize<SizeSeq>...
sizes)
+      : size_{sizes...} {}
+
+  // Alignment of the layout, equal to the strictest alignment of all elements.
+  // All pointers passed to the methods of the layout must be aligned to this
+  // value.
+  static constexpr size_t Alignment() {
+    return adl_barrier::Max(AlignOf<Elements>::value...);
+  }
+
+  // Offset in bytes of the Nth array.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Offset<0>() == 0);   // The ints start from 0.
+  //   assert(x.Offset<1>() == 16);  // The doubles start from 16.
+  //
+  // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+  template <size_t N, EnableIf<N == 0> = 0>
+  constexpr size_t Offset() const {
+    return 0;
+  }
+
+  template <size_t N, EnableIf<N != 0> = 0>
+  constexpr size_t Offset() const {
+    static_assert(N < NumOffsets, "Index out of bounds");
+    return adl_barrier::Align(
+        Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+        ElementAlignment<N>());
+  }
+
+  // Offset in bytes of the array with the specified element type. There must
+  // be exactly one such array and its zero-based index must be at most
+  // `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Offset<int>() == 0);      // The ints start from 0.
+  //   assert(x.Offset<double>() == 16);  // The doubles start from 16.
+  template <class T>
+  constexpr size_t Offset() const {
+    return Offset<ElementIndex<T>()>();
+  }
+
+  // Offsets in bytes of all arrays for which the offsets are known.
+  constexpr std::array<size_t, NumOffsets> Offsets() const {
+    return {{Offset<OffsetSeq>()...}};
+  }
+
+  // The number of elements in the Nth array. This is the Nth argument of
+  // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<0>() == 3);
+  //   assert(x.Size<1>() == 4);
+  //
+  // Requires: `N < NumSizes`.
+  template <size_t N>
+  constexpr size_t Size() const {
+    static_assert(N < NumSizes, "Index out of bounds");
+    return size_[N];
+  }
+
+  // The number of elements in the array with the specified element type.
+  // There must be exactly one such array and its zero-based index must be
+  // at most `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<int>() == 3);
+  //   assert(x.Size<double>() == 4);
+  template <class T>
+  constexpr size_t Size() const {
+    return Size<ElementIndex<T>()>();
+  }
+
+  // The numbers of elements in all arrays for which the sizes are known.
+  constexpr std::array<size_t, NumSizes> Sizes() const {
+    return {{Size<SizeSeq>()...}};
+  }
+
+  // Pointer to the beginning of the Nth array.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints = x.Pointer<0>(p);
+  //   double* doubles = x.Pointer<1>(p);
+  //
+  // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+  // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char> + CopyConst<Char, ElementType<N>>* Pointer(Char* p) const { + using C = typename std::remove_const<Char>::type; + static_assert( + std::is_same<C, char>() || std::is_same<C, unsigned char>() || + std::is_same<C, signed char>(), + "The argument must be a pointer to [const] [signed|unsigned] char"); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast<uintptr_t>(p) % alignment == 0); + return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<int>(p); + // double* doubles = x.Pointer<double>(p); + // + // Requires: `p` is aligned to `Alignment()`. + template <class T, class Char> + CopyConst<Char, T>* Pointer(Char* p) const { + return Pointer<ElementIndex<T>()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template <class Char> + std::tuple<CopyConst< + Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...> + Pointers(Char* p) const { + return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>( + Pointer<OffsetSeq>(p)...); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span<int> ints = x.Slice<0>(p); + // Span<double> doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. + template <size_t N, class Char> + SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const { + return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span<int> ints = x.Slice<int>(p); + // Span<double> doubles = x.Slice<double>(p); + // + // Requires: `p` is aligned to `Alignment()`. + template <class T, class Char> + SliceType<CopyConst<Char, T>> Slice(Char* p) const { + return Slice<ElementIndex<T>()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span<int> ints; + // Span<double> doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. 
+ // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template <class Char> + std::tuple<SliceType<CopyConst< + Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...> + Slices(Char* p) const { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>( + Slice<SizeSeq>(p)...); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout<int, double> x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset<NumTypes - 1>() + + SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0> + void PoisonPadding(const Char* p) const { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0> + void PoisonPadding(const Char* p) const { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ADDRESS_SANITIZER + PoisonPadding<Char, N - 1>(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment<N - 1>() % ElementAlignment<N>()) { + size_t start = + Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout<char, int, double>::Partial(5, 3); + // assert(x.DebugString() == + // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)"); + // + // Each field is in the following format: @offset<type>(sizeof)[size] (<type> + // may be missing depending on the target platform). For example, + // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". + std::string DebugString() const { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...}; + const std::string types[] = {adl_barrier::TypeName<ElementType<OffsetSeq>>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], + "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. 
+    int last = static_cast<int>(NumSizes) - 1;
+    if (NumTypes == NumSizes && last >= 0) {
+      absl::StrAppend(&res, "[", size_[last], "]");
+    }
+    return res;
+  }
+
+ private:
+  // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+  size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+    std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+    absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+}  // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+  static_assert(sizeof...(Ts) > 0, "At least one field is required");
+  static_assert(
+      absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+      "Invalid element type (see IsLegalElementType)");
+
+  // The result type of `Partial()` with `NumSizes` arguments.
+  template <size_t NumSizes>
+  using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+  // `Layout` knows the element types of the arrays we want to lay out in
+  // memory but not the number of elements in each array.
+  // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+  // resulting immutable object can be used to obtain pointers to the
+  // individual arrays.
+  //
+  // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+  // pass one argument -- the number of elements in the first array.
+  //
+  //   // int[3] followed by 4 bytes of padding and an unknown number of
+  //   // doubles.
+  //   auto x = Layout<int, double>::Partial(3);
+  //   // doubles start at byte 16.
+  //   assert(x.Offset<1>() == 16);
+  //
+  // If you know the number of elements in all arrays, you can still call
+  // `Partial()` but it's more convenient to use the constructor of `Layout`.
+  //
+  //   Layout<int, double> x(3, 5);
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  //
+  // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+  // Requires: all arguments are convertible to `size_t`.
+  template <class... Sizes>
+  static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+    static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+    return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+  }
+
+  // Creates a layout with the sizes of all arrays specified. If you know
+  // only the sizes of the first N arrays (where N can be zero), you can use
+  // `Partial()` defined above. The constructor is essentially equivalent to
+  // calling `Partial()` and passing in all array sizes; the constructor is
+  // provided as a convenient abbreviation.
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+      : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...)
{} +}; + +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc new file mode 100644 index 00000000..f35157a3 --- /dev/null +++ b/absl/container/internal/layout_test.cc @@ -0,0 +1,1552 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/layout.h" + +// We need ::max_align_t because some libstdc++ versions don't provide +// std::max_align_t +#include <stddef.h> +#include <cstdint> +#include <memory> +#include <sstream> +#include <type_traits> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/types/span.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::absl::Span; +using ::testing::ElementsAre; + +size_t Distance(const void* from, const void* to) { + ABSL_RAW_CHECK(from <= to, "Distance must be non-negative"); + return static_cast<const char*>(to) - static_cast<const char*>(from); +} + +template <class Expected, class Actual> +Expected Type(Actual val) { + static_assert(std::is_same<Expected, Actual>(), ""); + return val; +} + +using Int128 = int64_t[2]; + +// Properties of types that this test relies on. 
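+// (Int128 above is int64_t[2], hence the sizeof 16 and alignof 8 asserted
+// below.)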
+static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template <class Expected, class Actual> +void SameType() { + static_assert(std::is_same<Expected, Actual>(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout<int32_t>; + SameType<int32_t, L::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial())::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>(); + } + { + using L = Layout<int32_t, int32_t>; + SameType<int32_t, L::ElementType<0>>(); + SameType<int32_t, L::ElementType<1>>(); + SameType<int32_t, decltype(L::Partial())::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial())::ElementType<1>>(); + SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>(); + } + { + using L = Layout<int8_t, int32_t, Int128>; + SameType<int8_t, L::ElementType<0>>(); + SameType<int32_t, L::ElementType<1>>(); + SameType<Int128, L::ElementType<2>>(); + SameType<int8_t, decltype(L::Partial())::ElementType<0>>(); + SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>(); + SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>(); + SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>(); + SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>(); + SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>(); + SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout<int32_t>; + SameType<std::tuple<int32_t>, L::ElementTypes>(); + SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>(); + SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout<int32_t, int32_t>; + SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>(); + SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>(); + SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout<int8_t, int32_t, Int128>; + SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>(); + SameType<std::tuple<int8_t, int32_t, Int128>, + decltype(L::Partial())::ElementTypes>(); + SameType<std::tuple<int8_t, int32_t, Int128>, + decltype(L::Partial(0))::ElementTypes>(); + SameType<std::tuple<int8_t, int32_t, Int128>, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType<std::tuple<int8_t, int32_t, Int128>, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, 
L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial().Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(3).Offset<int32_t>()); + EXPECT_EQ(0, L(3).Offset<int32_t>()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, L::Partial().Offset<int8_t>()); + EXPECT_EQ(0, L::Partial(0).Offset<int8_t>()); + EXPECT_EQ(0, L::Partial(0).Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(1).Offset<int8_t>()); + EXPECT_EQ(4, L::Partial(1).Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(5).Offset<int8_t>()); + EXPECT_EQ(8, L::Partial(5).Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout<int32_t>; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + 
EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, SizeByIndex) { + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Size<int32_t>()); + EXPECT_EQ(3, L::Partial(3).Size<int32_t>()); + EXPECT_EQ(3, L(3).Size<int32_t>()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Size<int8_t>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>()); + EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>()); + EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>()); + EXPECT_EQ(7, L(3, 5, 7).Size<Int128>()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout<int32_t>; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 
5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p)))); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) 
{ + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, + Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p)))); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p)))); + EXPECT_EQ(4, + Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p)))); + EXPECT_EQ(8, + Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p)))); + EXPECT_EQ( + 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p)))); + EXPECT_EQ( + 8, + Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p)))); + EXPECT_EQ( + 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<const Int128*>( + L::Partial(0, 0, 0).Pointer<Int128>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ( + 4, + Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ(8, Distance(p, Type<const Int128*>( + L::Partial(1, 0, 0).Pointer<Int128>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p)))); + EXPECT_EQ(24, Distance(p, Type<const Int128*>( + L::Partial(5, 3, 1).Pointer<Int128>(p)))); + EXPECT_EQ( + 8, + Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p)))); + EXPECT_EQ(24, + Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p)))); + EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p)))); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, 
Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p)))); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p)))); + EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ(0, + Distance(p, 
Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p)))); + EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p)))); + EXPECT_EQ(8, + Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p)))); + EXPECT_EQ(24, + Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p)))); + EXPECT_EQ(0, + Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ(0, + Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ( + 0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p)))); + EXPECT_EQ(0, + Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p)))); + EXPECT_EQ(4, + Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p)))); + EXPECT_EQ( + 8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p)))); + EXPECT_EQ(0, + Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p)))); + EXPECT_EQ( + 24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p)))); + EXPECT_EQ(8, + Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p)))); + EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p)))); + EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p)))); + EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout<int8_t, int8_t, Int128>; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type<std::tuple<const int8_t*>>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100]; + using L = Layout<int8_t, int8_t, Int128>; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type<std::tuple<int8_t*>>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + 
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L(3).Slice<int32_t>(p).size()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L(3).Slice<int32_t>(p).size()); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 
7).Slice<Int128>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, + Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, + Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, + Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, + Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout<int32_t>; + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const 
int32_t>>(L::Partial(3).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data())); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>( + L::Partial(0, 0, 0).Slice<Int128>(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>( + L::Partial(1, 0, 0).Slice<Int128>(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data())); + EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>( + L::Partial(5, 3, 1).Slice<Int128>(p)) + .data())); + EXPECT_EQ( + 8, + Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data())); + } +} + +TEST(Layout, MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ(0, + Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout<int32_t, int32_t>; + EXPECT_EQ(0, + Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ(0, + Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, 
Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout<int32_t>; + EXPECT_EQ( + 0, + Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data())); + EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data())); + } + { + using L = Layout<int8_t, int32_t, Int128>; + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + 
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data())); + } +} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template <typename... M> +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template <typename Tuple> + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size<Tuple>::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template <typename Tuple, size_t... Is> + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element<Is, Tuple>::type&>( + std::get<Is>(matchers_)) + .Matches(std::get<Is>(p))...}); + } + + std::tuple<M...> matchers_; +}; + +template <typename... M> +testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... 
matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher<M...>(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout<int8_t, int8_t, Int128>; + { + const auto x = L::Partial(); + EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>, + Span<const Int128>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>, + Span<const Int128>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {}; + using L = Layout<int8_t, int8_t, Int128>; + { + const auto x = L::Partial(); + EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT( + (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT( + (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3); + alignas(2 * M) unsigned char p[x.AllocSize()]; + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout<int8_t>::Alignment() == 1, ""); + static_assert(Layout<int32_t>::Alignment() == 4, ""); + static_assert(Layout<int64_t>::Alignment() == 8, ""); + static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, ""); + static_assert(Layout<int8_t, int32_t, int64_t>::Alignment() == 8, ""); + static_assert(Layout<int8_t, int64_t, int32_t>::Alignment() == 8, ""); + static_assert(Layout<int32_t, int8_t, int64_t>::Alignment() == 8, ""); + static_assert(Layout<int32_t, int64_t, int8_t>::Alignment() == 8, ""); + static_assert(Layout<int64_t, int8_t, int32_t>::Alignment() == 
8, ""); + static_assert(Layout<int64_t, int32_t, int8_t>::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); + } +#endif +} + +template <size_t N> +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list<Region> reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout<int8_t, int64_t, int32_t, Int128>; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + const std::string int64_type = +#ifdef _MSC_VER + "__int64"; +#else // _MSC_VER + std::is_same<int64_t, long long>::value ? 
"long long" : "long"; // NOLINT +#endif // _MSC_VER + { + constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(); + EXPECT_EQ("@0<signed char>(1)", x.DebugString()); + } + { + constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1); + EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString()); + } + { + constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2); + EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)", + x.DebugString()); + } + { + constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3); + EXPECT_EQ( + "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; " + "@16<" + + int64_type + " [2]>(16)", + x.DebugString()); + } + { + constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; " + "@16<" + + int64_type + " [2]>(16)[4]", + x.DebugString()); + } + { + constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4); + EXPECT_EQ( + "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; " + "@16<" + + int64_type + " [2]>(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout<int32_t> x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type<int32_t*>(x.Pointer<0>(c)); + Type<int32_t*>(x.Pointer<0>(uc)); + Type<int32_t*>(x.Pointer<0>(sc)); + Type<const int32_t*>(x.Pointer<0>(cc)); + Type<const int32_t*>(x.Pointer<0>(cuc)); + Type<const int32_t*>(x.Pointer<0>(csc)); + + Type<int32_t*>(x.Pointer<int32_t>(c)); + Type<int32_t*>(x.Pointer<int32_t>(uc)); + Type<int32_t*>(x.Pointer<int32_t>(sc)); + Type<const int32_t*>(x.Pointer<int32_t>(cc)); + Type<const int32_t*>(x.Pointer<int32_t>(cuc)); + Type<const int32_t*>(x.Pointer<int32_t>(csc)); + + Type<std::tuple<int32_t*>>(x.Pointers(c)); + Type<std::tuple<int32_t*>>(x.Pointers(uc)); + Type<std::tuple<int32_t*>>(x.Pointers(sc)); + Type<std::tuple<const int32_t*>>(x.Pointers(cc)); + Type<std::tuple<const int32_t*>>(x.Pointers(cuc)); + Type<std::tuple<const int32_t*>>(x.Pointers(csc)); + + Type<Span<int32_t>>(x.Slice<0>(c)); + Type<Span<int32_t>>(x.Slice<0>(uc)); + Type<Span<int32_t>>(x.Slice<0>(sc)); + Type<Span<const int32_t>>(x.Slice<0>(cc)); + Type<Span<const int32_t>>(x.Slice<0>(cuc)); + Type<Span<const int32_t>>(x.Slice<0>(csc)); + + Type<std::tuple<Span<int32_t>>>(x.Slices(c)); + Type<std::tuple<Span<int32_t>>>(x.Slices(uc)); + Type<std::tuple<Span<int32_t>>>(x.Slices(sc)); + Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)); + Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc)); + Type<std::tuple<Span<const int32_t>>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout<const int32_t> x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + const int32_t* p = reinterpret_cast<const int32_t*>(cc); + + EXPECT_EQ(alignof(int32_t), x.Alignment()); + + EXPECT_EQ(0, x.Offset<0>()); + EXPECT_EQ(0, x.Offset<const int32_t>()); + + EXPECT_THAT(x.Offsets(), ElementsAre(0)); + + EXPECT_EQ(1, x.Size<0>()); + EXPECT_EQ(1, x.Size<const int32_t>()); + + EXPECT_THAT(x.Sizes(), ElementsAre(1)); + + EXPECT_EQ(sizeof(int32_t), x.AllocSize()); + + EXPECT_EQ(p, 
Type<const int32_t*>(x.Pointer<0>(c))); + EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc))); + + EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c))); + EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc))); + + EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p)); + EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p)); + + EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)), + IsSameSlice(Span<const int32_t>(p, 1))); + EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)), + IsSameSlice(Span<const int32_t>(p, 1))); + + EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)), + IsSameSlice(Span<const int32_t>(p, 1))); + EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)), + IsSameSlice(Span<const int32_t>(p, 1))); + + EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)), + Tuple(IsSameSlice(Span<const int32_t>(p, 1)))); + EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)), + Tuple(IsSameSlice(Span<const int32_t>(p, 1)))); +} + +namespace example { + +// Immutable move-only string with sizeof equal to sizeof(void*). The string +// size and the characters are kept in the same heap allocation. +class CompactString { + public: + CompactString(const char* s = "") { // NOLINT + const size_t size = strlen(s); + // size_t[1], followed by char[size + 1]. + // This statement doesn't allocate memory. + const L layout(1, size + 1); + // AllocSize() tells us how much memory we need to allocate for all our + // data. + p_.reset(new unsigned char[layout.AllocSize()]); + // If running under ASAN, mark the padding bytes, if any, to catch memory + // errors. + layout.PoisonPadding(p_.get()); + // Store the size in the allocation. + // Pointer<size_t>() is a synonym for Pointer<0>(). + *layout.Pointer<size_t>(p_.get()) = size; + // Store the characters in the allocation. + memcpy(layout.Pointer<char>(p_.get()), s, size + 1); + } + + size_t size() const { + // Equivalent to reinterpret_cast<size_t&>(*p). + return *L::Partial().Pointer<size_t>(p_.get()); + } + + const char* c_str() const { + // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)). + // The argument in Partial(1) specifies that we have size_t[1] in front of + // the + // characters. + return L::Partial(1).Pointer<char>(p_.get()); + } + + private: + // Our heap allocation contains a size_t followed by an array of chars. + using L = Layout<size_t, char>; + std::unique_ptr<unsigned char[]> p_; +}; + +TEST(CompactString, Works) { + CompactString s = "hello"; + EXPECT_EQ(5, s.size()); + EXPECT_STREQ("hello", s.c_str()); +} + +} // namespace example + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h new file mode 100644 index 00000000..065e7009 --- /dev/null +++ b/absl/container/internal/node_hash_policy.h @@ -0,0 +1,88 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapts a policy for nodes. +// +// The node policy should model: +// +// struct Policy { +// // Returns a new node allocated and constructed using the allocator, using +// // the specified arguments. +// template <class Alloc, class... Args> +// value_type* new_element(Alloc* alloc, Args&&... args) const; +// +// // Destroys and deallocates node using the allocator. +// template <class Alloc> +// void delete_element(Alloc* alloc, value_type* node) const; +// }; +// +// It may also optionally define `value()` and `apply()`. For documentation on +// these, see hash_policy_traits.h. + +#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ + +#include <cassert> +#include <cstddef> +#include <memory> +#include <type_traits> +#include <utility> + +namespace absl { +namespace container_internal { + +template <class Reference, class Policy> +struct node_hash_policy { + static_assert(std::is_lvalue_reference<Reference>::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference<Reference>::type>::type*; + + template <class Alloc, class... Args> + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + *slot = Policy::new_element(alloc, std::forward<Args>(args)...); + } + + template <class Alloc> + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::delete_element(alloc, *slot); + } + + template <class Alloc> + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { + *new_slot = *old_slot; + } + + static size_t space_used(const slot_type* slot) { + if (slot == nullptr) return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) { return **slot; } + + template <class T, class P = Policy> + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + template <class... Ts, class P = Policy> + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) { + return P::apply(std::forward<Ts>(ts)...); + } +}; + +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_ diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc new file mode 100644 index 00000000..43d287e3 --- /dev/null +++ b/absl/container/internal/node_hash_policy_test.cc @@ -0,0 +1,67 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
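Editor's note before the test body: with node_hash_policy the slot is nothing more than a pointer to a heap-allocated element; construct() forwards to the policy's new_element() and stores the result in the slot, and destroy() hands the stored pointer to delete_element(). A minimal usage sketch (not part of the patch), reusing the `Policy` type defined in the test below and plain `assert` from <cassert>:

  std::allocator<int> a;
  Policy::slot_type slot = nullptr;     // slot_type is int*: the slot is just a pointer.
  Policy::construct(&a, &slot, 7);      // slot = Policy::new_element(&a, 7), i.e. new int(7).
  assert(Policy::element(&slot) == 7);  // element() dereferences the stored pointer.
  Policy::destroy(&a, &slot);           // delete_element(&a, slot) frees the int.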
+ +#include "absl/container/internal/node_hash_policy.h" + +#include <memory> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_policy_traits.h" + +namespace absl { +namespace container_internal { +namespace { + +using ::testing::Pointee; + +struct Policy : node_hash_policy<int&, Policy> { + using key_type = int; + using init_type = int; + + template <class Alloc> + static int* new_element(Alloc* alloc, int value) { + return new int(value); + } + + template <class Alloc> + static void delete_element(Alloc* alloc, int* elem) { + delete elem; + } +}; + +using NodePolicy = hash_policy_traits<Policy>; + +struct NodeTest : ::testing::Test { + std::allocator<int> alloc; + int n = 53; + int* a = &n; +}; + +TEST_F(NodeTest, ConstructDestroy) { + NodePolicy::construct(&alloc, &a, 42); + EXPECT_THAT(a, Pointee(42)); + NodePolicy::destroy(&alloc, &a); +} + +TEST_F(NodeTest, transfer) { + int s = 42; + int* b = &s; + NodePolicy::transfer(&alloc, &a, &b); + EXPECT_EQ(&s, a); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h new file mode 100644 index 00000000..1edc0071 --- /dev/null +++ b/absl/container/internal/raw_hash_map.h @@ -0,0 +1,182 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include <tuple> +#include <type_traits> +#include <utility> + +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl { +namespace container_internal { + +template <class Policy, class Hash, class Eq, class Alloc> +class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map<K, IncompleteType>. + // MappedReference<> may be a non-reference type. + template <class P> + using MappedReference = decltype(P::value( + std::addressof(std::declval<typename raw_hash_map::reference>()))); + + // MappedConstReference<> may be a non-reference type. + template <class P> + using MappedConstReference = decltype(P::value( + std::addressof(std::declval<typename raw_hash_map::const_reference>()))); + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template <typename K> + using key_arg = typename raw_hash_map::raw_hash_set::template key_arg<K>; + + static_assert(!std::is_reference<key_type>::value, ""); + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. 
+ static_assert(!std::is_reference<mapped_type>::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map<int, int> m; + // m.insert_or_assign(n, n); + template <class K = key_type, class V = mapped_type, K* = nullptr, + V* = nullptr> + std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) { + return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v)); + } + + template <class K = key_type, class V = mapped_type, K* = nullptr> + std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) { + return insert_or_assign_impl(std::forward<K>(k), v); + } + + template <class K = key_type, class V = mapped_type, V* = nullptr> + std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) { + return insert_or_assign_impl(k, std::forward<V>(v)); + } + + template <class K = key_type, class V = mapped_type> + std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template <class K = key_type, class V = mapped_type, K* = nullptr, + V* = nullptr> + iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) { + return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first; + } + + template <class K = key_type, class V = mapped_type, K* = nullptr> + iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) { + return insert_or_assign(std::forward<K>(k), v).first; + } + + template <class K = key_type, class V = mapped_type, V* = nullptr> + iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) { + return insert_or_assign(k, std::forward<V>(v)).first; + } + + template <class K = key_type, class V = mapped_type> + iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) { + return insert_or_assign(k, v).first; + } + + template <class K = key_type, class... Args, + typename std::enable_if< + !std::is_convertible<K, const_iterator>::value, int>::type = 0, + K* = nullptr> + std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) { + return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...); + } + + template <class K = key_type, class... Args, + typename std::enable_if< + !std::is_convertible<K, const_iterator>::value, int>::type = 0> + std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) { + return try_emplace_impl(k, std::forward<Args>(args)...); + } + + template <class K = key_type, class... Args, K* = nullptr> + iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) { + return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first; + } + + template <class K = key_type, class... Args> + iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... 
args) { + return try_emplace(k, std::forward<Args>(args)...).first; + } + + template <class K = key_type, class P = Policy> + MappedReference<P> at(const key_arg<K>& key) { + auto it = this->find(key); + if (it == this->end()) std::abort(); + return Policy::value(&*it); + } + + template <class K = key_type, class P = Policy> + MappedConstReference<P> at(const key_arg<K>& key) const { + auto it = this->find(key); + if (it == this->end()) std::abort(); + return Policy::value(&*it); + } + + template <class K = key_type, class P = Policy, K* = nullptr> + MappedReference<P> operator[](key_arg<K>&& key) { + return Policy::value(&*try_emplace(std::forward<K>(key)).first); + } + + template <class K = key_type, class P = Policy> + MappedReference<P> operator[](const key_arg<K>& key) { + return Policy::value(&*try_emplace(key).first); + } + + private: + template <class K, class V> + std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v); + return {this->iterator_at(res.first), res.second}; + } + + template <class K = key_type, class... Args> + std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward<K>(k)), + std::forward_as_tuple(std::forward<Args>(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc new file mode 100644 index 00000000..10153129 --- /dev/null +++ b/absl/container/internal/raw_hash_set.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include <cstddef> + +#include "absl/base/config.h" + +namespace absl { +namespace container_internal { + +constexpr size_t Group::kWidth; + +// Returns "random" seed. +inline size_t RandomSeed() { +#if ABSL_HAVE_THREAD_LOCAL + static thread_local size_t counter = 0; + size_t value = ++counter; +#else // ABSL_HAVE_THREAD_LOCAL + static std::atomic<size_t> counter; + size_t value = counter.fetch_add(1, std::memory_order_relaxed); +#endif // ABSL_HAVE_THREAD_LOCAL + return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter)); +} + +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) { + // To avoid problems with weak hashes and single bit tests, we use % 13. 
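+  // Unlike masking with a power of two, % 13 depends on every bit of the
+  // seeded hash, and residues 7..12 out of 0..12 give a near-even split.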
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing + return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; +} + +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h new file mode 100644 index 00000000..0c0e5906 --- /dev/null +++ b/absl/container/internal/raw_hash_set.h @@ -0,0 +1,1906 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template <class K> +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template <class K> +// size_type erase(const K& key); +// +// std::pair<iterator, iterator> equal_range(const key_type& key); +// template <class K> +// std::pair<iterator, iterator> equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template <class U> +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// The table stores elements inline in a slot array. In addition to the slot +// array the table maintains some control state per slot. The extra state is one +// byte per slot and stores empty or deleted marks, or alternatively 7 bits from +// the hash of an occupied slot. The table is split into logical groups of +// slots, like so: +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// On lookup the hash is split into two parts: +// - H2: 7 bits (those stored in the control bytes) +// - H1: the rest of the bits +// The groups are probed using H1. For each group the slots are matched to H2 in +// parallel. Because H2 is 7 bits (128 states) and the number of slots per group +// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. +// +// On insert, once the right group is found (as in lookup), its slots are +// filled in order. +// +// On erase a slot is cleared. 
In case the group did not have any empty slots +// before the erase, the erased slot is marked as deleted. +// +// Groups without empty slots (but maybe with deleted slots) extend the probe +// sequence. The probing algorithm is quadratic. Given N the number of groups, +// the probing function for the i'th probe is: +// +// P(0) = H1 % N +// +// P(i) = (P(i - 1) + i) % N +// +// This probing function guarantees that after N probes, all the groups of the +// table will be probed exactly once. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ + +#ifndef SWISSTABLE_HAVE_SSE2 +#ifdef __SSE2__ +#define SWISSTABLE_HAVE_SSE2 1 +#else +#define SWISSTABLE_HAVE_SSE2 0 +#endif +#endif + +#ifndef SWISSTABLE_HAVE_SSSE3 +#ifdef __SSSE3__ +#define SWISSTABLE_HAVE_SSSE3 1 +#else +#define SWISSTABLE_HAVE_SSSE3 0 +#endif +#endif + +#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2 +#error "Bad configuration!" +#endif + +#if SWISSTABLE_HAVE_SSE2 +#include <x86intrin.h> +#endif + +#include <algorithm> +#include <cmath> +#include <cstdint> +#include <cstring> +#include <iterator> +#include <limits> +#include <memory> +#include <tuple> +#include <type_traits> +#include <utility> + +#include "absl/base/internal/bits.h" +#include "absl/base/internal/endian.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +namespace absl { +namespace container_internal { + +template <size_t Width> +class probe_seq { + public: + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index. The i-th probe in the probe sequence. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template <class ContainerKey, class Hash, class Eq> +struct RequireUsableKey { + template <class PassedKey, class... Args> + std::pair< + decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())), + decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(), + std::declval<const PassedKey&>()))>* + operator()(const PassedKey&, const Args&...) const; +}; + +template <class E, class Policy, class Hash, class Eq, class... Ts> +struct IsDecomposable : std::false_type {}; + +template <class Policy, class Hash, class Eq, class... Ts> +struct IsDecomposable< + absl::void_t<decltype( + Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(), + std::declval<Ts>()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +template <class, class = void> +struct IsTransparent : std::false_type {}; +template <class T> +struct IsTransparent<T, absl::void_t<typename T::is_transparent>> + : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. 
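+// The hand-rolled probe below detects the same property through an
+// unqualified swap call (std::swap or an ADL-found overload).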
+template <class T>
+constexpr bool IsNoThrowSwappable() {
+  using std::swap;
+  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
+
+template <typename T>
+int TrailingZeros(T x) {
+  return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(x)
+                        : base_internal::CountTrailingZerosNonZero32(x);
+}
+
+template <typename T>
+int LeadingZeros(T x) {
+  return sizeof(T) == 8 ? base_internal::CountLeadingZeros64(x)
+                        : base_internal::CountLeadingZeros32(x);
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through
+// the indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+  static_assert(std::is_unsigned<T>::value, "");
+  static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+  // These are useful for unit tests (gunit).
+  using value_type = int;
+  using iterator = BitMask;
+  using const_iterator = BitMask;
+
+  explicit BitMask(T mask) : mask_(mask) {}
+  BitMask& operator++() {
+    mask_ &= (mask_ - 1);
+    return *this;
+  }
+  explicit operator bool() const { return mask_ != 0; }
+  int operator*() const { return LowestBitSet(); }
+  int LowestBitSet() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+  int HighestBitSet() const {
+    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
+            1) >>
+           Shift;
+  }
+
+  BitMask begin() const { return *this; }
+  BitMask end() const { return BitMask(0); }
+
+  int TrailingZeros() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+
+  int LeadingZeros() const {
+    constexpr int total_significant_bits = SignificantBits << Shift;
+    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+    return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+  }
+
+ private:
+  friend bool operator==(const BitMask& a, const BitMask& b) {
+    return a.mask_ == b.mask_;
+  }
+  friend bool operator!=(const BitMask& a, const BitMask& b) {
+    return a.mask_ != b.mask_;
+  }
+
+  T mask_;
+};
+
+using ctrl_t = signed char;
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
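+// A full slot's control byte holds the 7-bit H2 of its element's hash, so it
+// lies in [0, 127]; the special values below all have the sign bit set and
+// can never collide with a stored H2.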
+enum Ctrl : ctrl_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + kEmpty & kDeleted & kSentinel & 0x80, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). +inline ctrl_t* EmptyGroup() { + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast<ctrl_t*>(empty_group); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); + +// Returns a hash seed. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. +inline size_t HashSeed(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. + return reinterpret_cast<uintptr_t>(ctrl) >> 12; +} + +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + return (hash >> 7) ^ HashSeed(ctrl); +} +inline ctrl_t H2(size_t hash) { return hash & 0x7F; } + +inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= 0; } +inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } + +#if SWISSTABLE_HAVE_SSE2 +struct Group { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit Group(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask<uint32_t, kWidth> Match(h2_t hash) const { + auto match = _mm_set1_epi8(hash); + return BitMask<uint32_t, kWidth>( + _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + } + + // Returns a bitmask representing the positions of empty slots. + BitMask<uint32_t, kWidth> MatchEmpty() const { +#if SWISSTABLE_HAVE_SSSE3 + // This only works because kEmpty is -128. + return BitMask<uint32_t, kWidth>( + _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); +#else + return Match(kEmpty); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. 
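+  // kEmpty (-128) and kDeleted (-2) are the only control values less than
+  // kSentinel (-1), while full slots are non-negative, so a single signed
+  // compare against kSentinel matches exactly the empty and deleted slots.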
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return BitMask<uint32_t, kWidth>( + _mm_movemask_epi8(_mm_cmpgt_epi8(special, ctrl))); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return TrailingZeros(_mm_movemask_epi8(_mm_cmpgt_epi8(special, ctrl)) + 1); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(0x80); + auto x126 = _mm_set1_epi8(126); +#if SWISSTABLE_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; +#else +struct Group { + static constexpr size_t kWidth = 8; + + explicit Group(const ctrl_t* pos) : ctrl(little_endian::Load64(pos)) {} + + BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). + // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on kEmpty, kDeleted, kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs); + } + + BitMask<uint64_t, kWidth, 3> MatchEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs); + } + + BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; + return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; +#endif // SWISSTABLE_HAVE_SSE2 + +template <class Policy, class Hash, class Eq, class Alloc> +class raw_hash_set; + + +inline bool IsValidCapacity(size_t n) { + return ((n + 1) & n) == 0 && n >= Group::kWidth - 1; +} + +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == kSentinel +// ctrl[i] != kSentinel for all i < capacity +// Applies mapping for every byte in ctrl: +// DELETED -> EMPTY +// EMPTY -> EMPTY +// FULL -> DELETED +inline void ConvertDeletedToEmptyAndFullToDeleted( + ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. 
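+  // (The Group::kWidth bytes after the sentinel mirror the first bytes of the
+  // table, so group loads that start near the end of the control array read
+  // valid bytes instead of running off the end.)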
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
+// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
+// greater than or equal to Group::kWidth - 1.
+inline size_t NormalizeCapacity(size_t n) {
+  constexpr size_t kMinCapacity = Group::kWidth - 1;
+  return n <= kMinCapacity
+             ? kMinCapacity
+             : std::numeric_limits<size_t>::max() >> LeadingZeros(n);
+}
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename Policy, typename Alloc>
+class node_handle_base {
+ protected:
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+  using allocator_type = Alloc;
+
+  constexpr node_handle_base() {}
+  node_handle_base(node_handle_base&& other) noexcept {
+    *this = std::move(other);
+  }
+  ~node_handle_base() { destroy(); }
+  node_handle_base& operator=(node_handle_base&& other) {
+    destroy();
+    if (!other.empty()) {
+      alloc_ = other.alloc_;
+      PolicyTraits::transfer(alloc(), slot(), other.slot());
+      other.reset();
+    }
+    return *this;
+  }
+
+  bool empty() const noexcept { return !alloc_; }
+  explicit operator bool() const noexcept { return !empty(); }
+  allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
+    PolicyTraits::transfer(alloc(), slot(), s);
+  }
+
+  void destroy() {
+    if (!empty()) {
+      PolicyTraits::destroy(alloc(), slot());
+      reset();
+    }
+  }
+
+  void reset() {
+    assert(alloc_.has_value());
+    alloc_ = absl::nullopt;
+  }
+
+  slot_type* slot() const {
+    assert(!empty());
+    return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+  }
+  allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+  absl::optional<allocator_type> alloc_;
+  mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
+      slot_space_;
+};
+
+// For sets.
+template <typename Policy, typename Alloc, typename = void>
+class node_handle : public node_handle_base<Policy, Alloc> {
+  using Base = typename node_handle::node_handle_base;
+
+ public:
+  using value_type = typename Base::PolicyTraits::value_type;
+
+  constexpr node_handle() {}
+
+  value_type& value() const {
+    return Base::PolicyTraits::element(this->slot());
+  }
+
+ private:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
+
+// For maps.
+template <typename Policy, typename Alloc>
+class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
+    : public node_handle_base<Policy, Alloc> {
+  using Base = typename node_handle::node_handle_base;
+
+ public:
+  using key_type = typename Policy::key_type;
+  using mapped_type = typename Policy::mapped_type;
+
+  constexpr node_handle() {}
+
+  auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
+    return Base::PolicyTraits::key(this->slot());
+  }
+
+  mapped_type& mapped() const {
+    return Base::PolicyTraits::value(
+        &Base::PolicyTraits::element(this->slot()));
+  }
+
+ private:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
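+// A usage sketch (illustrative; `m` is a hypothetical map exposing these
+// handles):
+//
+//   auto node = m.extract("a");   // `m` gives up ownership of the element
+//   node.mapped() = 2;            // mutate through the handle's accessors
+//   m.insert(std::move(node));    // reinsert without copying the element
+
+// Implement the insert_return_type<> concept of C++17.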
+template <class Iterator, class NodeType>
+struct insert_return_type {
+  Iterator position;
+  bool inserted;
+  NodeType node;
+};
+
+// Helper trait to allow or disallow arbitrary keys when the hash and
+// eq functions are transparent.
+// It is very important that the inner template is an alias and that the type it
+// produces is not a dependent type. Otherwise, type deduction would fail.
+template <bool is_transparent>
+struct KeyArg {
+  // Transparent. Forward `K`.
+  template <typename K, typename key_type>
+  using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+  // Not transparent. Always use `key_type`.
+  template <typename K, typename key_type>
+  using type = key_type;
+};
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two (of possibly different type) keys and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
+                                                IsTransparent<Hash>::value>;
+
+ public:
+  using init_type = typename PolicyTraits::init_type;
+  using key_type = typename PolicyTraits::key_type;
+  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+  // code fixes!
+  using slot_type = typename PolicyTraits::slot_type;
+  using allocator_type = Alloc;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using hasher = Hash;
+  using key_equal = Eq;
+  using policy_type = Policy;
+  using value_type = typename PolicyTraits::value_type;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::pointer;
+  using const_pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+  // Alias used for heterogeneous lookup functions.
+  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+  // `key_type` otherwise. It permits template argument deduction on `K` for the
+  // transparent case.
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
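+  // A sketch of what this alias buys us (assumes transparent functors, e.g.
+  // ones that accept both std::string and absl::string_view):
+  //
+  //   flat_hash_set<std::string> s = {"abc"};
+  //   s.find(absl::string_view("abc"));  // no temporary std::string is built
+  //
+  // With non-transparent functors, `key_arg<K>` collapses to `key_type` and
+  // the argument is converted up front.
+
+  // Give an early error when key_type is not hashable/eq.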
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using Layout = absl::container_internal::Layout<ctrl_t, slot_type>; + + static Layout MakeLayout(size_t capacity) { + assert(IsValidCapacity(capacity)); + return Layout(capacity + Group::kWidth + 1, capacity); + } + + using AllocTraits = absl::allocator_traits<allocator_type>; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc<slot_type>; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits<slot_type>; + + static_assert(std::is_lvalue_reference<reference>::value, + "Policy::element() must return a reference"); + + template <typename T> + struct SameAsElementReference + : std::is_same<typename std::remove_cv< + typename std::remove_reference<reference>::type>::type, + typename std::remove_cv< + typename std::remove_reference<T>::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template <class T> + using RequiresInsertable = typename std::enable_if< + absl::disjunction<std::is_convertible<T, init_type>, + SameAsElementReference<T>>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. + template <class T> + using RequiresNotInit = + typename std::enable_if<!std::is_same<T, init_type>::value, int>::type; + + template <class... Ts> + using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>; + + public: + static_assert(std::is_same<pointer, value_type*>::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same<const_pointer, const value_type*>::value, + "Allocators with custom pointer types are not supported"); + + class iterator { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t<PolicyTraits::constant_iterators::value, + const value_type&, value_type&>; + using pointer = absl::remove_reference_t<reference>*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() {} + + // PRECONDITION: not an end() iterator. + reference operator*() const { return PolicyTraits::element(slot_); } + + // PRECONDITION: not an end() iterator. + pointer operator->() const { return &operator*(); } + + // PRECONDITION: not an end() iterator. + iterator& operator++() { + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end() + iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {} + + void skip_empty_or_deleted() { + while (IsEmptyOrDeleted(*ctrl_)) { + // ctrl is not necessarily aligned to Group::kWidth. It is also likely + // to read past the space for ctrl bytes and into slots. 
This is ok + // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there + // is no way to read outside the combined slot array. + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + } + + ctrl_t* ctrl_ = nullptr; + slot_type* slot_; + }; + + class const_iterator { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() {} + // Implicit construction from iterator. + const_iterator(iterator i) : inner_(std::move(i)) {} + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) + : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {} + + iterator inner_; + }; + + using node_type = container_internal::node_handle<Policy, Alloc>; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible<hasher>::value&& + std::is_nothrow_default_constructible<key_equal>::value&& + std::is_nothrow_default_constructible<allocator_type>::value) {} + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { + if (bucket_count) { + capacity_ = NormalizeCapacity(bucket_count); + growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template <class InputIter> + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(bucket_count, hash, eq, alloc) { + insert(first, last); + } + + template <class InputIter> + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template <class InputIter> + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template <class InputIter> + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list<value_type> as the first + // argument like std::unordered_set<value_type> does, we have two overloads + // that accept 
std::initializer_list<T> and std::initializer_list<init_type>. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies + // // the strings into the set. + // std::unordered_set<std::string> s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list<const char*>, then + // // copies the strings into the set. + // absl::flat_hash_set<std::string> s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set<int> a, b{a}; + // + // RequiresNotInit<T> is a workaround for gcc prior to 7.1. + template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> + raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> + raw_hash_set(std::initializer_list<T> init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> + raw_hash_set(std::initializer_list<T> init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> + raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list<init_type> init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + reserve(that.size()); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. 
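+    // (Each element is hashed and written straight into a freshly found slot;
+    // no equality probing is needed since `that` already holds distinct keys.)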
+    for (const auto& v : that) {
+      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+      const size_t i = find_first_non_full(hash);
+      set_ctrl(i, H2(hash));
+      emplace_at(i, v);
+    }
+    size_ = that.size();
+    growth_left() -= that.size();
+  }
+
+  raw_hash_set(raw_hash_set&& that) noexcept(
+      std::is_nothrow_copy_constructible<hasher>::value&&
+      std::is_nothrow_copy_constructible<key_equal>::value&&
+      std::is_nothrow_copy_constructible<allocator_type>::value)
+      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+        slots_(absl::exchange(that.slots_, nullptr)),
+        size_(absl::exchange(that.size_, 0)),
+        capacity_(absl::exchange(that.capacity_, 0)),
+        // Hash, equality and allocator are copied instead of moved because
+        // `that` must be left valid. If Hash is std::function<Key>, moving it
+        // would create a nullptr functor that cannot be called.
+        settings_(that.settings_) {
+    // growth_left was copied above, reset the one from `that`.
+    that.growth_left() = 0;
+  }
+
+  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+      : ctrl_(EmptyGroup()),
+        slots_(nullptr),
+        size_(0),
+        capacity_(0),
+        settings_(0, that.hash_ref(), that.eq_ref(), a) {
+    if (a == that.alloc_ref()) {
+      std::swap(ctrl_, that.ctrl_);
+      std::swap(slots_, that.slots_);
+      std::swap(size_, that.size_);
+      std::swap(capacity_, that.capacity_);
+      std::swap(growth_left(), that.growth_left());
+    } else {
+      reserve(that.size());
+      // Note: this will copy elements of dense_set and unordered_set instead of
+      // moving them. This can be fixed if it ever becomes an issue.
+      for (auto& elem : that) insert(std::move(elem));
+    }
+  }
+
+  raw_hash_set& operator=(const raw_hash_set& that) {
+    raw_hash_set tmp(that,
+                     AllocTraits::propagate_on_container_copy_assignment::value
+                         ? that.alloc_ref()
+                         : alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+      absl::allocator_traits<allocator_type>::is_always_equal::value&&
+      std::is_nothrow_move_assignable<hasher>::value&&
+      std::is_nothrow_move_assignable<key_equal>::value) {
+    // TODO(sbenza): We should only use the operations from the noexcept clause
+    // to make sure we actually adhere to that contract.
+    return move_assign(
+        std::move(that),
+        typename AllocTraits::propagate_on_container_move_assignment());
+  }
+
+  ~raw_hash_set() { destroy_slots(); }
+
+  iterator begin() {
+    auto it = iterator_at(0);
+    it.skip_empty_or_deleted();
+    return it;
+  }
+  iterator end() { return {ctrl_ + capacity_}; }
+
+  const_iterator begin() const {
+    return const_cast<raw_hash_set*>(this)->begin();
+  }
+  const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+  const_iterator cbegin() const { return begin(); }
+  const_iterator cend() const { return end(); }
+
+  bool empty() const { return !size(); }
+  size_t size() const { return size_; }
+  size_t capacity() const { return capacity_; }
+  size_t max_size() const { return std::numeric_limits<size_t>::max(); }
+
+  void clear() {
+    // Iterating over this container is O(bucket_count()). When bucket_count()
+    // is much greater than size(), iteration becomes prohibitively expensive.
+    // For clear() it is more important to reuse the allocated array when the
+    // container is small because allocation takes a comparatively long time
+    // compared to destruction of the elements of the container. So we pick the
+    // largest bucket_count() threshold for which iteration is still fast and
+    // past that we simply deallocate the array.
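+    // (Above the 127-slot threshold the backing array is deallocated outright;
+    // at or below it, elements are destroyed in place and the array is kept.)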
+    if (capacity_ > 127) {
+      destroy_slots();
+    } else if (capacity_) {
+      for (size_t i = 0; i != capacity_; ++i) {
+        if (IsFull(ctrl_[i])) {
+          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+        }
+      }
+      size_ = 0;
+      reset_ctrl();
+      growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+    }
+    assert(empty());
+  }
+
+  // This overload kicks in when the argument is an rvalue of insertable and
+  // decomposable type other than init_type.
+  //
+  //   flat_hash_map<std::string, int> m;
+  //   m.insert(std::make_pair("abc", 42));
+  template <class T, RequiresInsertable<T> = 0,
+            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+            T* = nullptr>
+  std::pair<iterator, bool> insert(T&& value) {
+    return emplace(std::forward<T>(value));
+  }
+
+  // This overload kicks in when the argument is a bitfield or an lvalue of
+  // insertable and decomposable type.
+  //
+  //   union { int n : 1; };
+  //   flat_hash_set<int> s;
+  //   s.insert(n);
+  //
+  //   flat_hash_set<std::string> s;
+  //   const char* p = "hello";
+  //   s.insert(p);
+  //
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<const T&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  std::pair<iterator, bool> insert(const T& value) {
+    return emplace(value);
+  }
+
+  // This overload kicks in when the argument is an rvalue of init_type. Its
+  // purpose is to handle brace-init-list arguments.
+  //
+  //   flat_hash_map<std::string, int> m;
+  //   m.insert({"abc", 42});
+  std::pair<iterator, bool> insert(init_type&& value) {
+    return emplace(std::move(value));
+  }
+
+  template <class T, RequiresInsertable<T> = 0,
+            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+            T* = nullptr>
+  iterator insert(const_iterator, T&& value) {
+    return insert(std::forward<T>(value)).first;
+  }
+
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<const T&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  iterator insert(const_iterator, const T& value) {
+    return insert(value).first;
+  }
+
+  iterator insert(const_iterator, init_type&& value) {
+    return insert(std::move(value)).first;
+  }
+
+  template <class InputIt>
+  void insert(InputIt first, InputIt last) {
+    for (; first != last; ++first) insert(*first);
+  }
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+  void insert(std::initializer_list<T> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
+  void insert(std::initializer_list<init_type> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
+  insert_return_type<iterator, node_type> insert(node_type&& node) {
+    if (!node) return {end(), false, node_type()};
+    const auto& elem = PolicyTraits::element(node.slot());
+    auto res = PolicyTraits::apply(
+        InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+    if (res.second) {
+      node.reset();
+      return {res.first, true, node_type()};
+    } else {
+      return {res.first, false, std::move(node)};
+    }
+  }
+
+  iterator insert(const_iterator, node_type&& node) {
+    return insert(std::move(node)).first;
+  }
+
+  // This overload kicks in if we can deduce the key from args.
This enables us
+  // to avoid constructing value_type if an entry with the same key already
+  // exists.
+  //
+  // For example:
+  //
+  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+  //   // Creates no std::string copies and makes no heap allocations.
+  //   m.emplace("abc", "xyz");
+  template <class... Args, typename std::enable_if<
+                               IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    return PolicyTraits::apply(EmplaceDecomposable{*this},
+                               std::forward<Args>(args)...);
+  }
+
+  // This overload kicks in if we cannot deduce the key from args. It constructs
+  // value_type unconditionally and then either moves it into the table or
+  // destroys it.
+  template <class... Args, typename std::enable_if<
+                               !IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+        raw;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+    const auto& elem = PolicyTraits::element(slot);
+    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+  }
+
+  template <class... Args>
+  iterator emplace_hint(const_iterator, Args&&... args) {
+    return emplace(std::forward<Args>(args)...).first;
+  }
+
+  // Extension API: support for lazy emplace.
+  //
+  // Looks up key in the table. If found, returns the iterator to the element.
+  // Otherwise calls f with one argument of type raw_hash_set::constructor. f
+  // MUST call raw_hash_set::constructor with arguments as if a
+  // raw_hash_set::value_type is constructed, otherwise the behavior is
+  // undefined.
+  //
+  // For example:
+  //
+  //   std::unordered_set<ArenaString> s;
+  //   // Makes an ArenaString even if "abc" is in the set.
+  //   s.insert(ArenaString(&arena, "abc"));
+  //
+  //   flat_hash_set<ArenaString> s;
+  //   // Makes an ArenaString only if "abc" is not in the set.
+  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
+  //     ctor(&arena, "abc");
+  //   });
+  //
+  // WARNING: This API is currently experimental. If there is a way to implement
+  // the same thing with the rest of the API, prefer that.
+  class constructor {
+    friend class raw_hash_set;
+
+   public:
+    template <class... Args>
+    void operator()(Args&&... args) const {
+      assert(*slot_);
+      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+      *slot_ = nullptr;
+    }
+
+   private:
+    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+    allocator_type* alloc_;
+    slot_type** slot_;
+  };
+
+  template <class K = key_type, class F>
+  iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+    auto res = find_or_prepare_insert(key);
+    if (res.second) {
+      slot_type* slot = slots_ + res.first;
+      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+      assert(!slot);
+    }
+    return iterator_at(res.first);
+  }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.erase("abc");
+  //
+  //   flat_hash_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.erase("abc");
+  template <class K = key_type>
+  size_type erase(const key_arg<K>& key) {
+    auto it = find(key);
+    if (it == end()) return 0;
+    erase(it);
+    return 1;
+  }
+
+  // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+  // this method returns void to reduce algorithmic complexity to O(1). In
+  // order to erase while iterating across a map, use the following idiom
+  // (which also works for standard containers):
+  //
+  //   for (auto it = m.begin(), end = m.end(); it != end;) {
+  //     if (<pred>) {
+  //       m.erase(it++);
+  //     } else {
+  //       ++it;
+  //     }
+  //   }
+  void erase(const_iterator cit) { erase(cit.inner_); }
+
+  // This overload is necessary because otherwise erase<K>(const K&) would be
+  // a better match if non-const iterator is passed as an argument.
+  void erase(iterator it) {
+    assert(it != end());
+    PolicyTraits::destroy(&alloc_ref(), it.slot_);
+    erase_meta_only(it);
+  }
+
+  iterator erase(const_iterator first, const_iterator last) {
+    while (first != last) {
+      erase(first++);
+    }
+    return last.inner_;
+  }
+
+  // Moves elements from `src` into `this`.
+  // If the element already exists in `this`, it is left unmodified in `src`.
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
+    assert(this != &src);
+    for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+                              PolicyTraits::element(it.slot_))
+              .second) {
+        src.erase_meta_only(it);
+      }
+    }
+  }
+
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+    merge(src);
+  }
+
+  node_type extract(const_iterator position) {
+    node_type node(alloc_ref(), position.inner_.slot_);
+    erase_meta_only(position);
+    return node;
+  }
+
+  template <
+      class K = key_type,
+      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+  node_type extract(const key_arg<K>& key) {
+    auto it = find(key);
+    return it == end() ? node_type() : extract(const_iterator{it});
+  }
+
+  void swap(raw_hash_set& that) noexcept(
+      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+      (!AllocTraits::propagate_on_container_swap::value ||
+       IsNoThrowSwappable<allocator_type>())) {
+    using std::swap;
+    swap(ctrl_, that.ctrl_);
+    swap(slots_, that.slots_);
+    swap(size_, that.size_);
+    swap(capacity_, that.capacity_);
+    swap(growth_left(), that.growth_left());
+    swap(hash_ref(), that.hash_ref());
+    swap(eq_ref(), that.eq_ref());
+    if (AllocTraits::propagate_on_container_swap::value) {
+      swap(alloc_ref(), that.alloc_ref());
+    } else {
+      // If the allocators do not compare equal it is officially undefined
+      // behavior. We choose to do nothing.
+    }
+  }
+
+  void rehash(size_t n) {
+    if (n == 0 && capacity_ == 0) return;
+    if (n == 0 && size_ == 0) return destroy_slots();
+    auto m = NormalizeCapacity(std::max(
+        n, static_cast<size_t>(std::ceil(size() / kMaxLoadFactor))));
+    // n == 0 unconditionally rehashes as per the standard.
+    if (n == 0 || m > capacity_) {
+      resize(m);
+    }
+  }
+
+  void reserve(size_t n) {
+    rehash(static_cast<size_t>(std::ceil(n / kMaxLoadFactor)));
+  }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.count("abc");
+  //
+  //   flat_hash_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.count("abc");
+  template <class K = key_type>
+  size_t count(const key_arg<K>& key) const {
+    return find(key) == end() ? 0 : 1;
+  }
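+  // Issues CPU prefetch instructions for the memory needed to find or insert
+  // a key. Like all lookup functions, it supports heterogeneous keys.
+  //
+  // A sketch of the intended pattern (illustrative; `m` and `key` are
+  // hypothetical):
+  //
+  //   m.prefetch(key);   // start pulling the probe window into cache
+  //   ...                // unrelated work
+  //   m.find(key);       // ctrl/slot cache lines are likely resident by now
+  //
+  // NOTE: This is a very low level operation and should not be used without
+  // specific benchmarks indicating its importance.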
+ template <class K = key_type> + void prefetch(const key_arg<K>& key) const { + (void)key; +#if defined(__GNUC__) + auto seq = probe(hash_ref()(key)); + __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset())); + __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset())); +#endif // __GNUC__ + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template <class K = key_type> + iterator find(const key_arg<K>& key, size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement<K>{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); + seq.next(); + } + } + template <class K = key_type> + iterator find(const key_arg<K>& key) { + return find(key, hash_ref()(key)); + } + + template <class K = key_type> + const_iterator find(const key_arg<K>& key, size_t hash) const { + return const_cast<raw_hash_set*>(this)->find(key, hash); + } + template <class K = key_type> + const_iterator find(const key_arg<K>& key) const { + return find(key, hash_ref()(key)); + } + + template <class K = key_type> + bool contains(const key_arg<K>& key) const { + return find(key) != end(); + } + + template <class K = key_type> + std::pair<iterator, iterator> equal_range(const key_arg<K>& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template <class K = key_type> + std::pair<const_iterator, const_iterator> equal_range( + const key_arg<K>& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity_; } + float load_factor() const { + return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. + } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + + private: + template <class Container, typename Enabler> + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement { + template <class K, class... Args> + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement { + template <class K, class... Args> + size_t operator()(const K& key, Args&&...) const { + return h(key); + } + const hasher& h; + }; + + template <class K1> + struct EqualElement { + template <class K2, class... 
Args>
+    bool operator()(const K2& lhs, Args&&...) const {
+      return eq(lhs, rhs);
+    }
+    const K1& rhs;
+    const key_equal& eq;
+  };
+
+  struct EmplaceDecomposable {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        s.emplace_at(res.first, std::forward<Args>(args)...);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+  };
+
+  template <bool do_destroy>
+  struct InsertSlot {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+      } else if (do_destroy) {
+        PolicyTraits::destroy(&s.alloc_ref(), &slot);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+    // Constructed slot. Either moved into place or destroyed.
+    slot_type&& slot;
+  };
+
+  // "erases" the object from the container, except that it doesn't actually
+  // destroy the object. It only updates all the metadata of the class.
+  // This can be used in conjunction with Policy::transfer to move the object to
+  // another place.
+  void erase_meta_only(const_iterator it) {
+    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+    --size_;
+    const size_t index = it.inner_.ctrl_ - ctrl_;
+    const size_t index_before = (index - Group::kWidth) & capacity_;
+    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+    // We count how many consecutive non-empty slots we have to the right and
+    // to the left of `it`. If the sum is >= kWidth then there is at least one
+    // probe window that might have seen a full group.
+    bool was_never_full =
+        empty_before && empty_after &&
+        static_cast<size_t>(empty_after.TrailingZeros() +
+                            empty_before.LeadingZeros()) < Group::kWidth;
+
+    set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+    growth_left() += was_never_full;
+  }
+
+  void initialize_slots() {
+    assert(capacity_);
+    auto layout = MakeLayout(capacity_);
+    char* mem = static_cast<char*>(
+        Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
+    ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
+    slots_ = layout.template Pointer<1>(mem);
+    reset_ctrl();
+    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+  }
+
+  void destroy_slots() {
+    if (!capacity_) return;
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (IsFull(ctrl_[i])) {
+        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+      }
+    }
+    auto layout = MakeLayout(capacity_);
+    // Unpoison before returning the memory to the allocator.
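+    // (Sanitizers treat poisoned slots as unaddressable; the region has to be
+    // made addressable again before ownership goes back to the allocator.)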
+    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+    ctrl_ = EmptyGroup();
+    slots_ = nullptr;
+    size_ = 0;
+    capacity_ = 0;
+    growth_left() = 0;
+  }
+
+  void resize(size_t new_capacity) {
+    assert(IsValidCapacity(new_capacity));
+    auto* old_ctrl = ctrl_;
+    auto* old_slots = slots_;
+    const size_t old_capacity = capacity_;
+    capacity_ = new_capacity;
+    initialize_slots();
+
+    for (size_t i = 0; i != old_capacity; ++i) {
+      if (IsFull(old_ctrl[i])) {
+        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                          PolicyTraits::element(old_slots + i));
+        size_t new_i = find_first_non_full(hash);
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+      }
+    }
+    if (old_capacity) {
+      SanitizerUnpoisonMemoryRegion(old_slots,
+                                    sizeof(slot_type) * old_capacity);
+      auto layout = MakeLayout(old_capacity);
+      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+                                      layout.AllocSize());
+    }
+  }
+
+  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+    assert(IsValidCapacity(capacity_));
+    // Algorithm:
+    // - mark all DELETED slots as EMPTY
+    // - mark all FULL slots as DELETED
+    // - for each slot marked as DELETED
+    //     hash = Hash(element)
+    //     target = find_first_non_full(hash)
+    //     if target is in the same group
+    //       mark slot as FULL
+    //     else if target is EMPTY
+    //       transfer element to target
+    //       mark slot as EMPTY
+    //       mark target as FULL
+    //     else if target is DELETED
+    //       swap current element with target element
+    //       mark target as FULL
+    //       repeat procedure for current slot with moved from element (target)
+    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+        raw;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (!IsDeleted(ctrl_[i])) continue;
+      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                        PolicyTraits::element(slots_ + i));
+      size_t new_i = find_first_non_full(hash);
+
+      // Check whether the old and new positions fall within the same group
+      // with respect to the hash. If they do, we don't need to move the
+      // object, as it already sits in the best probe window we can offer.
+      const auto probe_index = [&](size_t pos) {
+        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+      };
+
+      // Element doesn't move.
+      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+        set_ctrl(i, H2(hash));
+        continue;
+      }
+      if (IsEmpty(ctrl_[new_i])) {
+        // Transfer element to the empty spot.
+        // set_ctrl poisons/unpoisons the slots so we have to call it at the
+        // right time.
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+        set_ctrl(i, kEmpty);
+      } else {
+        assert(IsDeleted(ctrl_[new_i]));
+        set_ctrl(new_i, H2(hash));
+        // Until we are done rehashing, DELETED marks previously FULL slots.
+        // Swap i and new_i elements.
+        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+        --i;  // repeat
+      }
+    }
+    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+  }
+
+  void rehash_and_grow_if_necessary() {
+    if (capacity_ == 0) {
+      resize(Group::kWidth - 1);
+    } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+      // Squash DELETED without growing if there is enough capacity.
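+      // (Here live elements fill at most half of the growth budget
+      // (kMaxLoadFactor * capacity_), so rewriting tombstones in place frees
+      // enough room without allocating.)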
+      drop_deletes_without_resize();
+    } else {
+      // Otherwise grow the container.
+      resize(capacity_ * 2 + 1);
+    }
+  }
+
+  bool has_element(const value_type& elem) const {
+    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+                              elem))
+          return true;
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+    return false;
+  }
+
+  // Probes the raw_hash_set with the probe sequence for hash and returns the
+  // pointer to the first empty or deleted slot.
+  // NOTE: this function must work with tables having both kEmpty and kDeleted
+  // in one group. Such tables appear during drop_deletes_without_resize.
+  //
+  // This function is very useful when insertions happen and:
+  // - the input is already a set
+  // - there are enough slots
+  // - the element with the hash is not in the table
+  size_t find_first_non_full(size_t hash) {
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      auto mask = g.MatchEmptyOrDeleted();
+      if (mask) {
+#if !defined(NDEBUG)
+        // We want to force small tables to have random entries too, so in
+        // debug builds we will randomly insert in either the front or back of
+        // the group.
+        // TODO(kfm,sbenza): revisit after we do unconditional mixing
+        if (ShouldInsertBackwards(hash, ctrl_))
+          return seq.offset(mask.HighestBitSet());
+        else
+          return seq.offset(mask.LowestBitSet());
+#else
+        return seq.offset(mask.LowestBitSet());
+#endif
+      }
+      assert(seq.index() < capacity_ && "full table!");
+      seq.next();
+    }
+  }
+
+  // TODO(alkis): Optimize this assuming *this and that don't overlap.
+  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+    raw_hash_set tmp(std::move(that));
+    swap(tmp);
+    return *this;
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+    raw_hash_set tmp(std::move(that), alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+ protected:
+  template <class K>
+  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    auto hash = hash_ref()(key);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return {seq.offset(i), false};
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+      seq.next();
+    }
+    return {prepare_insert(hash), true};
+  }
+
+  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+    size_t target = find_first_non_full(hash);
+    if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(hash);
+    }
+    ++size_;
+    growth_left() -= IsEmpty(ctrl_[target]);
+    set_ctrl(target, H2(hash));
+    return target;
+  }
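+  // An illustrative walk through one probe step above (values are made up):
+  //
+  //   size_t hash = hash_ref()(key);   // e.g. 0x00ff00ff00ff9ac2
+  //   auto seq = probe(hash);          // start group chosen via H1
+  //   Group g{ctrl_ + seq.offset()};   // load kWidth control bytes
+  //   g.Match(H2(hash));               // H2 = low 7 bits -> candidate slots
+  //   g.MatchEmpty();                  // any empty byte: the key cannot be
+  //                                    // further along the probe sequence
+
+  // Constructs the value in the space pointed by the iterator. This only works
+  // after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+  //
+  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+  // k is the key decomposed from `forward<Args>(args)...`, and the bool
+  // returned by find_or_prepare_insert(k) was true.
+  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+  template <class... Args>
+  void emplace_at(size_t i, Args&&... args) {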
+    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+                            std::forward<Args>(args)...);
+
+    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+               iterator_at(i) &&
+           "constructed value does not match the lookup key");
+  }
+
+  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+  friend struct RawHashSetTestOnlyAccess;
+
+  probe_seq<Group::kWidth> probe(size_t hash) const {
+    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
+  }
+
+  // Reset all ctrl bytes back to kEmpty, except the sentinel.
+  void reset_ctrl() {
+    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+    ctrl_[capacity_] = kSentinel;
+    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+  }
+
+  // Sets the control byte, and if `i < Group::kWidth`, sets the cloned byte at
+  // the end too.
+  void set_ctrl(size_t i, ctrl_t h) {
+    assert(i < capacity_);
+
+    if (IsFull(h)) {
+      SanitizerUnpoisonObject(slots_ + i);
+    } else {
+      SanitizerPoisonObject(slots_ + i);
+    }
+
+    ctrl_[i] = h;
+    ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+  }
+
+  size_t& growth_left() { return settings_.template get<0>(); }
+
+  hasher& hash_ref() { return settings_.template get<1>(); }
+  const hasher& hash_ref() const { return settings_.template get<1>(); }
+  key_equal& eq_ref() { return settings_.template get<2>(); }
+  const key_equal& eq_ref() const { return settings_.template get<2>(); }
+  allocator_type& alloc_ref() { return settings_.template get<3>(); }
+  const allocator_type& alloc_ref() const {
+    return settings_.template get<3>();
+  }
+
+  // On average each group has 2 empty slots (for the vectorized case).
+  static constexpr float kMaxLoadFactor = 14.0 / 16.0;
+
+  // TODO(alkis): Investigate removing some of these fields:
+  // - ctrl/slots can be derived from each other
+  // - size can be moved into the slot array
+  ctrl_t* ctrl_ = EmptyGroup();  // [(capacity + 1) * ctrl_t]
+  slot_type* slots_ = nullptr;   // [capacity * slot_type]
+  size_t size_ = 0;              // number of full slots
+  size_t capacity_ = 0;          // total number of slots
+  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+                                            key_equal, allocator_type>
+      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+  using Traits = typename Set::PolicyTraits;
+  using Slot = typename Traits::slot_type;
+
+  static size_t GetNumProbes(const Set& set,
+                             const typename Set::key_type& key) {
+    size_t num_probes = 0;
+    size_t hash = set.hash_ref()(key);
+    auto seq = set.probe(hash);
+    while (true) {
+      container_internal::Group g{set.ctrl_ + seq.offset()};
+      for (int i : g.Match(container_internal::H2(hash))) {
+        if (Traits::apply(
+                typename Set::template EqualElement<typename Set::key_type>{
+                    key, set.eq_ref()},
+                Traits::element(set.slots_ + seq.offset(i))))
+          return num_probes;
+        ++num_probes;
+      }
+      if (g.MatchEmpty()) return num_probes;
+      seq.next();
+      ++num_probes;
+    }
+  }
+
+  static size_t AllocatedByteSize(const Set& c) {
+    size_t capacity = c.capacity_;
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * c.size();
+    } else {
+      for (size_t i = 0; i != capacity; ++i) {
+        if
(container_internal::IsFull(c.ctrl_[i])) { + m += Traits::space_used(c.slots_ + i); + } + } + } + return m; + } + + static size_t LowerBoundAllocatedByteSize(size_t size) { + size_t capacity = container_internal::NormalizeCapacity( + std::ceil(size / Set::kMaxLoadFactor)); + if (capacity == 0) return 0; + auto layout = Set::MakeLayout(capacity); + size_t m = layout.AllocSize(); + size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * size; + } + return m; + } +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc new file mode 100644 index 00000000..891fa450 --- /dev/null +++ b/absl/container/internal/raw_hash_set_allocator_test.cc @@ -0,0 +1,428 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <limits> +#include <scoped_allocator> + +#include "gtest/gtest.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/container/internal/tracked.h" + +namespace absl { +namespace container_internal { +namespace { + +enum AllocSpec { + kPropagateOnCopy = 1, + kPropagateOnMove = 2, + kPropagateOnSwap = 4, +}; + +struct AllocState { + size_t num_allocs = 0; + std::set<void*> owned; +}; + +template <class T, + int Spec = kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap> +class CheckedAlloc { + public: + template <class, int> + friend class CheckedAlloc; + + using value_type = T; + + CheckedAlloc() {} + explicit CheckedAlloc(size_t id) : id_(id) {} + CheckedAlloc(const CheckedAlloc&) = default; + CheckedAlloc& operator=(const CheckedAlloc&) = default; + + template <class U> + CheckedAlloc(const CheckedAlloc<U, Spec>& that) + : id_(that.id_), state_(that.state_) {} + + template <class U> + struct rebind { + using other = CheckedAlloc<U, Spec>; + }; + + using propagate_on_container_copy_assignment = + std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>; + + using propagate_on_container_move_assignment = + std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>; + + using propagate_on_container_swap = + std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>; + + CheckedAlloc select_on_container_copy_construction() const { + if (Spec & kPropagateOnCopy) return *this; + return {}; + } + + T* allocate(size_t n) { + T* ptr = std::allocator<T>().allocate(n); + track_alloc(ptr); + return ptr; + } + void deallocate(T* ptr, size_t n) { + memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned. 
+ track_dealloc(ptr); + return std::allocator<T>().deallocate(ptr, n); + } + + friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { + return !(a == b); + } + + size_t num_allocs() const { return state_->num_allocs; } + + void swap(CheckedAlloc& that) { + using std::swap; + swap(id_, that.id_); + swap(state_, that.state_); + } + + friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } + + friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { + return o << "alloc(" << a.id_ << ")"; + } + + private: + void track_alloc(void* ptr) { + AllocState* state = state_.get(); + ++state->num_allocs; + if (!state->owned.insert(ptr).second) + ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; + } + void track_dealloc(void* ptr) { + if (state_->owned.erase(ptr) != 1) + ADD_FAILURE() << *this + << " deleting memory owned by another allocator: " << ptr; + } + + size_t id_ = std::numeric_limits<size_t>::max(); + + std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>(); +}; + +struct Identity { + int32_t operator()(int32_t v) const { return v; } +}; + +struct Policy { + using slot_type = Tracked<int32_t>; + using init_type = Tracked<int32_t>; + using key_type = int32_t; + + template <class allocator_type, class... Args> + static void construct(allocator_type* alloc, slot_type* slot, + Args&&... args) { + std::allocator_traits<allocator_type>::construct( + *alloc, slot, std::forward<Args>(args)...); + } + + template <class allocator_type> + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits<allocator_type>::destroy(*alloc, slot); + } + + template <class allocator_type> + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + template <class F> + static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) { + return std::forward<F>(f)(v, v); + } + + template <class F> + static auto apply(F&& f, const slot_type& v) + -> decltype(std::forward<F>(f)(v.val(), v)) { + return std::forward<F>(f)(v.val(), v); + } + + template <class F> + static auto apply(F&& f, slot_type&& v) + -> decltype(std::forward<F>(f)(v.val(), std::move(v))) { + return std::forward<F>(f)(v.val(), std::move(v)); + } + + static slot_type& element(slot_type* slot) { return *slot; } +}; + +template <int Spec> +struct PropagateTest : public ::testing::Test { + using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>; + + using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>; + + PropagateTest() { + EXPECT_EQ(a1, t1.get_allocator()); + EXPECT_NE(a2, t1.get_allocator()); + } + + Alloc a1 = Alloc(1); + Table t1 = Table(0, a1); + Alloc a2 = Alloc(2); +}; + +using PropagateOnAll = + PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>; +using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>; +using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>; + +TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } + +TEST_F(PropagateOnAll, InsertAllocates) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, InsertDecomposes) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, 
it->num_moves()); + EXPECT_EQ(0, it->num_copies()); + + EXPECT_FALSE(t1.insert(0).second); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, RehashMoves) { + auto it = t1.insert(0).first; + EXPECT_EQ(0, it->num_moves()); + t1.rehash(2 * t1.capacity()); + EXPECT_EQ(2, a1.num_allocs()); + it = t1.find(0); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, u.get_allocator().num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + 
EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, Swap) { + auto it = t1.insert(0).first; + Table u(0, a2); + u.swap(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(a2, t1.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc new file mode 100644 index 00000000..f59a19b4 --- /dev/null +++ b/absl/container/internal/raw_hash_set_test.cc @@ -0,0 +1,1961 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <functional>
+#include <map>
+#include <memory>
+#include <numeric>
+#include <ostream>
+#include <random>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+  template <typename C>
+  static auto GetSlots(const C& c) -> decltype(c.slots_) {
+    return c.slots_;
+  }
+};
+
+namespace {
+
+using ::testing::DoubleNear;
+using ::testing::ElementsAre;
+using ::testing::Optional;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(Util, NormalizeCapacity) {
+  constexpr size_t kMinCapacity = Group::kWidth - 1;
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
+  EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
+  EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+}
+
+TEST(Util, probe_seq) {
+  probe_seq<16> seq(0, 127);
+  auto gen = [&]() {
+    size_t res = seq.offset();
+    seq.next();
+    return res;
+  };
+  std::vector<size_t> offsets(8);
+  std::generate_n(offsets.begin(), 8, gen);
+  // The offsets are the triangular numbers (0, 1, 3, 6, 10, ...) scaled by
+  // the group width of 16, modulo capacity + 1 = 128.
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+  seq = probe_seq<16>(128, 127);
+  std::generate_n(offsets.begin(), 8, gen);
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+}
+
+TEST(BitMask, Smoke) {
+  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
+  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
+
+  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
+}
+
+TEST(BitMask, WithShift) {
+  // See the non-SSE version of Group for details on what this math is for.
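+  // A short sketch of the trick, under the little-endian byte layout the
+  // constants below assume: XOR-ing ctrl with (lsbs * hash) zeroes exactly
+  // the bytes equal to hash, and (x - lsbs) & ~x & msbs then sets the high
+  // bit of every zero byte. The subtraction may also borrow through a real
+  // zero into the byte above it, which is why the expected mask has two
+  // 0x80 bytes: byte 2 is the true match and byte 3 is a benign false
+  // positive that callers discard via the full key comparison.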
+ uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & msbs; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask<uint64_t, 8, 3> b(mask); + EXPECT_EQ(*b, 2); +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3); + EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6); + + EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7); +} + +TEST(Group, EmptyGroup) { + for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); +} + +#if SWISSTABLE_HAVE_SSE2 +TEST(Group, Match) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); +} + +TEST(Group, MatchEmpty) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); +} + +TEST(Group, MatchEmptyOrDeleted) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); +} +#else +TEST(Group, Match) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); +} +TEST(Group, MatchEmpty) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); +} + +TEST(Group, MatchEmptyOrDeleted) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); +} +#endif + +TEST(Batch, DropDeletes) { + constexpr size_t kCapacity = 63; + constexpr size_t kGroupWidth = container_internal::Group::kWidth; + std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth); + ctrl[kCapacity] = kSentinel; + std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted}; + for (size_t i = 0; i != kCapacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + if (i < kGroupWidth - 1) + ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; + } + ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); + ASSERT_EQ(ctrl[kCapacity], kSentinel); + for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) { + ctrl_t expected = pattern[i % 
(kCapacity + 1) % pattern.size()]; + if (i == kCapacity) expected = kSentinel; + if (expected == kDeleted) expected = kEmpty; + if (IsFull(expected)) expected = kDeleted; + EXPECT_EQ(ctrl[i], expected) + << i << " " << int{pattern[i % pattern.size()]}; + } +} + +TEST(Group, CountLeadingEmptyOrDeleted) { + const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted}; + const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel}; + + for (ctrl_t empty : empty_examples) { + std::vector<ctrl_t> e(Group::kWidth, empty); + EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); + for (ctrl_t full : full_examples) { + for (size_t i = 0; i != Group::kWidth; ++i) { + std::vector<ctrl_t> f(Group::kWidth, empty); + f[i] = full; + EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + std::vector<ctrl_t> f(Group::kWidth, empty); + f[Group::kWidth * 2 / 3] = full; + f[Group::kWidth / 2] = full; + EXPECT_EQ( + Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + } +} + +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; + + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; + } + + static int64_t& element(slot_type* slot) { return *slot; } + + template <class F> + static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) { + return std::forward<F>(f)(x, x); + } +}; + +class StringPolicy { + template <class F, class K, class V, + class = typename std::enable_if< + std::is_convertible<const K&, absl::string_view>::value>::type> + decltype(std::declval<F>()( + std::declval<const absl::string_view&>(), std::piecewise_construct, + std::declval<std::tuple<K>>(), + std::declval<V>())) static apply_impl(F&& f, + std::pair<std::tuple<K>, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template <class... Ts> + slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {} + + std::pair<std::string, std::string> pair; + }; + + using key_type = std::string; + using init_type = std::pair<std::string, std::string>; + + template <class allocator_type, class... Args> + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits<allocator_type>::construct( + *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...); + } + + template <class allocator_type> + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits<allocator_type>::destroy(*alloc, slot); + } + + template <class allocator_type> + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair<std::string, std::string>& element(slot_type* slot) { + return slot->pair; + } + + template <class F, class... Args> + static auto apply(F&& f, Args&&... 
args) + -> decltype(apply_impl(std::forward<F>(f), + PairArgs(std::forward<Args>(args)...))) { + return apply_impl(std::forward<F>(f), + PairArgs(std::forward<Args>(args)...)); + } +}; + +struct StringHash : absl::Hash<absl::string_view> { + using is_transparent = void; +}; +struct StringEq : std::equal_to<absl::string_view> { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>, + std::equal_to<int64_t>, std::allocator<int64_t>> { + using Base = typename IntTable::raw_hash_set; + IntTable() {} + using Base::Base; +}; + +struct BadFastHash { + template <class T> + size_t operator()(const T&) const { + return 0; + } +}; + +struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>, + std::allocator<int>> { + using Base = typename BadTable::raw_hash_set; + BadTable() {} + using Base::Base; +}; + +TEST(Table, EmptyFunctorOptimization) { + static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, ""); + static_assert(std::is_empty<std::allocator<int>>::value, ""); + + struct MockTable { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; + }; + struct StatelessHash { + size_t operator()(absl::string_view) const { return 0; } + }; + struct StatefulHash : StatelessHash { + size_t dummy; + }; + + EXPECT_EQ( + sizeof(MockTable), + sizeof( + raw_hash_set<StringPolicy, StatelessHash, + std::equal_to<absl::string_view>, std::allocator<int>>)); + + EXPECT_EQ( + sizeof(MockTable) + sizeof(StatefulHash), + sizeof( + raw_hash_set<StringPolicy, StatefulHash, + std::equal_to<absl::string_view>, std::allocator<int>>)); +} + +TEST(Table, Empty) { + IntTable t; + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.empty()); +} + +#ifdef __GNUC__ +template <class T> +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) { + asm volatile("" : : "r,m"(v) : "memory"); +} +#endif + +TEST(Table, Prefetch) { + IntTable t; + t.emplace(1); + // Works for both present and absent keys. + t.prefetch(1); + t.prefetch(2); + + // Do not run in debug mode, when prefetch is not implemented, or when + // sanitizers are enabled. +#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \ + !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \ + !defined(UNDEFINED_BEHAVIOR_SANITIZER) + const auto now = [] { return absl::base_internal::CycleClock::Now(); }; + + static constexpr int size = 1000000; + for (int i = 0; i < size; ++i) t.insert(i); + + int64_t no_prefetch = 0, prefetch = 0; + for (int iter = 0; iter < 10; ++iter) { + int64_t time = now(); + for (int i = 0; i < size; ++i) { + DoNotOptimize(t.find(i)); + } + no_prefetch += now() - time; + + time = now(); + for (int i = 0; i < size; ++i) { + t.prefetch(i + 20); + DoNotOptimize(t.find(i)); + } + prefetch += now() - time; + } + + // no_prefetch is at least 30% slower. 
+  EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
+#endif
+}
+
+TEST(Table, LookupEmpty) {
+  IntTable t;
+  auto it = t.find(0);
+  EXPECT_TRUE(it == t.end());
+}
+
+TEST(Table, Insert1) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+}
+
+TEST(Table, Insert2) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_TRUE(t.find(1) == t.end());
+  res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, InsertCollision) {
+  BadTable t;
+  EXPECT_TRUE(t.find(1) == t.end());
+  auto res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(1, t.size());
+
+  EXPECT_TRUE(t.find(2) == t.end());
+  res = t.emplace(2);
+  EXPECT_THAT(*res.first, 2);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(2, t.size());
+
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+}
+
+// Test that we do not add an existing element when we have to search through
+// many groups that contain deleted elements.
+TEST(Table, InsertCollisionAndFindAfterDelete) {
+  BadTable t;  // all elements go to the same group.
+  // Have at least 2 groups with Group::kWidth collisions
+  // plus some extra collisions in the last group.
+  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
+  for (size_t i = 0; i < kNumInserts; ++i) {
+    auto res = t.emplace(i);
+    EXPECT_TRUE(res.second);
+    EXPECT_THAT(*res.first, i);
+    EXPECT_EQ(i + 1, t.size());
+  }
+
+  // Remove elements one by one and check
+  // that we can still find all the other elements.
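+  // (Erasing from a full group leaves a kDeleted tombstone rather than
+  // kEmpty, so probe sequences must keep walking across those slots; if a
+  // probe stopped early, find() would miss live keys and the emplace()
+  // below would wrongly re-insert a key that is still present.)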
+ for (size_t i = 0; i < kNumInserts; ++i) { + EXPECT_EQ(1, t.erase(i)) << i; + for (size_t j = i + 1; j < kNumInserts; ++j) { + EXPECT_THAT(*t.find(j), j); + auto res = t.emplace(j); + EXPECT_FALSE(res.second) << i << " " << j; + EXPECT_THAT(*res.first, j); + EXPECT_EQ(kNumInserts - i - 1, t.size()); + } + } + EXPECT_TRUE(t.empty()); +} + +TEST(Table, LazyEmplace) { + StringTable t; + bool called = false; + auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "ABC"); + }); + EXPECT_TRUE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); + called = false; + it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "DEF"); + }); + EXPECT_FALSE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); +} + +TEST(Table, ContainsEmpty) { + IntTable t; + + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains1) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + EXPECT_EQ(1, t.erase(0)); + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains2) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + t.clear(); + EXPECT_FALSE(t.contains(0)); +} + +int decompose_constructed; +struct DecomposeType { + DecomposeType(int i) : i(i) { // NOLINT + ++decompose_constructed; + } + + explicit DecomposeType(const char* d) : DecomposeType(*d) {} + + int i; +}; + +struct DecomposeHash { + using is_transparent = void; + size_t operator()(DecomposeType a) const { return a.i; } + size_t operator()(int a) const { return a; } + size_t operator()(const char* a) const { return *a; } +}; + +struct DecomposeEq { + using is_transparent = void; + bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + bool operator()(DecomposeType a, const char* b) const { return a.i == *b; } +}; + +struct DecomposePolicy { + using slot_type = DecomposeType; + using key_type = DecomposeType; + using init_type = DecomposeType; + + template <typename T> + static void construct(void*, DecomposeType* slot, T&& v) { + *slot = DecomposeType(std::forward<T>(v)); + } + static void destroy(void*, DecomposeType*) {} + static DecomposeType& element(slot_type* slot) { return *slot; } + + template <class F, class T> + static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) { + return std::forward<F>(f)(x, x); + } +}; + +template <typename Hash, typename Eq> +void TestDecompose(bool construct_three) { + DecomposeType elem{0}; + const int one = 1; + const char* three_p = "3"; + const auto& three = three_p; + + raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1; + + decompose_constructed = 0; + int expected_constructed = 0; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(elem); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(1); + EXPECT_EQ(++expected_constructed, decompose_constructed); + set1.emplace("3"); + EXPECT_EQ(++expected_constructed, decompose_constructed); + EXPECT_EQ(expected_constructed, decompose_constructed); + + { // insert(T&&) + set1.insert(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(const T&) + set1.insert(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, T&&) + set1.insert(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, const 
T&)
+    set1.insert(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace(...)
+    set1.emplace(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace("3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace_hint(...)
+    set1.emplace_hint(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), "3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+}
+
+TEST(Table, Decompose) {
+  TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+  struct TransparentHashIntOverload {
+    size_t operator()(DecomposeType a) const { return a.i; }
+    size_t operator()(int a) const { return a; }
+  };
+  struct TransparentEqIntOverload {
+    bool operator()(DecomposeType a, DecomposeType b) const {
+      return a.i == b.i;
+    }
+    bool operator()(DecomposeType a, int b) const { return a.i == b; }
+  };
+  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+}
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+  IntTable t;
+  t.reserve(n);
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  const size_t c = t.bucket_count();
+  while (c == t.bucket_count()) t.emplace(n++);
+  return t.size() - 1;
+}
+
+struct Modulo1000Hash {
+  size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+                          std::allocator<int>> {};
+
+// Test that a rehash with no resize happens when there are many deleted slots.
+TEST(Table, RehashWithNoResize) {
+  Modulo1000HashTable t;
+  // Insert keys that all collide under Modulo1000Hash (multiples of 1000)
+  // until we have at least kMinFullGroups groups with Group::kWidth
+  // collisions, then fill up to MaxDensitySize.
+  const size_t kMinFullGroups = 7;
+  std::vector<int> keys;
+  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+    int k = i * 1000;
+    t.emplace(k);
+    keys.push_back(k);
+  }
+  const size_t capacity = t.capacity();
+
+  // Remove elements from all groups except the first and the last one.
+  // All elements removed from full groups will be marked as kDeleted.
+  const size_t erase_begin = Group::kWidth / 2;
+  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+  for (size_t i = erase_begin; i < erase_end; ++i) {
+    EXPECT_EQ(1, t.erase(keys[i])) << i;
+  }
+  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+  auto last_key = keys.back();
+  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+  ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+  int x = 1;
+  // Insert and erase one element at a time until an in-place rehash happens.
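+  // (Once growth_left is exhausted an emplace() triggers a rehash; at this
+  // size it happens in place, converting kDeleted slots back to kEmpty.
+  // The probe count of last_key changing while capacity() stays constant
+  // is how the loop below detects it.)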
+ while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) { + t.emplace(x); + ASSERT_EQ(capacity, t.capacity()); + // All elements should be there. + ASSERT_TRUE(t.find(x) != t.end()) << x; + for (const auto& k : keys) { + ASSERT_TRUE(t.find(k) != t.end()) << k; + } + t.erase(x); + ++x; + } +} + +TEST(Table, InsertEraseStressTest) { + IntTable t; + const size_t kMinElementCount = 250; + std::deque<int> keys; + size_t i = 0; + for (; i < MaxDensitySize(kMinElementCount); ++i) { + t.emplace(i); + keys.push_back(i); + } + const size_t kNumIterations = 1000000; + for (; i < kNumIterations; ++i) { + ASSERT_EQ(1, t.erase(keys.front())); + keys.pop_front(); + t.emplace(i); + keys.push_back(i); + } +} + +TEST(Table, InsertOverloads) { + StringTable t; + // These should all trigger the insert(init_type) overload. + t.insert({{}, {}}); + t.insert({"ABC", {}}); + t.insert({"DEF", "!!!"}); + + EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""), + Pair("DEF", "!!!"))); +} + +TEST(Table, LargeTable) { + IntTable t; + for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40); + for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40)); +} + +// Timeout if copy is quadratic as it was in Rust. +TEST(Table, EnsureNonQuadraticAsInRust) { + static const size_t kLargeSize = 1 << 15; + + IntTable t; + for (size_t i = 0; i != kLargeSize; ++i) { + t.insert(i); + } + + // If this is quadratic, the test will timeout. + IntTable t2; + for (const auto& entry : t) t2.insert(entry); +} + +TEST(Table, ClearBug) { + IntTable t; + constexpr size_t capacity = container_internal::Group::kWidth - 1; + constexpr size_t max_size = capacity / 2; + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2)); + t.clear(); + ASSERT_EQ(capacity, t.capacity()); + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2)); + // We are checking that original and second are close enough to each other + // that they are probably still in the same group. This is not strictly + // guaranteed. + EXPECT_LT(std::abs(original - second), + capacity * sizeof(IntTable::value_type)); +} + +TEST(Table, Erase) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_EQ(1, t.size()); + t.erase(res.first); + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.find(0) == t.end()); +} + +// Collect N bad keys by following algorithm: +// 1. Create an empty table and reserve it to 2 * N. +// 2. Insert N random elements. +// 3. Take first Group::kWidth - 1 to bad_keys array. +// 4. Clear the table without resize. +// 5. 
Go to point 2 while N keys not collected +std::vector<int64_t> CollectBadMergeKeys(size_t N) { + static constexpr int kGroupSize = Group::kWidth - 1; + + auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> { + for (size_t i = b; i != e; ++i) { + t->emplace(i); + } + std::vector<int64_t> res; + res.reserve(kGroupSize); + auto it = t->begin(); + for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) { + res.push_back(*it); + } + return res; + }; + + std::vector<int64_t> bad_keys; + bad_keys.reserve(N); + IntTable t; + t.reserve(N * 2); + + for (size_t b = 0; bad_keys.size() < N; b += N) { + auto keys = topk_range(b, b + N, &t); + bad_keys.insert(bad_keys.end(), keys.begin(), keys.end()); + t.erase(t.begin(), t.end()); + EXPECT_TRUE(t.empty()); + } + return bad_keys; +} + +struct ProbeStats { + // Number of elements with specific probe length over all tested tables. + std::vector<size_t> all_probes_histogram; + // Ratios total_probe_length/size for every tested table. + std::vector<double> single_table_ratios; + + friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) { + ProbeStats res = a; + res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(), + b.all_probes_histogram.size())); + std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(), + res.all_probes_histogram.begin(), + res.all_probes_histogram.begin(), std::plus<size_t>()); + res.single_table_ratios.insert(res.single_table_ratios.end(), + b.single_table_ratios.begin(), + b.single_table_ratios.end()); + return res; + } + + // Average ratio total_probe_length/size over tables. + double AvgRatio() const { + return std::accumulate(single_table_ratios.begin(), + single_table_ratios.end(), 0.0) / + single_table_ratios.size(); + } + + // Maximum ratio total_probe_length/size over tables. + double MaxRatio() const { + return *std::max_element(single_table_ratios.begin(), + single_table_ratios.end()); + } + + // Percentile ratio total_probe_length/size over tables. + double PercentileRatio(double Percentile = 0.95) const { + auto r = single_table_ratios; + auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile); + if (mid != r.end()) { + std::nth_element(r.begin(), mid, r.end()); + return *mid; + } else { + return MaxRatio(); + } + } + + // Maximum probe length over all elements and all tables. + size_t MaxProbe() const { return all_probes_histogram.size(); } + + // Fraction of elements with specified probe length. 
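+  // For example, a histogram of {90, 9, 1} normalizes to {0.9, 0.09, 0.01},
+  // and PercentileProbe(0.99) on it returns 1: 99% of the elements have a
+  // probe length of at most 1.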
+ std::vector<double> ProbeNormalizedHistogram() const { + double total_elements = std::accumulate(all_probes_histogram.begin(), + all_probes_histogram.end(), 0ull); + std::vector<double> res; + for (size_t p : all_probes_histogram) { + res.push_back(p / total_elements); + } + return res; + } + + size_t PercentileProbe(double Percentile = 0.99) const { + size_t idx = 0; + for (double p : ProbeNormalizedHistogram()) { + if (Percentile > p) { + Percentile -= p; + ++idx; + } else { + return idx; + } + } + return idx; + } + + friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) { + out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio() + << ", PercentileRatio:" << s.PercentileRatio() + << ", MaxProbe:" << s.MaxProbe() << ", Probes=["; + for (double p : s.ProbeNormalizedHistogram()) { + out << p << ","; + } + out << "]}"; + + return out; + } +}; + +struct ExpectedStats { + double avg_ratio; + double max_ratio; + std::vector<std::pair<double, double>> pecentile_ratios; + std::vector<std::pair<double, double>> pecentile_probes; + + friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) { + out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio + << ", PercentileRatios: ["; + for (auto el : s.pecentile_ratios) { + out << el.first << ":" << el.second << ", "; + } + out << "], PercentileProbes: ["; + for (auto el : s.pecentile_probes) { + out << el.first << ":" << el.second << ", "; + } + out << "]}"; + + return out; + } +}; + +void VerifyStats(size_t size, const ExpectedStats& exp, + const ProbeStats& stats) { + EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats; + EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats; + for (auto pr : exp.pecentile_ratios) { + EXPECT_LE(stats.PercentileRatio(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } + + for (auto pr : exp.pecentile_probes) { + EXPECT_LE(stats.PercentileProbe(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } +} + +using ProbeStatsPerSize = std::map<size_t, ProbeStats>; + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table and reserve it to keys.size() * 2 +// 2. Insert all keys xored with seed +// 3. Collect ProbeStats from final table. 
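+// Re-running with a fresh seed each iteration exercises the same adversarial
+// key set at different positions in the table, so the aggregated stats
+// reflect probing behavior rather than a single lucky or unlucky layout.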
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys, + size_t num_iters) { + const size_t reserve_size = keys.size() * 2; + + ProbeStats stats; + + int64_t seed = 0x71b1a19b907d6e33; + while (num_iters--) { + seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13); + IntTable t1; + t1.reserve(reserve_size); + for (const auto& key : keys) { + t1.emplace(key ^ seed); + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus<size_t>()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + keys.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats XorSeedExpectedStats() { + constexpr bool kRandomizesInserts = +#if NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. + switch (container_internal::Group::kWidth) { + case 8: + if (kRandomizesInserts) { + return {0.05, + 1.0, + {{0.95, 0.5}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } else { + return {0.05, + 2.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } + break; + case 16: + if (kRandomizesInserts) { + return {0.1, + 1.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.05, + 1.0, + {{0.95, 0.05}}, + {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}}; + } + break; + default: + ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); + } + return {}; +} +TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { + ProbeStatsPerSize stats; + std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10}; + for (size_t size : sizes) { + stats[size] = + CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200); + } + auto expected = XorSeedExpectedStats(); + for (size_t size : sizes) { + auto& stat = stats[size]; + VerifyStats(size, expected, stat); + } +} + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table +// 2. Select 10% of keys and insert 10 elements key * 17 + j * 13 +// 3. 
Collect ProbeStats from final table +ProbeStats CollectProbeStatsOnLinearlyTransformedKeys( + const std::vector<int64_t>& keys, size_t num_iters) { + ProbeStats stats; + + std::random_device rd; + std::mt19937 rng(rd()); + auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; }; + std::uniform_int_distribution<size_t> dist(0, keys.size()-1); + while (num_iters--) { + IntTable t1; + size_t num_keys = keys.size() / 10; + size_t start = dist(rng); + for (size_t i = 0; i != num_keys; ++i) { + for (size_t j = 0; j != 10; ++j) { + t1.emplace(linear_transform(keys[(i + start) % keys.size()], j)); + } + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus<size_t>()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + t1.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats LinearTransformExpectedStats() { + constexpr bool kRandomizesInserts = +#if NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. + switch (container_internal::Group::kWidth) { + case 8: + if (kRandomizesInserts) { + return {0.1, + 0.5, + {{0.95, 0.3}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.15, + 0.5, + {{0.95, 0.3}}, + {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}}; + } + break; + case 16: + if (kRandomizesInserts) { + return {0.1, + 0.4, + {{0.95, 0.3}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.05, + 0.2, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}}; + } + break; + default: + ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); + } + return {}; +} +TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { + ProbeStatsPerSize stats; + std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10}; + for (size_t size : sizes) { + stats[size] = CollectProbeStatsOnLinearlyTransformedKeys( + CollectBadMergeKeys(size), 300); + } + auto expected = LinearTransformExpectedStats(); + for (size_t size : sizes) { + auto& stat = stats[size]; + VerifyStats(size, expected, stat); + } +} + +TEST(Table, EraseCollision) { + BadTable t; + + // 1 2 3 + t.emplace(1); + t.emplace(2); + t.emplace(3); + EXPECT_THAT(*t.find(1), 1); + EXPECT_THAT(*t.find(2), 2); + EXPECT_THAT(*t.find(3), 3); + EXPECT_EQ(3, t.size()); + + // 1 DELETED 3 + t.erase(t.find(2)); + EXPECT_THAT(*t.find(1), 1); + EXPECT_TRUE(t.find(2) == t.end()); + EXPECT_THAT(*t.find(3), 3); + EXPECT_EQ(2, t.size()); + + // DELETED DELETED 3 + t.erase(t.find(1)); + EXPECT_TRUE(t.find(1) == t.end()); + EXPECT_TRUE(t.find(2) == t.end()); + EXPECT_THAT(*t.find(3), 3); + EXPECT_EQ(1, t.size()); + + // DELETED DELETED DELETED + t.erase(t.find(3)); + EXPECT_TRUE(t.find(1) == t.end()); + EXPECT_TRUE(t.find(2) == t.end()); + EXPECT_TRUE(t.find(3) == t.end()); + EXPECT_EQ(0, t.size()); +} + +TEST(Table, EraseInsertProbing) { + BadTable t(100); + + // 1 2 3 4 + t.emplace(1); + t.emplace(2); + t.emplace(3); + t.emplace(4); + + // 1 DELETED 3 DELETED + t.erase(t.find(2)); + 
t.erase(t.find(4)); + + // 1 10 3 11 12 + t.emplace(10); + t.emplace(11); + t.emplace(12); + + EXPECT_EQ(5, t.size()); + EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12)); +} + +TEST(Table, Clear) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + t.clear(); + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_EQ(1, t.size()); + t.clear(); + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.find(0) == t.end()); +} + +TEST(Table, Swap) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_EQ(1, t.size()); + IntTable u; + t.swap(u); + EXPECT_EQ(0, t.size()); + EXPECT_EQ(1, u.size()); + EXPECT_TRUE(t.find(0) == t.end()); + EXPECT_THAT(*u.find(0), 0); +} + +TEST(Table, Rehash) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + t.emplace(0); + t.emplace(1); + EXPECT_EQ(2, t.size()); + t.rehash(128); + EXPECT_EQ(2, t.size()); + EXPECT_THAT(*t.find(0), 0); + EXPECT_THAT(*t.find(1), 1); +} + +TEST(Table, RehashDoesNotRehashWhenNotNecessary) { + IntTable t; + t.emplace(0); + t.emplace(1); + auto* p = &*t.find(0); + t.rehash(1); + EXPECT_EQ(p, &*t.find(0)); +} + +TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) { + IntTable t; + t.rehash(0); + EXPECT_EQ(0, t.bucket_count()); +} + +TEST(Table, RehashZeroDeallocatesEmptyTable) { + IntTable t; + t.emplace(0); + t.clear(); + EXPECT_NE(0, t.bucket_count()); + t.rehash(0); + EXPECT_EQ(0, t.bucket_count()); +} + +TEST(Table, RehashZeroForcesRehash) { + IntTable t; + t.emplace(0); + t.emplace(1); + auto* p = &*t.find(0); + t.rehash(0); + EXPECT_NE(p, &*t.find(0)); +} + +TEST(Table, ConstructFromInitList) { + using P = std::pair<std::string, std::string>; + struct Q { + operator P() const { return {}; } + }; + StringTable t = {P(), Q(), {}, {{}, {}}}; +} + +TEST(Table, CopyConstruct) { + IntTable t; + t.max_load_factor(.321f); + t.emplace(0); + EXPECT_EQ(1, t.size()); + { + IntTable u(t); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); + EXPECT_THAT(*u.find(0), 0); + } + { + IntTable u{t}; + EXPECT_EQ(1, u.size()); + EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); + EXPECT_THAT(*u.find(0), 0); + } + { + IntTable u = t; + EXPECT_EQ(1, u.size()); + EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); + EXPECT_THAT(*u.find(0), 0); + } +} + +TEST(Table, CopyConstructWithAlloc) { + StringTable t; + t.max_load_factor(.321f); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + StringTable u(t, Alloc<std::pair<std::string, std::string>>()); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); +} + +struct ExplicitAllocIntTable + : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>, + std::equal_to<int64_t>, Alloc<int64_t>> { + ExplicitAllocIntTable() {} +}; + +TEST(Table, AllocWithExplicitCtor) { + ExplicitAllocIntTable t; + EXPECT_EQ(0, t.size()); +} + +TEST(Table, MoveConstruct) { + { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + + StringTable u(std::move(t)); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(lf, u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); + } + { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + + StringTable u{std::move(t)}; + EXPECT_EQ(1, u.size()); + EXPECT_EQ(lf, u.max_load_factor()); + EXPECT_THAT(*u.find("a"), 
Pair("a", "b")); + } + { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + + StringTable u = std::move(t); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(lf, u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); + } +} + +TEST(Table, MoveConstructWithAlloc) { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>()); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(lf, u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); +} + +TEST(Table, CopyAssign) { + StringTable t; + t.max_load_factor(.321f); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + StringTable u; + u = t; + EXPECT_EQ(1, u.size()); + EXPECT_EQ(t.max_load_factor(), u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); +} + +TEST(Table, CopySelfAssign) { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + t = *&t; + EXPECT_EQ(1, t.size()); + EXPECT_EQ(lf, t.max_load_factor()); + EXPECT_THAT(*t.find("a"), Pair("a", "b")); +} + +TEST(Table, MoveAssign) { + StringTable t; + t.max_load_factor(.321f); + const float lf = t.max_load_factor(); + t.emplace("a", "b"); + EXPECT_EQ(1, t.size()); + StringTable u; + u = std::move(t); + EXPECT_EQ(1, u.size()); + EXPECT_EQ(lf, u.max_load_factor()); + EXPECT_THAT(*u.find("a"), Pair("a", "b")); +} + +TEST(Table, Equality) { + StringTable t; + std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}}; + t.insert(std::begin(v), std::end(v)); + StringTable u = t; + EXPECT_EQ(u, t); +} + +TEST(Table, Equality2) { + StringTable t; + std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}}; + t.insert(std::begin(v1), std::end(v1)); + StringTable u; + std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}}; + u.insert(std::begin(v2), std::end(v2)); + EXPECT_NE(u, t); +} + +TEST(Table, Equality3) { + StringTable t; + std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}}; + t.insert(std::begin(v1), std::end(v1)); + StringTable u; + std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}}; + u.insert(std::begin(v2), std::end(v2)); + EXPECT_NE(u, t); +} + +TEST(Table, NumDeletedRegression) { + IntTable t; + t.emplace(0); + t.erase(t.find(0)); + // construct over a deleted slot. + t.emplace(0); + t.clear(); +} + +TEST(Table, FindFullDeletedRegression) { + IntTable t; + for (int i = 0; i < 1000; ++i) { + t.emplace(i); + t.erase(t.find(i)); + } + EXPECT_EQ(0, t.size()); +} + +TEST(Table, ReplacingDeletedSlotDoesNotRehash) { + size_t n; + { + // Compute n such that n is the maximum number of elements before rehash. 
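+    // (Insert until bucket_count() grows, then step back one: n is the last
+    // size that still fits in the original allocation.)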
+ IntTable t; + t.emplace(0); + size_t c = t.bucket_count(); + for (n = 1; c == t.bucket_count(); ++n) t.emplace(n); + --n; + } + IntTable t; + t.rehash(n); + const size_t c = t.bucket_count(); + for (size_t i = 0; i != n; ++i) t.emplace(i); + EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n; + t.erase(0); + t.emplace(0); + EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n; +} + +TEST(Table, NoThrowMoveConstruct) { + ASSERT_TRUE( + std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value); + ASSERT_TRUE(std::is_nothrow_copy_constructible< + std::equal_to<absl::string_view>>::value); + ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value); + EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value); +} + +TEST(Table, NoThrowMoveAssign) { + ASSERT_TRUE( + std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value); + ASSERT_TRUE( + std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value); + ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value); + ASSERT_TRUE( + absl::allocator_traits<std::allocator<int>>::is_always_equal::value); + EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value); +} + +TEST(Table, NoThrowSwappable) { + ASSERT_TRUE( + container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>()); + ASSERT_TRUE(container_internal::IsNoThrowSwappable< + std::equal_to<absl::string_view>>()); + ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>()); + EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>()); +} + +TEST(Table, HeterogeneousLookup) { + struct Hash { + size_t operator()(int64_t i) const { return i; } + size_t operator()(double i) const { + ADD_FAILURE(); + return i; + } + }; + struct Eq { + bool operator()(int64_t a, int64_t b) const { return a == b; } + bool operator()(double a, int64_t b) const { + ADD_FAILURE(); + return a == b; + } + bool operator()(int64_t a, double b) const { + ADD_FAILURE(); + return a == b; + } + bool operator()(double a, double b) const { + ADD_FAILURE(); + return a == b; + } + }; + + struct THash { + using is_transparent = void; + size_t operator()(int64_t i) const { return i; } + size_t operator()(double i) const { return i; } + }; + struct TEq { + using is_transparent = void; + bool operator()(int64_t a, int64_t b) const { return a == b; } + bool operator()(double a, int64_t b) const { return a == b; } + bool operator()(int64_t a, double b) const { return a == b; } + bool operator()(double a, double b) const { return a == b; } + }; + + raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2}; + // It will convert to int64_t before the query. + EXPECT_EQ(1, *s.find(double{1.1})); + + raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2}; + // It will try to use the double, and fail to find the object. 
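+  // (THash(1.1) truncates to the same hash as THash(int64_t{1}), so the
+  // probe reaches the stored 1, but TEq(1, 1.1) is false, so the lookup
+  // correctly comes up empty.)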
+ EXPECT_TRUE(ts.find(1.1) == ts.end()); +} + +template <class Table> +using CallFind = decltype(std::declval<Table&>().find(17)); + +template <class Table> +using CallErase = decltype(std::declval<Table&>().erase(17)); + +template <class Table> +using CallExtract = decltype(std::declval<Table&>().extract(17)); + +template <class Table> +using CallPrefetch = decltype(std::declval<Table&>().prefetch(17)); + +template <class Table> +using CallCount = decltype(std::declval<Table&>().count(17)); + +template <template <typename> class C, class Table, class = void> +struct VerifyResultOf : std::false_type {}; + +template <template <typename> class C, class Table> +struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {}; + +TEST(Table, HeterogeneousLookupOverloads) { + using NonTransparentTable = + raw_hash_set<StringPolicy, absl::Hash<absl::string_view>, + std::equal_to<absl::string_view>, std::allocator<int>>; + + EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>())); + EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>())); + EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>())); + EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>())); + EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>())); + + using TransparentTable = raw_hash_set< + StringPolicy, + absl::container_internal::hash_default_hash<absl::string_view>, + absl::container_internal::hash_default_eq<absl::string_view>, + std::allocator<int>>; + + EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>())); + EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>())); + EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>())); + EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>())); + EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>())); +} + +// TODO(alkis): Expand iterator tests. 
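+// A note on the VerifyResultOf machinery above: it is the C++ detection
+// idiom. A minimal standalone sketch (illustrative names only, not library
+// API) looks like this:
+//
+//   template <class...> using VoidT = void;  // absl::void_t equivalent
+//
+//   template <template <class> class C, class T, class = void>
+//   struct Detected : std::false_type {};   // chosen when C<T> is ill-formed
+//
+//   template <template <class> class C, class T>
+//   struct Detected<C, T, VoidT<C<T>>> : std::true_type {};  // C<T> compiles
+//
+// Substitution failure in C<T> quietly selects the primary template instead
+// of producing a hard error, which is what lets the EXPECT_FALSE checks
+// above compile against tables that lack the transparent overloads.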
+TEST(Iterator, IsDefaultConstructible) { + StringTable::iterator i; + EXPECT_TRUE(i == StringTable::iterator()); +} + +TEST(ConstIterator, IsDefaultConstructible) { + StringTable::const_iterator i; + EXPECT_TRUE(i == StringTable::const_iterator()); +} + +TEST(Iterator, ConvertsToConstIterator) { + StringTable::iterator i; + EXPECT_TRUE(i == StringTable::const_iterator()); +} + +TEST(Iterator, Iterates) { + IntTable t; + for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second); + EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5)); +} + +TEST(Table, Merge) { + StringTable t1, t2; + t1.emplace("0", "-0"); + t1.emplace("1", "-1"); + t2.emplace("0", "~0"); + t2.emplace("2", "~2"); + + EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"))); + EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2"))); + + t1.merge(t2); + EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"), + Pair("2", "~2"))); + EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"))); +} + +TEST(Nodes, EmptyNodeType) { + using node_type = StringTable::node_type; + node_type n; + EXPECT_FALSE(n); + EXPECT_TRUE(n.empty()); + + EXPECT_TRUE((std::is_same<node_type::allocator_type, + StringTable::allocator_type>::value)); +} + +TEST(Nodes, ExtractInsert) { + constexpr char k0[] = "Very long std::string zero."; + constexpr char k1[] = "Very long std::string one."; + constexpr char k2[] = "Very long std::string two."; + StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}}; + EXPECT_THAT(t, + UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, ""))); + + auto node = t.extract(k0); + EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, ""))); + EXPECT_TRUE(node); + EXPECT_FALSE(node.empty()); + + StringTable t2; + auto res = t2.insert(std::move(node)); + EXPECT_TRUE(res.inserted); + EXPECT_THAT(*res.position, Pair(k0, "")); + EXPECT_FALSE(res.node); + EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, ""))); + + // Not there. + EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, ""))); + node = t.extract("Not there!"); + EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, ""))); + EXPECT_FALSE(node); + + // Inserting nothing. + res = t2.insert(std::move(node)); + EXPECT_FALSE(res.inserted); + EXPECT_EQ(res.position, t2.end()); + EXPECT_FALSE(res.node); + EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, ""))); + + t.emplace(k0, "1"); + node = t.extract(k0); + + // Insert duplicate. + res = t2.insert(std::move(node)); + EXPECT_FALSE(res.inserted); + EXPECT_THAT(*res.position, Pair(k0, "")); + EXPECT_TRUE(res.node); + EXPECT_FALSE(node); +} + +StringTable MakeSimpleTable(size_t size) { + StringTable t; + for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), ""); + return t; +} + +std::string OrderOfIteration(const StringTable& t) { + std::string order; + for (auto& p : t) order += p.first; + return order; +} + +TEST(Table, IterationOrderChangesByInstance) { + // Needs to be more than kWidth elements to be able to affect order. + const StringTable reference = MakeSimpleTable(20); + + // Since order is non-deterministic we can't just try once and verify. + // We'll try until we find that order changed. It should not take many tries + // for that. + // Important: we have to keep the old tables around. Otherwise tcmalloc will + // just give us the same blocks and we would be doing the same order again. 
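+  // (Presumably the order varies because it depends on where the backing
+  // array is allocated; parking old tables in `garbage` forces each trial
+  // onto a fresh block.)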
+ std::vector<StringTable> garbage; + for (int i = 0; i < 10; ++i) { + auto trial = MakeSimpleTable(20); + if (OrderOfIteration(trial) != OrderOfIteration(reference)) { + // We are done. + return; + } + garbage.push_back(std::move(trial)); + } + FAIL(); +} + +TEST(Table, IterationOrderChangesOnRehash) { + // Since order is non-deterministic we can't just try once and verify. + // We'll try until we find that order changed. It should not take many tries + // for that. + // Important: we have to keep the old tables around. Otherwise tcmalloc will + // just give us the same blocks and we would be doing the same order again. + std::vector<StringTable> garbage; + for (int i = 0; i < 10; ++i) { + // Needs to be more than kWidth elements to be able to affect order. + StringTable t = MakeSimpleTable(20); + const std::string reference = OrderOfIteration(t); + // Force rehash to the same size. + t.rehash(0); + std::string trial = OrderOfIteration(t); + if (trial != reference) { + // We are done. + return; + } + garbage.push_back(std::move(t)); + } + FAIL(); +} + +TEST(Table, IterationOrderChangesForSmallTables) { + // Since order is non-deterministic we can't just try once and verify. + // We'll try until we find that order changed. + // Important: we have to keep the old tables around. Otherwise tcmalloc will + // just give us the same blocks and we would be doing the same order again. + StringTable reference_table = MakeSimpleTable(5); + const std::string reference = OrderOfIteration(reference_table); + std::vector<StringTable> garbage; + for (int i = 0; i < 50; ++i) { + StringTable t = MakeSimpleTable(5); + std::string trial = OrderOfIteration(t); + if (trial != reference) { + // We are done. + return; + } + garbage.push_back(std::move(t)); + } + FAIL() << "Iteration order remained the same across many attempts."; +} + +// Fill the table to 3 different load factors (min, median, max) and evaluate +// the percentage of perfect hits using the debug API. +template <class Table, class AddFn> +std::vector<double> CollectPerfectRatios(Table t, AddFn add) { + using Key = typename Table::key_type; + + // First, fill enough to have a good distribution. + constexpr size_t kMinSize = 10000; + std::vector<Key> keys; + while (t.size() < kMinSize) keys.push_back(add(t)); + // Then, insert until we reach min load factor. + double lf = t.load_factor(); + while (lf <= t.load_factor()) keys.push_back(add(t)); + + // We are now at min load factor. Take a snapshot. + size_t perfect = 0; + auto update_perfect = [&](Key k) { + perfect += GetHashtableDebugNumProbes(t, k) == 0; + }; + for (const auto& k : keys) update_perfect(k); + + std::vector<double> perfect_ratios; + // Keep going until we hit max load factor. + while (t.load_factor() < .6) { + perfect_ratios.push_back(1.0 * perfect / t.size()); + update_perfect(add(t)); + } + while (t.load_factor() > .5) { + perfect_ratios.push_back(1.0 * perfect / t.size()); + update_perfect(add(t)); + } + return perfect_ratios; +} + +std::vector<std::pair<double, double>> StringTablePefectRatios() { + constexpr bool kRandomizesInserts = +#if NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. 
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {{0.986, 0.02}, {0.95, 0.02}, {0.89, 0.02}};
+      } else {
+        return {{0.995, 0.01}, {0.97, 0.01}, {0.89, 0.02}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {{0.973, 0.01}, {0.965, 0.01}, {0.92, 0.02}};
+      } else {
+        return {{0.995, 0.005}, {0.99, 0.005}, {0.94, 0.01}};
+      }
+    default:
+      // Ignore anything else.
+      return {};
+  }
+}
+
+// This is almost a change detector, but it allows us to know how we are
+// affecting the probe distribution.
+TEST(Table, EffectiveLoadFactorStrings) {
+  std::vector<double> perfect_ratios =
+      CollectPerfectRatios(StringTable(), [](StringTable& t) {
+        return t.emplace(std::to_string(t.size()), "").first->first;
+      });
+
+  auto ratios = StringTablePerfectRatios();
+  if (ratios.empty()) return;
+
+  EXPECT_THAT(perfect_ratios.front(),
+              DoubleNear(ratios[0].first, ratios[0].second));
+  EXPECT_THAT(perfect_ratios[perfect_ratios.size() / 2],
+              DoubleNear(ratios[1].first, ratios[1].second));
+  EXPECT_THAT(perfect_ratios.back(),
+              DoubleNear(ratios[2].first, ratios[2].second));
+}
+
+std::vector<std::pair<double, double>> IntTablePerfectRatios() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {{0.99, 0.02}, {0.985, 0.02}, {0.95, 0.05}};
+      } else {
+        return {{0.99, 0.01}, {0.99, 0.01}, {0.95, 0.02}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {{0.98, 0.02}, {0.978, 0.02}, {0.96, 0.02}};
+      } else {
+        return {{0.998, 0.003}, {0.995, 0.01}, {0.975, 0.02}};
+      }
+    default:
+      // Ignore anything else.
+      return {};
+  }
+}
+
+// This is almost a change detector, but it allows us to know how we are
+// affecting the probe distribution.
+TEST(Table, EffectiveLoadFactorInts) {
+  std::vector<double> perfect_ratios = CollectPerfectRatios(
+      IntTable(), [](IntTable& t) { return *t.emplace(t.size()).first; });
+
+  auto ratios = IntTablePerfectRatios();
+  if (ratios.empty()) return;
+
+  EXPECT_THAT(perfect_ratios.front(),
+              DoubleNear(ratios[0].first, ratios[0].second));
+  EXPECT_THAT(perfect_ratios[perfect_ratios.size() / 2],
+              DoubleNear(ratios[1].first, ratios[1].second));
+  EXPECT_THAT(perfect_ratios.back(),
+              DoubleNear(ratios[2].first, ratios[2].second));
+}
+
+// Confirm that we assert if we try to erase() end().
+TEST(Table, EraseOfEndAsserts) {
+  // Use an assert with side-effects to figure out if they are actually
+  // enabled.
+  bool assert_enabled = false;
+  assert([&]() {
+    assert_enabled = true;
+    return true;
+  }());
+  if (!assert_enabled) return;
+
+  IntTable t;
+  // Extra simple "regexp" as regexp support is highly varied across platforms.
+  constexpr char kDeathMsg[] = "it != end";
+  EXPECT_DEATH(t.erase(t.end()), kDeathMsg);
+}
+
+#ifdef ADDRESS_SANITIZER
+TEST(Sanitizer, PoisoningUnused) {
+  IntTable t;
+  // Insert something to force an allocation.
+  int64_t& v1 = *t.insert(0).first;
+
+  // Make sure there is something to test.
+  ASSERT_GT(t.capacity(), 1);
+
+  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+  for (size_t i = 0; i < t.capacity(); ++i) {
+    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+  }
+}
+
+TEST(Sanitizer, PoisoningOnErase) {
+  IntTable t;
+  int64_t& v = *t.insert(0).first;
+
+  EXPECT_FALSE(__asan_address_is_poisoned(&v));
+  t.erase(0);
+  EXPECT_TRUE(__asan_address_is_poisoned(&v));
+}
+#endif  // ADDRESS_SANITIZER
+
+}  // namespace
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/internal/tracked.h b/absl/container/internal/tracked.h
new file mode 100644
index 00000000..7d14af03
--- /dev/null
+++ b/absl/container/internal/tracked.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
+namespace absl {
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
+template <class T>
+class Tracked {
+ public:
+  Tracked() {}
+  // NOLINTNEXTLINE(runtime/explicit)
+  Tracked(const T& val) : val_(val) {}
+  Tracked(const Tracked& that)
+      : val_(that.val_),
+        num_moves_(that.num_moves_),
+        num_copies_(that.num_copies_) {
+    ++(*num_copies_);
+  }
+  Tracked(Tracked&& that)
+      : val_(std::move(that.val_)),
+        num_moves_(std::move(that.num_moves_)),
+        num_copies_(std::move(that.num_copies_)) {
+    ++(*num_moves_);
+  }
+  Tracked& operator=(const Tracked& that) {
+    val_ = that.val_;
+    num_moves_ = that.num_moves_;
+    num_copies_ = that.num_copies_;
+    ++(*num_copies_);
+    return *this;
+  }
+  Tracked& operator=(Tracked&& that) {
+    val_ = std::move(that.val_);
+    num_moves_ = std::move(that.num_moves_);
+    num_copies_ = std::move(that.num_copies_);
+    ++(*num_moves_);
+    return *this;
+  }
+
+  const T& val() const { return val_; }
+
+  friend bool operator==(const Tracked& a, const Tracked& b) {
+    return a.val_ == b.val_;
+  }
+  friend bool operator!=(const Tracked& a, const Tracked& b) {
+    return !(a == b);
+  }
+
+  size_t num_copies() { return *num_copies_; }
+  size_t num_moves() { return *num_moves_; }
+
+ private:
+  T val_;
+  std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+  std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
+
+}  // namespace container_internal
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 00000000..2ffb646c
--- /dev/null
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,404 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ + +#include <algorithm> +#include <vector> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +namespace container_internal { + +template <class UnordMap> +class ConstructorTest : public ::testing::Test {}; + +TYPED_TEST_CASE_P(ConstructorTest); + +TYPED_TEST_P(ConstructorTest, NoArgs) { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); +} + +TYPED_TEST_P(ConstructorTest, BucketCount) { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHash) { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, BucketAlloc) { +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + 
EXPECT_THAT(m, ::testing::UnorderedElementsAre()); +#endif +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, CopyConstructor) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +#endif +} + +// TODO(alkis): Test non-propagating allocators on copy constructors. 
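+//
+// For reference, copy construction obtains its allocator through
+// std::allocator_traits<A>::select_on_container_copy_construction, so a
+// non-propagating test allocator would look roughly like the sketch below.
+// (`NonPropagatingAlloc` is a hypothetical name, not part of this suite, and
+// the usual rebind/converting-constructor boilerplate is omitted.)
+//
+//   template <class T>
+//   struct NonPropagatingAlloc : std::allocator<T> {
+//     NonPropagatingAlloc select_on_container_copy_construction() const {
+//       return NonPropagatingAlloc();  // Fresh allocator, not a copy.
+//     }
+//   };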
+ +TYPED_TEST_P(ConstructorTest, MoveConstructor) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +#endif +} + +// TODO(alkis): Test non-propagating allocators on move constructors. + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType<TypeParam>; + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using A = typename TypeParam::allocator_type; + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, Assignment) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator<T> gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, 
equal, alloc);
+  TypeParam n;
+  n = m;
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam t(m);
+  TypeParam n;
+  n = std::move(t);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam n({gen()});
+  n = m;
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam t(m);
+  TypeParam n({gen()});
+  n = std::move(t);
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  // Start with a non-empty container so that the assignment below actually
+  // has something to overwrite.
+  TypeParam m({gen()});
+  m = values;
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values);
+  m = *&m;  // Avoid -Wself-assign.
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory
+// leak according to the heap checker!).
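+//
+// For illustration only (hypothetical snippet, intentionally not a test):
+//
+//   TypeParam m({gen(), gen()});
+//   m = std::move(m);  // Leaves m in an unspecified state; may leak.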
+
+REGISTER_TYPED_TEST_CASE_P(
+    ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+    BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
+    BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+    InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+    MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+    InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+    MoveAssignment, AssignmentFromInitializerList,
+    AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+    AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+}  // namespace container_internal
+}  // namespace absl
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
new file mode 100644
index 00000000..1f1b6b48
--- /dev/null
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, At) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  for (const auto& p : values) {
+    const auto& val = m.at(p.first);
+    EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+  }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values) {
+    auto& val = m[p.first];
+    EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+    val = p.second;
+  }
+  for (const auto& p : values)
+    EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values)
+    EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+  m.insert(values.begin(), values.end());
+  for (const auto& p : values)
+    EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+  using std::get;
+  using T = 
hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m; + for (const auto& p : values) + EXPECT_TRUE(m.end() == m.find(p.first)) + << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) { + auto it = m.find(p.first); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first); + EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first); + } +} + +TYPED_TEST_P(LookupTest, EqualRange) { + using std::get; + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m; + for (const auto& p : values) { + auto r = m.equal_range(p.first); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& p : values) { + auto r = m.equal_range(p.first); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first); + } +} + +REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find, + EqualRange); + +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h new file mode 100644 index 00000000..b6c633ae --- /dev/null +++ b/absl/container/internal/unordered_map_modifiers_test.h @@ -0,0 +1,272 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  m.clear();
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.insert(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.insert(val2);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto it = m.insert(m.end(), val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  it = m.insert(it, val2);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  m.insert(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+  using std::get;
+  using K = typename TypeParam::key_type;
+  using V = typename TypeParam::mapped_type;
+  K k = hash_internal::Generator<K>()();
+  V val = hash_internal::Generator<V>()();
+  TypeParam m;
+  auto p = m.insert_or_assign(k, val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(k, get<0>(*p.first));
+  EXPECT_EQ(val, get<1>(*p.first));
+  V val2 = hash_internal::Generator<V>()();
+  p = m.insert_or_assign(k, val2);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(k, get<0>(*p.first));
+  EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+  using std::get;
+  using K = typename TypeParam::key_type;
+  using V = typename TypeParam::mapped_type;
+  K k = hash_internal::Generator<K>()();
+  V val = hash_internal::Generator<V>()();
+  TypeParam m;
+  auto it = m.insert_or_assign(m.end(), k, val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(k, get<0>(*it));
+  EXPECT_EQ(val, get<1>(*it));
+  V val2 = hash_internal::Generator<V>()();
+  it = m.insert_or_assign(it, k, val2);
+  EXPECT_EQ(k, get<0>(*it));
+  EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way. 
Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator<V>()()}; + p = m.emplace(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +} + +TYPED_TEST_P(ModifiersTest, EmplaceHint) { + using T = hash_internal::GeneratedType<TypeParam>; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator<T>()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator<V>()()}; + it = m.emplace_hint(it, val2); + EXPECT_EQ(val, *it); +} + +TYPED_TEST_P(ModifiersTest, TryEmplace) { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType<TypeParam>; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator<T>()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.try_emplace(val.first, val.second); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator<V>()()}; + p = m.try_emplace(val2.first, val2.second); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +#endif +} + +TYPED_TEST_P(ModifiersTest, TryEmplaceHint) { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType<TypeParam>; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator<T>()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.try_emplace(m.end(), val.first, val.second); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator<V>()()}; + it = m.try_emplace(it, val2.first, val2.second); + EXPECT_EQ(val, *it); +#endif +} + +template <class V> +using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type; + +// In openmap we chose not to return the iterator from erase because that's +// more expensive. As such we adapt erase to return an iterator here. +struct EraseFirst { + template <class Map> + auto operator()(Map* m, int) const + -> IfNotVoid<decltype(m->erase(m->begin()))> { + return m->erase(m->begin()); + } + template <class Map> + typename Map::iterator operator()(Map* m, ...) 
const { + auto it = m->begin(); + m->erase(it++); + return it; + } +}; + +TYPED_TEST_P(ModifiersTest, Erase) { + using T = hash_internal::GeneratedType<TypeParam>; + using std::get; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto& first = *m.begin(); + std::vector<T> values2; + for (const auto& val : values) + if (get<0>(val) != get<0>(first)) values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(), + values2.end())); +} + +TYPED_TEST_P(ModifiersTest, EraseRange) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); +} + +TYPED_TEST_P(ModifiersTest, EraseKey) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0].first)); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, + values.end())); +} + +TYPED_TEST_P(ModifiersTest, Swap) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> v1; + std::vector<T> v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1)); +} + +// TODO(alkis): Write tests for extract. +// TODO(alkis): Write tests for merge. + +REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, InsertOrAssign, InsertOrAssignHint, + Emplace, EmplaceHint, TryEmplace, TryEmplaceHint, + Erase, EraseRange, EraseKey, Swap); + +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ diff --git a/absl/container/internal/unordered_map_test.cc b/absl/container/internal/unordered_map_test.cc new file mode 100644 index 00000000..40e799cd --- /dev/null +++ b/absl/container/internal/unordered_map_test.cc @@ -0,0 +1,38 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <unordered_map> + +#include "absl/container/internal/unordered_map_constructor_test.h" +#include "absl/container/internal/unordered_map_lookup_test.h" +#include "absl/container/internal/unordered_map_modifiers_test.h" + +namespace absl { +namespace container_internal { +namespace { + +using MapTypes = ::testing::Types< + std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual, + Alloc<std::pair<const int, int>>>, + std::unordered_map<std::string, std::string, StatefulTestingHash, + StatefulTestingEqual, + Alloc<std::pair<const std::string, std::string>>>>; + +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ConstructorTest, MapTypes); +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, LookupTest, MapTypes); +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ModifiersTest, MapTypes); + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h new file mode 100644 index 00000000..cb593704 --- /dev/null +++ b/absl/container/internal/unordered_set_constructor_test.h @@ -0,0 +1,408 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+namespace container_internal {
+
+template <class UnordSet>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+  TypeParam m;
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+  TypeParam m(123);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+  using H = typename TypeParam::hasher;
+  H hasher;
+  TypeParam m(123, hasher);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  H hasher;
+  E equal;
+  TypeParam m(123, hasher, equal);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+
+  const auto& cm = m;
+  EXPECT_EQ(cm.hash_function(), hasher);
+  EXPECT_EQ(cm.key_eq(), equal);
+  EXPECT_EQ(cm.get_allocator(), alloc);
+  EXPECT_TRUE(cm.empty());
+  EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+  EXPECT_GE(cm.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  TypeParam m(123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, 
InputIteratorBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector<T> values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator<T>()()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector<T> values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator<T>()()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector<T> values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator<T>()()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, CopyConstructor) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +#endif +} + +// TODO(alkis): Test non-propagating allocators on copy constructors. 
+ +TYPED_TEST_P(ConstructorTest, MoveConstructor) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +#endif +} + +// TODO(alkis): Test non-propagating allocators on move constructors. + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType<TypeParam>; + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using A = typename TypeParam::allocator_type; + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::Generator<T> gen; + std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +#endif +} + +TYPED_TEST_P(ConstructorTest, Assignment) { + using T = hash_internal::GeneratedType<TypeParam>; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator<T> gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, 
equal, alloc);
+  TypeParam n;
+  n = m;
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam t(m);
+  TypeParam n;
+  n = std::move(t);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam n({gen()});
+  n = m;
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam t(m);
+  TypeParam n({gen()});
+  n = std::move(t);
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  // Start with a non-empty container so that the assignment below actually
+  // has something to overwrite.
+  TypeParam m({gen()});
+  m = values;
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values);
+  m = *&m;  // Avoid -Wself-assign.
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+    ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+    BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
+    BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+    InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+    MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+    InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+    MoveAssignment, AssignmentFromInitializerList,
+    AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+    AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+}  // namespace container_internal
+}  // namespace absl
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
new file mode 100644
index 00000000..aca9c6a5
--- /dev/null
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -0,0 +1,88 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values)
+    EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values)
+    EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values)
+    EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values) {
+    typename TypeParam::iterator it = m.find(v);
+    static_assert(std::is_same<const typename TypeParam::value_type&,
+                               decltype(*it)>::value,
+                  "");
+    static_assert(std::is_same<const typename TypeParam::value_type*,
+                               decltype(it.operator->())>::value,
+                  "");
+    EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+    EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+  }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values) {
+    auto r = m.equal_range(v);
+    ASSERT_EQ(0, std::distance(r.first, r.second));
+  }
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values) {
+    auto r = m.equal_range(v);
+    ASSERT_EQ(1, std::distance(r.first, r.second));
+    EXPECT_EQ(v, *r.first);
+  }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+}  // namespace container_internal
+}  // namespace absl
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
new file mode 100644
index 00000000..9beacf33
--- /dev/null
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -0,0 +1,187 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  m.clear();
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.insert(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  p = m.insert(val);
+  EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto it = m.insert(m.end(), val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+  it = m.insert(it, val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  m.insert(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way.
+  // Perhaps with test traits/policy.
+  auto p = m.emplace(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  p = m.emplace(val);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way.
+  // Perhaps with test traits/policy.
+  auto it = m.emplace_hint(m.end(), val);
+  EXPECT_EQ(val, *it);
+  it = m.emplace_hint(it, val);
+  EXPECT_EQ(val, *it);
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst { + template <class Map> + auto operator()(Map* m, int) const + -> IfNotVoid<decltype(m->erase(m->begin()))> { + return m->erase(m->begin()); + } + template <class Map> + typename Map::iterator operator()(Map* m, ...) const { + auto it = m->begin(); + m->erase(it++); + return it; + } +}; + +TYPED_TEST_P(ModifiersTest, Erase) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + std::vector<T> values2; + for (const auto& val : values) + if (val != *m.begin()) values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), + values2.end())); +} + +TYPED_TEST_P(ModifiersTest, EraseRange) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); +} + +TYPED_TEST_P(ModifiersTest, EraseKey) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator<T>()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0])); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, + values.end())); +} + +TYPED_TEST_P(ModifiersTest, Swap) { + using T = hash_internal::GeneratedType<TypeParam>; + std::vector<T> v1; + std::vector<T> v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1)); +} + +// TODO(alkis): Write tests for extract. +// TODO(alkis): Write tests for merge. + +REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, Emplace, EmplaceHint, Erase, EraseRange, + EraseKey, Swap); + +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ diff --git a/absl/container/internal/unordered_set_test.cc b/absl/container/internal/unordered_set_test.cc new file mode 100644 index 00000000..1281ce53 --- /dev/null +++ b/absl/container/internal/unordered_set_test.cc @@ -0,0 +1,37 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <unordered_set> + +#include "absl/container/internal/unordered_set_constructor_test.h" +#include "absl/container/internal/unordered_set_lookup_test.h" +#include "absl/container/internal/unordered_set_modifiers_test.h" + +namespace absl { +namespace container_internal { +namespace { + +using SetTypes = + ::testing::Types<std::unordered_set<int, StatefulTestingHash, + StatefulTestingEqual, Alloc<int>>, + std::unordered_set<std::string, StatefulTestingHash, + StatefulTestingEqual, Alloc<std::string>>>; + +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ConstructorTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, LookupTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ModifiersTest, SetTypes); + +} // namespace +} // namespace container_internal +} // namespace absl diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h new file mode 100644 index 00000000..3c7de1e8 --- /dev/null +++ b/absl/container/node_hash_map.h @@ -0,0 +1,530 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: node_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_map<K, V>` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `node_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. However, if you need pointer stability and cannot store +// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a +// valid alternative. As well, if you are migrating your code from using +// `std::unordered_map`, a `node_hash_map` provides a more straightforward +// migration, because it guarantees pointer stability. Consider migrating to +// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map` +// upon further review. 
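+//
+// A minimal sketch of the pointer-stability guarantee described above
+// (hypothetical usage; the names and loop bound are illustrative only):
+//
+//   absl::node_hash_map<int, std::string> m = {{1, "one"}};
+//   const std::string* p = &m[1];           // pointer into the map
+//   for (int i = 2; i < 1000; ++i) m[i];    // force several rehashes
+//   assert(p == &m.find(1)->second);        // nodes never move; still valid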
+
+#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
+#define ABSL_CONTAINER_NODE_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+namespace container_internal {
+template <class Key, class Value>
+class NodeHashMapPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::node_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is provided a compatible heterogeneous
+//   hashing function and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+//   // Create a node hash map of three strings (that map to strings)
+//   absl::node_hash_map<std::string, std::string> ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the node hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the node hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template <class Key, class Value,
+          class Hash = absl::container_internal::hash_default_hash<Key>,
+          class Eq = absl::container_internal::hash_default_eq<Key>,
+          class Alloc = std::allocator<std::pair<const Key, Value>>>
+class node_hash_map
+    : public absl::container_internal::raw_hash_map<
+          absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
+          Alloc> {
+  using Base = typename node_hash_map::raw_hash_map;
+
+ public:
+  node_hash_map() {}
+  using Base::Base;
+
+  // node_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `node_hash_map`.
+  using Base::begin;
+
+  // node_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `node_hash_map`.
+  using Base::cbegin;
+
+  // node_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `node_hash_map`.
+  using Base::cend;
+
+  // node_hash_map::end()
+  //
+  // Returns an iterator to the end of the `node_hash_map`.
+ using Base::end; + + // node_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_map`. + // + // NOTE: this member function is particular to `absl::node_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // node_hash_map::empty() + // + // Returns whether or not the `node_hash_map` is empty. + using Base::empty; + + // node_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `node_hash_map<K, V>`. + using Base::max_size; + + // node_hash_map::size() + // + // Returns the number of elements currently within the `node_hash_map`. + using Base::size; + + // node_hash_map::clear() + // + // Removes all elements from the `node_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_map::erase() + // + // Erases elements within the `node_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_map`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_map` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists. + using Base::erase; + + // node_hash_map::insert() + // + // Inserts an element of the specified value into the `node_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair<iterator,bool> insert(const init_type& value): + // + // Inserts a value into the `node_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a `bool` denoting whether the insertion took place. + // + // std::pair<iterator,bool> insert(T&& value): + // std::pair<iterator,bool> insert(init_type&& value ): + // + // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair` + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a `bool` denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value ); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last ): + // + // Inserts a range of values [`first`, `last`). 
+ // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list<init_type> ilist ): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `node_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due to + // the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj): + // std::pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // node_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... 
args):
+  //   std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+  //
+  // Inserts (via copy or move) the element of the specified key into the
+  // `node_hash_map`.
+  //
+  //   iterator try_emplace(const_iterator hint,
+  //                        const init_type& k, Args&&... args):
+  //   iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+  //
+  // Inserts (via copy or move) the element of the specified key into the
+  // `node_hash_map` using the position of `hint` as a non-binding suggestion
+  // for where to begin the insertion search.
+  using Base::try_emplace;
+
+  // node_hash_map::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  //   node_type extract(const_iterator position):
+  //
+  // Extracts the key,value pair of the element at the indicated position and
+  // returns a node handle owning that extracted data.
+  //
+  //   node_type extract(const key_type& x):
+  //
+  // Extracts the key,value pair of the element with a key matching the passed
+  // key value and returns a node handle owning that extracted data. If the
+  // `node_hash_map` does not contain an element with a matching key, this
+  // function returns an empty node handle.
+  using Base::extract;
+
+  // node_hash_map::merge()
+  //
+  // Extracts elements from a given `source` node hash map into this
+  // `node_hash_map`. If the destination `node_hash_map` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // node_hash_map::swap(node_hash_map& other)
+  //
+  // Exchanges the contents of this `node_hash_map` with those of the `other`
+  // node hash map, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `node_hash_map` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the node hash map's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the map's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // node_hash_map::rehash(count)
+  //
+  // Rehashes the `node_hash_map`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  using Base::rehash;
+
+  // node_hash_map::reserve(count)
+  //
+  // Sets the number of slots in the `node_hash_map` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // node_hash_map::at()
+  //
+  // Returns a reference to the mapped value of the element with key equivalent
+  // to the passed key.
+  using Base::at;
+
+  // node_hash_map::contains()
+  //
+  // Determines whether an element with a key comparing equal to the given `key`
+  // exists within the `node_hash_map`, returning `true` if so or `false`
+  // otherwise.
+  using Base::contains;
+
+  // node_hash_map::count(const Key& key) const
+  //
+  // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `node_hash_map`. Note that this function will return
+  // either `1` or `0` since duplicate keys are not allowed within a
+  // `node_hash_map`.
+  using Base::count;
+
+  // node_hash_map::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `node_hash_map`.
+  using Base::equal_range;
+
+  // node_hash_map::find()
+  //
+  // Finds an element with the passed `key` within the `node_hash_map`.
+  using Base::find;
+
+  // node_hash_map::operator[]()
+  //
+  // Returns a reference to the value mapped to the passed key within the
+  // `node_hash_map`, performing an `insert()` if the key does not already
+  // exist. If an insertion occurs and results in a rehashing of the container,
+  // all iterators are invalidated. Otherwise iterators are not affected and
+  // references are not invalidated. Overloads are listed below.
+  //
+  //   T& operator[](const Key& key):
+  //
+  // Inserts an init_type object constructed in-place if the element with the
+  // given key does not exist.
+  //
+  //   T& operator[](Key&& key):
+  //
+  // Inserts an init_type object constructed in-place provided that an element
+  // with the given key does not exist.
+  using Base::operator[];
+
+  // node_hash_map::bucket_count()
+  //
+  // Returns the number of "buckets" within the `node_hash_map`.
+  using Base::bucket_count;
+
+  // node_hash_map::load_factor()
+  //
+  // Returns the current load factor of the `node_hash_map` (the average number
+  // of slots occupied with a value within the hash map).
+  using Base::load_factor;
+
+  // node_hash_map::max_load_factor()
+  //
+  // Manages the maximum load factor of the `node_hash_map`. Overloads are
+  // listed below.
+  //
+  //   float node_hash_map::max_load_factor()
+  //
+  // Returns the current maximum load factor of the `node_hash_map`.
+  //
+  //   void node_hash_map::max_load_factor(float ml)
+  //
+  // Sets the maximum load factor of the `node_hash_map` to the passed value.
+  //
+  // NOTE: This overload is provided only for API compatibility with the STL;
+  // `node_hash_map` will ignore any set load factor and manage its rehashing
+  // internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // node_hash_map::get_allocator()
+  //
+  // Returns the allocator function associated with this `node_hash_map`.
+  using Base::get_allocator;
+
+  // node_hash_map::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `node_hash_map`.
+  using Base::hash_function;
+
+  // node_hash_map::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+
+  ABSL_DEPRECATED("Call `hash_function()` instead.")
+  typename Base::hasher hash_funct() { return this->hash_function(); }
+
+  ABSL_DEPRECATED("Call `rehash()` instead.")
+  void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+namespace container_internal {
+
+template <class Key, class Value>
+class NodeHashMapPolicy
+    : public absl::container_internal::node_hash_policy<
+          std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
+  using value_type = std::pair<const Key, Value>;
+
+ public:
+  using key_type = Key;
+  using mapped_type = Value;
+  using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+  template <class Allocator, class...
Args> + static value_type* new_element(Allocator* alloc, Args&&... args) { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc<value_type>; + PairAlloc pair_alloc(*alloc); + value_type* res = + absl::allocator_traits<PairAlloc>::allocate(pair_alloc, 1); + absl::allocator_traits<PairAlloc>::construct(pair_alloc, res, + std::forward<Args>(args)...); + return res; + } + + template <class Allocator> + static void delete_element(Allocator* alloc, value_type* pair) { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc<value_type>; + PairAlloc pair_alloc(*alloc); + absl::allocator_traits<PairAlloc>::destroy(pair_alloc, pair); + absl::allocator_traits<PairAlloc>::deallocate(pair_alloc, pair, 1); + } + + template <class F, class... Args> + static decltype(absl::container_internal::DecomposePair( + std::declval<F>(), std::declval<Args>()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposePair(std::forward<F>(f), + std::forward<Args>(args)...); + } + + static size_t element_space_used(const value_type*) { + return sizeof(value_type); + } + + static Value& value(value_type* elem) { return elem->second; } + static const Value& value(const value_type* elem) { return elem->second; } +}; +} // namespace container_internal +} // namespace absl +#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_ diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc new file mode 100644 index 00000000..bd789644 --- /dev/null +++ b/absl/container/node_hash_map_test.cc @@ -0,0 +1,218 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "absl/container/node_hash_map.h"
+
+#include "absl/container/internal/tracked.h"
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+
+namespace absl {
+namespace container_internal {
+namespace {
+
+using ::testing::Field;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+using MapTypes = ::testing::Types<
+    absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
+                        Alloc<std::pair<const int, int>>>,
+    absl::node_hash_map<std::string, std::string, StatefulTestingHash,
+                        StatefulTestingEqual,
+                        Alloc<std::pair<const std::string, std::string>>>>;
+
+INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ModifiersTest, MapTypes);
+
+using M = absl::node_hash_map<std::string, Tracked<int>>;
+
+TEST(NodeHashMap, Emplace) {
+  M m;
+  Tracked<int> t(53);
+  m.emplace("a", t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(1, t.num_copies());
+
+  m.emplace(std::string("a"), t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(1, t.num_copies());
+
+  std::string a("a");
+  m.emplace(a, t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(1, t.num_copies());
+
+  const std::string ca("a");
+  m.emplace(ca, t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(1, t.num_copies());
+
+  m.emplace(std::make_pair("a", t));
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(2, t.num_copies());
+
+  m.emplace(std::make_pair(std::string("a"), t));
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(3, t.num_copies());
+
+  std::pair<std::string, Tracked<int>> p("a", t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(4, t.num_copies());
+  m.emplace(p);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(4, t.num_copies());
+
+  const std::pair<std::string, Tracked<int>> cp("a", t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(5, t.num_copies());
+  m.emplace(cp);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(5, t.num_copies());
+
+  std::pair<const std::string, Tracked<int>> pc("a", t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(6, t.num_copies());
+  m.emplace(pc);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(6, t.num_copies());
+
+  const std::pair<const std::string, Tracked<int>> cpc("a", t);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(7, t.num_copies());
+  m.emplace(cpc);
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(7, t.num_copies());
+
+  m.emplace(std::piecewise_construct, std::forward_as_tuple("a"),
+            std::forward_as_tuple(t));
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(7, t.num_copies());
+
+  m.emplace(std::piecewise_construct, std::forward_as_tuple(std::string("a")),
+            std::forward_as_tuple(t));
+  ASSERT_EQ(0, t.num_moves());
+  ASSERT_EQ(7, t.num_copies());
+}
+
+TEST(NodeHashMap, AssignRecursive) {
+  struct Tree {
+    // Verify that unordered_map<K, IncompleteType> can be instantiated.
+    absl::node_hash_map<int, Tree> children;
+  };
+  Tree root;
+  const Tree& child = root.children.emplace().first->second;
+  // Verify that `lhs = rhs` doesn't read rhs after clearing lhs.
+  root = child;
+}
+
+TEST(NodeHashMap, MoveOnlyKey) {
+  struct Key {
+    Key() = default;
+    Key(Key&&) = default;
+    Key& operator=(Key&&) = default;
+  };
+  struct Eq {
+    bool operator()(const Key&, const Key&) const { return true; }
+  };
+  struct Hash {
+    size_t operator()(const Key&) const { return 0; }
+  };
+  absl::node_hash_map<Key, int, Hash, Eq> m;
+  m[Key()];
+}
+
+struct NonMovableKey {
+  explicit NonMovableKey(int i) : i(i) {}
+  NonMovableKey(NonMovableKey&&) = delete;
+  int i;
+};
+struct NonMovableKeyHash {
+  using is_transparent = void;
+  size_t operator()(const NonMovableKey& k) const { return k.i; }
+  size_t operator()(int k) const { return k; }
+};
+struct NonMovableKeyEq {
+  using is_transparent = void;
+  bool operator()(const NonMovableKey& a, const NonMovableKey& b) const {
+    return a.i == b.i;
+  }
+  bool operator()(const NonMovableKey& a, int b) const { return a.i == b; }
+};
+
+TEST(NodeHashMap, MergeExtractInsert) {
+  absl::node_hash_map<NonMovableKey, int, NonMovableKeyHash, NonMovableKeyEq>
+      set1, set2;
+  set1.emplace(std::piecewise_construct, std::make_tuple(7),
+               std::make_tuple(-7));
+  set1.emplace(std::piecewise_construct, std::make_tuple(17),
+               std::make_tuple(-17));
+
+  set2.emplace(std::piecewise_construct, std::make_tuple(7),
+               std::make_tuple(-70));
+  set2.emplace(std::piecewise_construct, std::make_tuple(19),
+               std::make_tuple(-190));
+
+  auto Elem = [](int key, int value) {
+    return Pair(Field(&NonMovableKey::i, key), value);
+  };
+
+  EXPECT_THAT(set1, UnorderedElementsAre(Elem(7, -7), Elem(17, -17)));
+  EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(19, -190)));
+
+  // NonMovableKey is neither copyable nor movable. We should still be able to
+  // move nodes around.
+  static_assert(!std::is_move_constructible<NonMovableKey>::value, "");
+  set1.merge(set2);
+
+  EXPECT_THAT(set1,
+              UnorderedElementsAre(Elem(7, -7), Elem(17, -17), Elem(19, -190)));
+  EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
+
+  auto node = set1.extract(7);
+  EXPECT_TRUE(node);
+  EXPECT_EQ(node.key().i, 7);
+  EXPECT_EQ(node.mapped(), -7);
+  EXPECT_THAT(set1, UnorderedElementsAre(Elem(17, -17), Elem(19, -190)));
+
+  auto insert_result = set2.insert(std::move(node));
+  EXPECT_FALSE(node);
+  EXPECT_FALSE(insert_result.inserted);
+  EXPECT_TRUE(insert_result.node);
+  EXPECT_EQ(insert_result.node.key().i, 7);
+  EXPECT_EQ(insert_result.node.mapped(), -7);
+  EXPECT_THAT(*insert_result.position, Elem(7, -70));
+  EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
+
+  node = set1.extract(17);
+  EXPECT_TRUE(node);
+  EXPECT_EQ(node.key().i, 17);
+  EXPECT_EQ(node.mapped(), -17);
+  EXPECT_THAT(set1, UnorderedElementsAre(Elem(19, -190)));
+
+  node.mapped() = 23;
+
+  insert_result = set2.insert(std::move(node));
+  EXPECT_FALSE(node);
+  EXPECT_TRUE(insert_result.inserted);
+  EXPECT_FALSE(insert_result.node);
+  EXPECT_THAT(*insert_result.position, Elem(17, 23));
+  EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(17, 23)));
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
new file mode 100644
index 00000000..927a454c
--- /dev/null
+++ b/absl/container/node_hash_set.h
@@ -0,0 +1,439 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: node_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_set<T>` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of set elements can be done
+// as an `O(1)` operation. However, `node_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contain other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash table should be a map of type
+// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need
+// pointer stability, a `node_hash_set` should be your preferred choice. As
+// well, if you are migrating your code from using `std::unordered_set`, a
+// `node_hash_set` should be an easy migration. Consider migrating to
+// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
+// upon further review.
+
+#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
+#define ABSL_CONTAINER_NODE_HASH_SET_H_
+
+#include <type_traits>
+
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+namespace container_internal {
+template <typename T>
+struct NodeHashSetPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::node_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_set<T>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is provided a compatible heterogeneous hashing function and
+//   equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_set` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+//   // Create a node hash set of three strings
+//   absl::node_hash_set<std::string> ducks =
+//     {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the node hash set
+//   ducks.insert("donald");
+//
+//   // Force a rehash of the node hash set
+//   ducks.rehash(0);
+//
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!" << std::endl;
+//   }
+template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
+          class Eq = absl::container_internal::hash_default_eq<T>,
+          class Alloc = std::allocator<T>>
+class node_hash_set
+    : public absl::container_internal::raw_hash_set<
+          absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
+  using Base = typename node_hash_set::raw_hash_set;
+
+ public:
+  node_hash_set() {}
+  using Base::Base;
+
+  // node_hash_set::begin()
+  //
+  // Returns an iterator to the beginning of the `node_hash_set`.
+  using Base::begin;
+
+  // node_hash_set::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `node_hash_set`.
+  using Base::cbegin;
+
+  // node_hash_set::cend()
+  //
+  // Returns a const iterator to the end of the `node_hash_set`.
+  using Base::cend;
+
+  // node_hash_set::end()
+  //
+  // Returns an iterator to the end of the `node_hash_set`.
+  using Base::end;
+
+  // node_hash_set::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `node_hash_set`.
+  //
+  // NOTE: this member function is particular to `absl::node_hash_set` and is
+  // not provided in the `std::unordered_set` API.
+  using Base::capacity;
+
+  // node_hash_set::empty()
+  //
+  // Returns whether or not the `node_hash_set` is empty.
+  using Base::empty;
+
+  // node_hash_set::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `node_hash_set` under current memory constraints. This value can be thought
+  // of as the largest value of `std::distance(begin(), end())` for a
+  // `node_hash_set<T>`.
+  using Base::max_size;
+
+  // node_hash_set::size()
+  //
+  // Returns the number of elements currently within the `node_hash_set`.
+  using Base::size;
+
+  // node_hash_set::clear()
+  //
+  // Removes all elements from the `node_hash_set`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer call `erase(begin(), end())`.
+  using Base::clear;
+
+  // node_hash_set::erase()
+  //
+  // Erases elements within the `node_hash_set`. Erasing does not trigger a
+  // rehash. Overloads are listed below.
+  //
+  //   void erase(const_iterator pos):
+  //
+  // Erases the element at `position` of the `node_hash_set`, returning
+  // `void`.
+  //
+  // NOTE: this return behavior is different than that of STL containers in
+  // general and `std::unordered_set` in particular.
+  //
+  //   iterator erase(const_iterator first, const_iterator last):
+  //
+  // Erases the elements in the open interval [`first`, `last`), returning an
+  // iterator pointing to `last`.
+  //
+  //   size_type erase(const key_type& key):
+  //
+  // Erases the element with the matching key, if it exists.
+  using Base::erase;
+
+  // node_hash_set::insert()
+  //
+  // Inserts an element of the specified value into the `node_hash_set`,
+  // returning an iterator pointing to the newly inserted element, provided that
+  // an element with the given key does not already exist. If rehashing occurs
+  // due to the insertion, all iterators are invalidated. Overloads are listed
+  // below.
+  //
+  //   std::pair<iterator,bool> insert(const T& value):
+  //
+  // Inserts a value into the `node_hash_set`. Returns a pair consisting of an
+  // iterator to the inserted element (or to the element that prevented the
+  // insertion) and a bool denoting whether the insertion took place.
+  //
+  //   std::pair<iterator,bool> insert(T&& value):
+  //
+  // Inserts a moveable value into the `node_hash_set`. Returns a pair
+  // consisting of an iterator to the inserted element (or to the element that
+  // prevented the insertion) and a bool denoting whether the insertion took
+  // place.
+  //
+  //   iterator insert(const_iterator hint, const T& value):
+  //   iterator insert(const_iterator hint, T&& value):
+  //
+  // Inserts a value, using the position of `hint` as a non-binding suggestion
+  // for where to begin the insertion search. Returns an iterator to the
+  // inserted element, or to the existing element that prevented the
+  // insertion.
+  //
+  //   void insert(InputIterator first, InputIterator last):
+  //
+  // Inserts a range of values [`first`, `last`).
+  //
+  // NOTE: Although the STL does not specify which element may be inserted if
+  // multiple keys compare equivalently, for `node_hash_set` we guarantee the
+  // first match is inserted.
+  //
+  //   void insert(std::initializer_list<T> ilist):
+  //
+  // Inserts the elements within the initializer list `ilist`.
+  //
+  // NOTE: Although the STL does not specify which element may be inserted if
+  // multiple keys compare equivalently within the initializer list, for
+  // `node_hash_set` we guarantee the first match is inserted.
+  using Base::insert;
+
+  // node_hash_set::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `node_hash_set`, provided that no element with the given key
+  // already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace;
+
+  // node_hash_set::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `node_hash_set`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search, and only inserts
+  // provided that no element with the given key already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace_hint;
+
+  // node_hash_set::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  //   node_type extract(const_iterator position):
+  //
+  // Extracts the element at the indicated position and returns a node handle
+  // owning that extracted data.
+  //
+  //   node_type extract(const key_type& x):
+  //
+  // Extracts the element with the key matching the passed key value and
+  // returns a node handle owning that extracted data. If the `node_hash_set`
+  // does not contain an element with a matching key, this function returns an
+  // empty node handle.
+  using Base::extract;
+
+  // node_hash_set::merge()
+  //
+  // Extracts elements from a given `source` node hash set into this
+  // `node_hash_set`. If the destination `node_hash_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // node_hash_set::swap(node_hash_set& other)
+  //
+  // Exchanges the contents of this `node_hash_set` with those of the `other`
+  // node hash set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `node_hash_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the node hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // node_hash_set::rehash(count)
+  //
+  // Rehashes the `node_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: as with `std::unordered_set`, a `rehash()` invalidates iterators,
+  // but because `node_hash_set` provides pointer stability, references and
+  // pointers to elements remain valid.
+  using Base::rehash;
+
+  // node_hash_set::reserve(count)
+  //
+  // Sets the number of slots in the `node_hash_set` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // node_hash_set::contains()
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `node_hash_set`, returning `true` if so or `false` otherwise.
+  using Base::contains;
+
+  // node_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `node_hash_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `node_hash_set`.
+  using Base::count;
+
+  // node_hash_set::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `node_hash_set`.
+  using Base::equal_range;
+
+  // node_hash_set::find()
+  //
+  // Finds an element with the passed `key` within the `node_hash_set`.
+  using Base::find;
+
+  // node_hash_set::bucket_count()
+  //
+  // Returns the number of "buckets" within the `node_hash_set`. Note that this
+  // value simply equals the current capacity of the `node_hash_set`.
+  using Base::bucket_count;
+
+  // node_hash_set::load_factor()
+  //
+  // Returns the current load factor of the `node_hash_set` (the average number
+  // of slots occupied with a value within the hash set).
+  using Base::load_factor;
+
+  // node_hash_set::max_load_factor()
+  //
+  // Manages the maximum load factor of the `node_hash_set`. Overloads are
+  // listed below.
+  //
+  //   float node_hash_set::max_load_factor()
+  //
+  // Returns the current maximum load factor of the `node_hash_set`.
+  //
+  //   void node_hash_set::max_load_factor(float ml)
+  //
+  // Sets the maximum load factor of the `node_hash_set` to the passed value.
+  //
+  // NOTE: This overload is provided only for API compatibility with the STL;
+  // `node_hash_set` will ignore any set load factor and manage its rehashing
+  // internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // node_hash_set::get_allocator()
+  //
+  // Returns the allocator function associated with this `node_hash_set`.
+  using Base::get_allocator;
+
+  // node_hash_set::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `node_hash_set`.
+  using Base::hash_function;
+
+  // node_hash_set::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+
+  ABSL_DEPRECATED("Call `hash_function()` instead.")
+  typename Base::hasher hash_funct() { return this->hash_function(); }
+
+  ABSL_DEPRECATED("Call `rehash()` instead.")
+  void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+namespace container_internal {
+
+template <class T>
+struct NodeHashSetPolicy
+    : absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+  using key_type = T;
+  using init_type = T;
+  using constant_iterators = std::true_type;
+
+  template <class Allocator, class... Args>
+  static T* new_element(Allocator* alloc, Args&&... args) {
+    using ValueAlloc =
+        typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+    ValueAlloc value_alloc(*alloc);
+    T* res = absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
+    absl::allocator_traits<ValueAlloc>::construct(value_alloc, res,
+                                                  std::forward<Args>(args)...);
+    return res;
+  }
+
+  template <class Allocator>
+  static void delete_element(Allocator* alloc, T* elem) {
+    using ValueAlloc =
+        typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+    ValueAlloc value_alloc(*alloc);
+    absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
+    absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
+  }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposeValue(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposeValue(
+        std::forward<F>(f), std::forward<Args>(args)...);
+  }
+
+  static size_t element_space_used(const T*) { return sizeof(T); }
+};
+}  // namespace container_internal
+}  // namespace absl
+#endif  // ABSL_CONTAINER_NODE_HASH_SET_H_
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
new file mode 100644
index 00000000..7e498f0f
--- /dev/null
+++ b/absl/container/node_hash_set_test.cc
@@ -0,0 +1,103 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/node_hash_set.h" + +#include "absl/container/internal/unordered_set_constructor_test.h" +#include "absl/container/internal/unordered_set_lookup_test.h" +#include "absl/container/internal/unordered_set_modifiers_test.h" + +namespace absl { +namespace container_internal { +namespace { +using ::absl::container_internal::hash_internal::Enum; +using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::Pointee; +using ::testing::UnorderedElementsAre; + +using SetTypes = ::testing::Types< + node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>, + node_hash_set<std::string, StatefulTestingHash, StatefulTestingEqual, + Alloc<int>>, + node_hash_set<Enum, StatefulTestingHash, StatefulTestingEqual, Alloc<Enum>>, + node_hash_set<EnumClass, StatefulTestingHash, StatefulTestingEqual, + Alloc<EnumClass>>>; + +INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ConstructorTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, LookupTest, SetTypes); +INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ModifiersTest, SetTypes); + +TEST(NodeHashSet, MoveableNotCopyableCompiles) { + node_hash_set<std::unique_ptr<void*>> t; + node_hash_set<std::unique_ptr<void*>> u; + u = std::move(t); +} + +TEST(NodeHashSet, MergeExtractInsert) { + struct Hash { + size_t operator()(const std::unique_ptr<int>& p) const { return *p; } + }; + struct Eq { + bool operator()(const std::unique_ptr<int>& a, + const std::unique_ptr<int>& b) const { + return *a == *b; + } + }; + absl::node_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2; + set1.insert(absl::make_unique<int>(7)); + set1.insert(absl::make_unique<int>(17)); + + set2.insert(absl::make_unique<int>(7)); + set2.insert(absl::make_unique<int>(19)); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19))); + + set1.merge(set2); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + auto node = set1.extract(absl::make_unique<int>(7)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(7)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19))); + + auto insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_FALSE(insert_result.inserted); + EXPECT_TRUE(insert_result.node); + EXPECT_THAT(insert_result.node.value(), Pointee(7)); + EXPECT_EQ(**insert_result.position, 7); + EXPECT_NE(insert_result.position->get(), insert_result.node.value().get()); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + node = set1.extract(absl::make_unique<int>(17)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(17)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19))); + + node.value() = absl::make_unique<int>(23); + + insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_TRUE(insert_result.inserted); + EXPECT_FALSE(insert_result.node); + EXPECT_EQ(**insert_result.position, 23); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); +} + +} // namespace +} // namespace 
container_internal +} // namespace absl diff --git a/absl/copts.bzl b/absl/copts.bzl index e4b425b0..717f5a45 100644 --- a/absl/copts.bzl +++ b/absl/copts.bzl @@ -117,6 +117,7 @@ MSVC_FLAGS = [ "/W3", "/wd4005", # macro-redefinition "/wd4068", # unknown pragma + "/wd4180", # qualifier applied to function type has no meaning; ignored "/wd4244", # conversion from 'type1' to 'type2', possible loss of data "/wd4267", # conversion from 'size_t' to 'type', possible loss of data "/wd4800", # forcing value to bool 'true' or 'false' (performance warning) diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel new file mode 100644 index 00000000..50aa5506 --- /dev/null +++ b/absl/hash/BUILD.bazel @@ -0,0 +1,114 @@ +# +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load( + "//absl:copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_TEST_COPTS", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "hash", + srcs = [ + "internal/hash.cc", + "internal/hash.h", + ], + hdrs = ["hash.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":city", + "//absl/base:core_headers", + "//absl/base:endian", + "//absl/container:fixed_array", + "//absl/meta:type_traits", + "//absl/numeric:int128", + "//absl/strings", + "//absl/types:optional", + "//absl/types:variant", + "//absl/utility", + ], +) + +cc_library( + name = "hash_testing", + testonly = 1, + hdrs = ["hash_testing.h"], + deps = [ + ":spy_hash_state", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:variant", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "hash_test", + srcs = ["hash_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":hash", + ":hash_testing", + "//absl/base:core_headers", + "//absl/container:flat_hash_set", + "//absl/hash:spy_hash_state", + "//absl/meta:type_traits", + "//absl/numeric:int128", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "spy_hash_state", + testonly = 1, + hdrs = ["internal/spy_hash_state.h"], + copts = ABSL_DEFAULT_COPTS, + visibility = ["//visibility:private"], + deps = [ + ":hash", + "//absl/strings", + "//absl/strings:str_format", + ], +) + +cc_library( + name = "city", + srcs = ["internal/city.cc"], + hdrs = [ + "internal/city.h", + "internal/city_crc.h", + ], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:endian", + ], +) + +cc_test( + name = "city_test", + srcs = ["internal/city_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":city", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt new file mode 100644 index 00000000..35081e37 --- /dev/null +++ b/absl/hash/CMakeLists.txt @@ -0,0 +1,80 @@ +# +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND HASH_PUBLIC_HEADERS
+  "hash.h"
+)
+
+list(APPEND HASH_INTERNAL_HEADERS
+  "internal/city.h"
+  "internal/city_crc.h"
+  "internal/hash.h"
+)
+
+# absl_hash library
+list(APPEND HASH_SRC
+  "internal/city.cc"
+  "internal/hash.cc"
+  ${HASH_PUBLIC_HEADERS}
+  ${HASH_INTERNAL_HEADERS}
+)
+
+set(HASH_PUBLIC_LIBRARIES absl::hash absl::container absl::strings absl::str_format absl::utility)
+
+absl_library(
+  TARGET
+    absl_hash
+  SOURCES
+    ${HASH_SRC}
+  PUBLIC_LIBRARIES
+    ${HASH_PUBLIC_LIBRARIES}
+  EXPORT_NAME
+    hash
+)
+
+#
+## TESTS
+#
+
+# testing support
+set(HASH_TEST_HEADERS hash_testing.h internal/spy_hash_state.h)
+set(HASH_TEST_PUBLIC_LIBRARIES absl::hash absl::container absl::numeric absl::strings absl::str_format)
+
+# hash_test
+set(HASH_TEST_SRC "hash_test.cc" ${HASH_TEST_HEADERS})
+
+absl_test(
+  TARGET
+    hash_test
+  SOURCES
+    ${HASH_TEST_SRC}
+  PUBLIC_LIBRARIES
+    ${HASH_TEST_PUBLIC_LIBRARIES}
+)
+
+# city_test
+set(CITY_TEST_SRC "internal/city_test.cc")
+
+absl_test(
+  TARGET
+    city_test
+  SOURCES
+    ${CITY_TEST_SRC}
+  PUBLIC_LIBRARIES
+    ${HASH_TEST_PUBLIC_LIBRARIES}
+)
+
+
diff --git a/absl/hash/hash.h b/absl/hash/hash.h
new file mode 100644
index 00000000..c7ba4c2b
--- /dev/null
+++ b/absl/hash/hash.h
@@ -0,0 +1,312 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hash.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the Abseil `hash` library and the Abseil hashing
+// framework. This framework consists of the following:
+//
+//   * The `absl::Hash` functor, which is used to invoke the hasher within the
+//     Abseil hashing framework. `absl::Hash<T>` supports most basic types and
+//     a number of Abseil types out of the box.
+//   * `AbslHashValue`, an extension point that allows you to extend types to
+//     support Abseil hashing without requiring you to define a hashing
+//     algorithm.
+//   * `HashState`, a type-erased class which implements the manipulation of
+//     the hash state (H) itself, containing member functions `combine()` and
+//     `combine_contiguous()`, which you can use to contribute to an existing
+//     hash state when hashing your types.
+//
+// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
+// provides most of its utility by abstracting away the hash algorithm (and its
+// implementation) entirely. Instead, a type invokes the Abseil hashing
+// framework by simply combining its state with the state of known, hashable
+// types. Hashing of that combined state is separately done by `absl::Hash`.
+//
+// Example:
+//
+//   // Suppose we have a class `Circle` for which we want to add hashing
+//   class Circle {
+//    public:
+//     ...
+//    private:
+//     std::pair<int, int> center_;
+//     int radius_;
+//   };
+//
+//   // To add hashing support to `Circle`, we simply need to add an ordinary
+//   // function `AbslHashValue()`, and return the combined hash state of the
+//   // existing hash state and the class state:
+//
+//   template <typename H>
+//   friend H AbslHashValue(H h, const Circle& c) {
+//     return H::combine(std::move(h), c.center_, c.radius_);
+//   }
+//
+// For more information, see Adding Type Support to `absl::Hash` below.
+//
+#ifndef ABSL_HASH_HASH_H_
+#define ABSL_HASH_HASH_H_
+
+#include "absl/hash/internal/hash.h"
+
+namespace absl {
+
+// -----------------------------------------------------------------------------
+// `absl::Hash`
+// -----------------------------------------------------------------------------
+//
+// `absl::Hash<T>` is a convenient general-purpose hash functor for a type `T`
+// satisfying any of the following conditions (in order):
+//
+//   * T is an arithmetic or pointer type
+//   * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary
+//     hash state `H`.
+//   * T defines a specialization of `HASH_NAMESPACE::hash<T>`
+//   * T defines a specialization of `std::hash<T>`
+//
+// `absl::Hash` intrinsically supports the following types:
+//
+//   * All integral types (including bool)
+//   * All enum types
+//   * All floating-point types (although hashing them is discouraged)
+//   * All pointer types, including nullptr_t
+//   * std::pair<T1, T2>, if T1 and T2 are hashable
+//   * std::tuple<Ts...>, if all the Ts... are hashable
+//   * std::unique_ptr and std::shared_ptr
+//   * All string-like types including:
+//     * std::string
+//     * std::string_view (as well as any instance of std::basic_string that
+//       uses char and std::char_traits)
+//   * All the standard sequence containers (provided the elements are
+//     hashable)
+//   * All the standard ordered associative containers (provided the elements
+//     are hashable)
+//   * absl types such as the following:
+//     * absl::string_view
+//     * absl::InlinedVector
+//     * absl::FixedArray
+//     * absl::uint128
+//     * absl::Time, absl::Duration, and absl::TimeZone
+//
+// Note: the list above is not meant to be exhaustive. Additional type support
+// may be added, in which case the above list will be updated.
+//
+// -----------------------------------------------------------------------------
+// absl::Hash Invocation Evaluation
+// -----------------------------------------------------------------------------
+//
+// When invoked, `absl::Hash<T>` searches for supplied hash functions in the
+// following order:
+//
+//   * Natively supported types out of the box (see above)
+//   * Types for which an `AbslHashValue()` overload is provided (such as
+//     user-defined types). See "Adding Type Support to `absl::Hash`" below.
+//   * Types which define a `HASH_NAMESPACE::hash<T>` specialization (aka
+//     `__gnu_cxx::hash<T>` for gcc/Clang or `stdext::hash<T>` for MSVC)
+//   * Types which define a `std::hash<T>` specialization
+//
+// The fallback to legacy hash functions exists mainly for backwards
+// compatibility. If you have a choice, prefer defining an `AbslHashValue`
+// overload instead of specializing any legacy hash functors.
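+//
+// For example (a sketch; `MyString` below is a hypothetical type, not part of
+// this header): because `AbslHashValue()` overloads are found before any
+// legacy `std::hash` specialization, the overload below is what
+// `absl::Hash<MyString>` invokes, even if a `std::hash<MyString>`
+// specialization also exists:
+//
+//   struct MyString {
+//     std::string value;
+//     template <typename H>
+//     friend H AbslHashValue(H h, const MyString& m) {
+//       return H::combine(std::move(h), m.value);
+//     }
+//   };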
+// +// ----------------------------------------------------------------------------- +// The Hash State Concept, and using `HashState` for Type Erasure +// ----------------------------------------------------------------------------- +// +// The `absl::Hash` framework relies on the Concept of a "hash state." Such a +// hash state is used in several places: +// +// * Within existing implementations of `absl::Hash<T>` to store the hashed +// state of an object. Note that it is up to the implementation how it stores +// such state. A hash table, for example, may mix the state to produce an +// integer value; a testing framework may simply hold a vector of that state. +// * Within implementations of `AbslHashValue()` used to extend user-defined +// types. (See "Adding Type Support to absl::Hash" below.) +// * Inside a `HashState`, providing type erasure for the concept of a hash +// state, which you can use to extend the `absl::Hash` framework for types +// that are otherwise difficult to extend using `AbslHashValue()`. (See the +// `HashState` class below.) +// +// The "hash state" concept contains two member functions for mixing hash state: +// +// * `H::combine()` +// +// Combines an arbitrary number of values into a hash state, returning the +// updated state. Note that the existing hash state is move-only and must be +// passed by value. +// +// Each of the value types T must be hashable by H. +// +// NOTE: +// +// state = H::combine(std::move(state), value1, value2, value3); +// +// must be guaranteed to produce the same hash expansion as +// +// state = H::combine(std::move(state), value1); +// state = H::combine(std::move(state), value2); +// state = H::combine(std::move(state), value3); +// +// * `H::combine_contiguous()` +// +// Combines a contiguous array of `size` elements into a hash state, +// returning the updated state. Note that the existing hash state is +// move-only and must be passed by value. +// +// NOTE: +// +// state = H::combine_contiguous(std::move(state), data, size); +// +// need NOT be guaranteed to produce the same hash expansion as a loop +// (it may perform internal optimizations). If you need this guarantee, use a +// loop instead. +// +// ----------------------------------------------------------------------------- +// Adding Type Support to `absl::Hash` +// ----------------------------------------------------------------------------- +// +// To add support for your user-defined type, add a proper `AbslHashValue()` +// overload as a free (non-member) function. The overload will take an +// existing hash state and should combine that state with state from the type. +// +// Example: +// +// template <typename H> +// H AbslHashValue(H state, const MyType& v) { +// return H::combine(std::move(state), v.field1, ..., v.fieldN); +// } +// +// where `(field1, ..., fieldN)` are the members you would use on your +// `operator==` to define equality. +// +// Notice that `AbslHashValue` is not a class member, but an ordinary function. +// An `AbslHashValue` overload for a type should only be declared in the same +// file and namespace as said type. The proper `AbslHashValue` implementation +// for a given type will be discovered via ADL. +// +// Note: unlike `std::hash', `absl::Hash` should never be specialized. It must +// only be extended by adding `AbslHashValue()` overloads. 
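+//
+// As a second, slightly larger sketch (the `Song` type is hypothetical, not
+// part of Abseil), note that hashable members such as strings and standard
+// containers may be passed to `combine()` directly, and that the fields
+// combined should be exactly those compared by `operator==`:
+//
+//   struct Song {
+//     std::string artist;
+//     std::string title;
+//     std::vector<int> sample_offsets;
+//
+//     friend bool operator==(const Song& a, const Song& b) {
+//       return a.artist == b.artist && a.title == b.title &&
+//              a.sample_offsets == b.sample_offsets;
+//     }
+//     template <typename H>
+//     friend H AbslHashValue(H h, const Song& s) {
+//       return H::combine(std::move(h), s.artist, s.title, s.sample_offsets);
+//     }
+//   };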
+// +template <typename T> +using Hash = absl::hash_internal::Hash<T>; + +// HashState +// +// A type erased version of the hash state concept, for use in user-defined +// `AbslHashValue` implementations that can't use templates (such as PImpl +// classes, virtual functions, etc.). The type erasure adds overhead so it +// should be avoided unless necessary. +// +// Note: This wrapper will only erase calls to: +// combine_contiguous(H, const unsigned char*, size_t) +// +// All other calls will be handled internally and will not invoke overloads +// provided by the wrapped class. +// +// Users of this class should still define a template `AbslHashValue` function, +// but can use `absl::HashState::Create(&state)` to erase the type of the hash +// state and dispatch to their private hashing logic. +// +// This state can be used like any other hash state. In particular, you can call +// `HashState::combine()` and `HashState::combine_contiguous()` on it. +// +// Example: +// +// class Interface { +// public: +// template <typename H> +// friend H AbslHashValue(H state, const Interface& value) { +// state = H::combine(std::move(state), std::type_index(typeid(*this))); +// value.HashValue(absl::HashState::Create(&state)); +// return state; +// } +// private: +// virtual void HashValue(absl::HashState state) const = 0; +// }; +// +// class Impl : Interface { +// private: +// void HashValue(absl::HashState state) const override { +// absl::HashState::combine(std::move(state), v1_, v2_); +// } +// int v1_; +// string v2_; +// }; +class HashState : public hash_internal::HashStateBase<HashState> { + public: + // HashState::Create() + // + // Create a new `HashState` instance that wraps `state`. All calls to + // `combine()` and `combine_contiguous()` on the new instance will be + // redirected to the original `state` object. The `state` object must outlive + // the `HashState` instance. + template <typename T> + static HashState Create(T* state) { + HashState s; + s.Init(state); + return s; + } + + HashState(const HashState&) = delete; + HashState& operator=(const HashState&) = delete; + HashState(HashState&&) = default; + HashState& operator=(HashState&&) = default; + + // HashState::combine() + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. + using HashState::HashStateBase::combine; + + // HashState::combine_contiguous() + // + // Combines a contiguous array of `size` elements into a hash state, returning + // the updated state. + static HashState combine_contiguous(HashState hash_state, + const unsigned char* first, size_t size) { + hash_state.combine_contiguous_(hash_state.state_, first, size); + return hash_state; + } + using HashState::HashStateBase::combine_contiguous; + + private: + HashState() = default; + + template <typename T> + static void CombineContiguousImpl(void* p, const unsigned char* first, + size_t size) { + T& state = *static_cast<T*>(p); + state = T::combine_contiguous(std::move(state), first, size); + } + + template <typename T> + void Init(T* state) { + state_ = state; + combine_contiguous_ = &CombineContiguousImpl<T>; + } + + // Do not erase an already erased state. 
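+  // Instead, the wrapper copies the inner state pointer and dispatch function
+  // directly, so nesting HashState::Create() calls does not stack additional
+  // levels of indirection.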
+ void Init(HashState* state) { + state_ = state->state_; + combine_contiguous_ = state->combine_contiguous_; + } + + void* state_; + void (*combine_contiguous_)(void*, const unsigned char*, size_t); +}; + +} // namespace absl +#endif // ABSL_HASH_HASH_H_ diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc new file mode 100644 index 00000000..7b6fb2e7 --- /dev/null +++ b/absl/hash/hash_test.cc @@ -0,0 +1,425 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/hash/hash.h" + +#include <array> +#include <cstring> +#include <deque> +#include <forward_list> +#include <functional> +#include <iterator> +#include <limits> +#include <list> +#include <map> +#include <memory> +#include <numeric> +#include <random> +#include <set> +#include <string> +#include <tuple> +#include <type_traits> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/flat_hash_set.h" +#include "absl/hash/hash_testing.h" +#include "absl/hash/internal/spy_hash_state.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/int128.h" + +namespace { + +using absl::Hash; +using absl::hash_internal::SpyHashState; + +template <typename T> +class HashValueIntTest : public testing::Test { +}; +TYPED_TEST_CASE_P(HashValueIntTest); + +template <typename T> +SpyHashState SpyHash(const T& value) { + return SpyHashState::combine(SpyHashState(), value); +} + +// Helper trait to verify if T is hashable. We use absl::Hash's poison status to +// detect it. +template <typename T> +using is_hashable = std::is_default_constructible<absl::Hash<T>>; + +TYPED_TEST_P(HashValueIntTest, BasicUsage) { + EXPECT_TRUE((is_hashable<TypeParam>::value)); + + TypeParam n = 42; + EXPECT_EQ(SpyHash(n), SpyHash(TypeParam{42})); + EXPECT_NE(SpyHash(n), SpyHash(TypeParam{0})); + EXPECT_NE(SpyHash(std::numeric_limits<TypeParam>::max()), + SpyHash(std::numeric_limits<TypeParam>::min())); +} + +TYPED_TEST_P(HashValueIntTest, FastPath) { + // Test the fast-path to make sure the values are the same. 
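+  // (The fast path hashes a single integral value directly; it must agree
+  // with the generic path that pipes the same value through a std::tuple.)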
+ TypeParam n = 42; + EXPECT_EQ(absl::Hash<TypeParam>{}(n), + absl::Hash<std::tuple<TypeParam>>{}(std::tuple<TypeParam>(n))); +} + +REGISTER_TYPED_TEST_CASE_P(HashValueIntTest, BasicUsage, FastPath); +using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t, uint32_t, + uint64_t, size_t>; +INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueIntTest, IntTypes); + +template <typename T, typename = void> +struct IsHashCallble : std::false_type {}; + +template <typename T> +struct IsHashCallble<T, absl::void_t<decltype(std::declval<absl::Hash<T>>()( + std::declval<const T&>()))>> : std::true_type {}; + +template <typename T, typename = void> +struct IsAggregateInitializable : std::false_type {}; + +template <typename T> +struct IsAggregateInitializable<T, absl::void_t<decltype(T{})>> + : std::true_type {}; + +TEST(IsHashableTest, ValidHash) { + EXPECT_TRUE((is_hashable<int>::value)); + EXPECT_TRUE(std::is_default_constructible<absl::Hash<int>>::value); + EXPECT_TRUE(std::is_copy_constructible<absl::Hash<int>>::value); + EXPECT_TRUE(std::is_move_constructible<absl::Hash<int>>::value); + EXPECT_TRUE(absl::is_copy_assignable<absl::Hash<int>>::value); + EXPECT_TRUE(absl::is_move_assignable<absl::Hash<int>>::value); + EXPECT_TRUE(IsHashCallble<int>::value); + EXPECT_TRUE(IsAggregateInitializable<absl::Hash<int>>::value); +} +#if ABSL_HASH_INTERNAL_CAN_POISON_ && !defined(__APPLE__) +TEST(IsHashableTest, PoisonHash) { + struct X {}; + EXPECT_FALSE((is_hashable<X>::value)); + EXPECT_FALSE(std::is_default_constructible<absl::Hash<X>>::value); + EXPECT_FALSE(std::is_copy_constructible<absl::Hash<X>>::value); + EXPECT_FALSE(std::is_move_constructible<absl::Hash<X>>::value); + EXPECT_FALSE(absl::is_copy_assignable<absl::Hash<X>>::value); + EXPECT_FALSE(absl::is_move_assignable<absl::Hash<X>>::value); + EXPECT_FALSE(IsHashCallble<X>::value); + EXPECT_FALSE(IsAggregateInitializable<absl::Hash<X>>::value); +} +#endif // ABSL_HASH_INTERNAL_CAN_POISON_ + +// Hashable types +// +// These types exist simply to exercise various AbslHashValue behaviors, so +// they are named by what their AbslHashValue overload does. +struct NoOp { + template <typename HashCode> + friend HashCode AbslHashValue(HashCode h, NoOp n) { + return std::move(h); + } +}; + +struct EmptyCombine { + template <typename HashCode> + friend HashCode AbslHashValue(HashCode h, EmptyCombine e) { + return HashCode::combine(std::move(h)); + } +}; + +template <typename Int> +struct CombineIterative { + template <typename HashCode> + friend HashCode AbslHashValue(HashCode h, CombineIterative c) { + for (int i = 0; i < 5; ++i) { + h = HashCode::combine(std::move(h), Int(i)); + } + return h; + } +}; + +template <typename Int> +struct CombineVariadic { + template <typename HashCode> + friend HashCode AbslHashValue(HashCode h, CombineVariadic c) { + return HashCode::combine(std::move(h), Int(0), Int(1), Int(2), Int(3), + Int(4)); + } +}; + +using InvokeTag = absl::hash_internal::InvokeHashTag; +template <InvokeTag T> +using InvokeTagConstant = std::integral_constant<InvokeTag, T>; + +template <InvokeTag... Tags> +struct MinTag; + +template <InvokeTag a, InvokeTag b, InvokeTag... Tags> +struct MinTag<a, b, Tags...> : MinTag<(a < b ? a : b), Tags...> {}; + +template <InvokeTag a> +struct MinTag<a> : InvokeTagConstant<a> {}; + +template <InvokeTag... Tags> +struct CustomHashType { + size_t value; +}; + +template <InvokeTag allowed, InvokeTag... 
tags> +struct EnableIfContained + : std::enable_if<absl::disjunction< + std::integral_constant<bool, allowed == tags>...>::value> {}; + +template < + typename H, InvokeTag... Tags, + typename = typename EnableIfContained<InvokeTag::kHashValue, Tags...>::type> +H AbslHashValue(H state, CustomHashType<Tags...> t) { + static_assert(MinTag<Tags...>::value == InvokeTag::kHashValue, ""); + return H::combine(std::move(state), + t.value + static_cast<int>(InvokeTag::kHashValue)); +} + +} // namespace + +namespace absl { +namespace hash_internal { +template <InvokeTag... Tags> +struct is_uniquely_represented< + CustomHashType<Tags...>, + typename EnableIfContained<InvokeTag::kUniquelyRepresented, Tags...>::type> + : std::true_type {}; +} // namespace hash_internal +} // namespace absl + +#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ +namespace ABSL_INTERNAL_LEGACY_HASH_NAMESPACE { +template <InvokeTag... Tags> +struct hash<CustomHashType<Tags...>> { + template <InvokeTag... TagsIn, typename = typename EnableIfContained< + InvokeTag::kLegacyHash, TagsIn...>::type> + size_t operator()(CustomHashType<TagsIn...> t) const { + static_assert(MinTag<Tags...>::value == InvokeTag::kLegacyHash, ""); + return t.value + static_cast<int>(InvokeTag::kLegacyHash); + } +}; +} // namespace ABSL_INTERNAL_LEGACY_HASH_NAMESPACE +#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ + +namespace std { +template <InvokeTag... Tags> // NOLINT +struct hash<CustomHashType<Tags...>> { + template <InvokeTag... TagsIn, typename = typename EnableIfContained< + InvokeTag::kStdHash, TagsIn...>::type> + size_t operator()(CustomHashType<TagsIn...> t) const { + static_assert(MinTag<Tags...>::value == InvokeTag::kStdHash, ""); + return t.value + static_cast<int>(InvokeTag::kStdHash); + } +}; +} // namespace std + +namespace { + +template <typename... T> +void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>, T...) { + using type = CustomHashType<T::value...>; + SCOPED_TRACE(testing::PrintToString(std::vector<InvokeTag>{T::value...})); + EXPECT_TRUE(is_hashable<type>()); + EXPECT_TRUE(is_hashable<const type>()); + EXPECT_TRUE(is_hashable<const type&>()); + + const size_t offset = static_cast<int>(std::min({T::value...})); + EXPECT_EQ(SpyHash(type{7}), SpyHash(size_t{7 + offset})); +} + +void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>) { +#if ABSL_HASH_INTERNAL_CAN_POISON_ + // is_hashable is false if we don't support any of the hooks. + using type = CustomHashType<>; + EXPECT_FALSE(is_hashable<type>()); + EXPECT_FALSE(is_hashable<const type>()); + EXPECT_FALSE(is_hashable<const type&>()); +#endif // ABSL_HASH_INTERNAL_CAN_POISON_ +} + +template <InvokeTag Tag, typename... T> +void TestCustomHashType(InvokeTagConstant<Tag> tag, T... 
t) { + constexpr auto next = static_cast<InvokeTag>(static_cast<int>(Tag) + 1); + TestCustomHashType(InvokeTagConstant<next>(), tag, t...); + TestCustomHashType(InvokeTagConstant<next>(), t...); +} + +TEST(HashTest, CustomHashType) { + TestCustomHashType(InvokeTagConstant<InvokeTag{}>()); +} + +TEST(HashTest, NoOpsAreEquivalent) { + EXPECT_EQ(Hash<NoOp>()({}), Hash<NoOp>()({})); + EXPECT_EQ(Hash<NoOp>()({}), Hash<EmptyCombine>()({})); +} + +template <typename T> +class HashIntTest : public testing::Test { +}; +TYPED_TEST_CASE_P(HashIntTest); + +TYPED_TEST_P(HashIntTest, BasicUsage) { + EXPECT_NE(Hash<NoOp>()({}), Hash<TypeParam>()(0)); + EXPECT_NE(Hash<NoOp>()({}), + Hash<TypeParam>()(std::numeric_limits<TypeParam>::max())); + if (std::numeric_limits<TypeParam>::min() != 0) { + EXPECT_NE(Hash<NoOp>()({}), + Hash<TypeParam>()(std::numeric_limits<TypeParam>::min())); + } + + EXPECT_EQ(Hash<CombineIterative<TypeParam>>()({}), + Hash<CombineVariadic<TypeParam>>()({})); +} + +REGISTER_TYPED_TEST_CASE_P(HashIntTest, BasicUsage); +using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t, uint32_t, + uint64_t, size_t>; +INSTANTIATE_TYPED_TEST_CASE_P(My, HashIntTest, IntTypes); + +struct StructWithPadding { + char c; + int i; + + template <typename H> + friend H AbslHashValue(H hash_state, const StructWithPadding& s) { + return H::combine(std::move(hash_state), s.c, s.i); + } +}; + +static_assert(sizeof(StructWithPadding) > sizeof(char) + sizeof(int), + "StructWithPadding doesn't have padding"); +static_assert(std::is_standard_layout<StructWithPadding>::value, ""); + +// This check has to be disabled because libstdc++ doesn't support it. +// static_assert(std::is_trivially_constructible<StructWithPadding>::value, ""); + +template <typename T> +struct ArraySlice { + T* begin; + T* end; + + template <typename H> + friend H AbslHashValue(H hash_state, const ArraySlice& slice) { + for (auto t = slice.begin; t != slice.end; ++t) { + hash_state = H::combine(std::move(hash_state), *t); + } + return hash_state; + } +}; + +TEST(HashTest, HashNonUniquelyRepresentedType) { + // Create equal StructWithPadding objects that are known to have non-equal + // padding bytes. 
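+  // One buffer is zero-filled and the other is filled with 0xFF before the
+  // fields are assigned, so the two arrays hold equal field values but
+  // guaranteed-unequal padding bytes.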
+ static const size_t kNumStructs = 10; + unsigned char buffer1[kNumStructs * sizeof(StructWithPadding)]; + std::memset(buffer1, 0, sizeof(buffer1)); + auto* s1 = reinterpret_cast<StructWithPadding*>(buffer1); + + unsigned char buffer2[kNumStructs * sizeof(StructWithPadding)]; + std::memset(buffer2, 255, sizeof(buffer2)); + auto* s2 = reinterpret_cast<StructWithPadding*>(buffer2); + for (int i = 0; i < kNumStructs; ++i) { + SCOPED_TRACE(i); + s1[i].c = s2[i].c = '0' + i; + s1[i].i = s2[i].i = i; + ASSERT_FALSE(memcmp(buffer1 + i * sizeof(StructWithPadding), + buffer2 + i * sizeof(StructWithPadding), + sizeof(StructWithPadding)) == 0) + << "Bug in test code: objects do not have unequal" + << " object representations"; + } + + EXPECT_EQ(Hash<StructWithPadding>()(s1[0]), Hash<StructWithPadding>()(s2[0])); + EXPECT_EQ(Hash<ArraySlice<StructWithPadding>>()({s1, s1 + kNumStructs}), + Hash<ArraySlice<StructWithPadding>>()({s2, s2 + kNumStructs})); +} + +TEST(HashTest, StandardHashContainerUsage) { + std::unordered_map<int, std::string, Hash<int>> map = {{0, "foo"}, { 42, "bar" }}; + + EXPECT_NE(map.find(0), map.end()); + EXPECT_EQ(map.find(1), map.end()); + EXPECT_NE(map.find(0u), map.end()); +} + +struct ConvertibleFromNoOp { + ConvertibleFromNoOp(NoOp) {} // NOLINT(runtime/explicit) + + template <typename H> + friend H AbslHashValue(H hash_state, ConvertibleFromNoOp) { + return H::combine(std::move(hash_state), 1); + } +}; + +TEST(HashTest, HeterogeneousCall) { + EXPECT_NE(Hash<ConvertibleFromNoOp>()(NoOp()), + Hash<NoOp>()(NoOp())); +} + +TEST(IsUniquelyRepresentedTest, SanityTest) { + using absl::hash_internal::is_uniquely_represented; + + EXPECT_TRUE(is_uniquely_represented<unsigned char>::value); + EXPECT_TRUE(is_uniquely_represented<int>::value); + EXPECT_FALSE(is_uniquely_represented<bool>::value); + EXPECT_FALSE(is_uniquely_represented<int*>::value); +} + +struct IntAndString { + int i; + std::string s; + + template <typename H> + friend H AbslHashValue(H hash_state, IntAndString int_and_string) { + return H::combine(std::move(hash_state), int_and_string.s, + int_and_string.i); + } +}; + +TEST(HashTest, SmallValueOn64ByteBoundary) { + Hash<IntAndString>()(IntAndString{0, std::string(63, '0')}); +} + +struct TypeErased { + size_t n; + + template <typename H> + friend H AbslHashValue(H hash_state, const TypeErased& v) { + v.HashValue(absl::HashState::Create(&hash_state)); + return hash_state; + } + + void HashValue(absl::HashState state) const { + absl::HashState::combine(std::move(state), n); + } +}; + +TEST(HashTest, TypeErased) { + EXPECT_TRUE((is_hashable<TypeErased>::value)); + EXPECT_TRUE((is_hashable<std::pair<TypeErased, int>>::value)); + + EXPECT_EQ(SpyHash(TypeErased{7}), SpyHash(size_t{7})); + EXPECT_NE(SpyHash(TypeErased{7}), SpyHash(size_t{13})); + + EXPECT_EQ(SpyHash(std::make_pair(TypeErased{7}, 17)), + SpyHash(std::make_pair(size_t{7}, 17))); +} + +} // namespace diff --git a/absl/hash/hash_testing.h b/absl/hash/hash_testing.h new file mode 100644 index 00000000..1e3cda64 --- /dev/null +++ b/absl/hash/hash_testing.h @@ -0,0 +1,372 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_HASH_HASH_TESTING_H_
+#define ABSL_HASH_HASH_TESTING_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/hash/internal/spy_hash_state.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/variant.h"
+
+namespace absl {
+
+// Run the absl::Hash algorithm over all the elements passed in and verify that
+// their hash expansion is congruent with their `==` operator.
+//
+// It is used in conjunction with EXPECT_TRUE. Failures will output information
+// on what requirement failed and on which objects.
+//
+// Users should pass a collection of types as either an initializer list or a
+// container of cases.
+//
+//   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+//       {v1, v2, ..., vN}));
+//
+//   std::vector<MyType> cases;
+//   // Fill cases...
+//   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+//
+// Users can pass a variety of types for testing heterogeneous lookup with
+// `std::make_tuple`:
+//
+//   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+//       std::make_tuple(v1, v2, ..., vN)));
+//
+//
+// Ideally, the values passed should provide enough coverage of the `==`
+// operator and the AbslHashValue implementations.
+// For dynamically sized types, the empty state should usually be included in
+// the values.
+//
+// The function accepts an optional comparator function, in case that `==` is
+// not enough for the values provided.
+//
+// Usage:
+//
+//   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+//       std::make_tuple(v1, v2, ..., vN), MyCustomEq{}));
+//
+// It checks the following requirements:
+//   1. The expansion for a value is deterministic.
+//   2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates
+//      to true, then their hash expansion must be equal.
+//   3. If `a == b` evaluates to false their hash expansion must be unequal.
+//   4. If `a == b` evaluates to false neither hash expansion can be a
+//      suffix of the other.
+//   5. AbslHashValue overloads should not be called by the user. They are only
+//      meant to be called by the framework. Users should call H::combine() and
+//      H::combine_contiguous().
+//   6. No moved-from instance of the hash state is used in the implementation
+//      of AbslHashValue.
+//
+// The values do not have to have the same type. This can be useful for
+// equivalent types that support heterogeneous lookup.
+//
+// A possible reason for breaking (2) is combining state in the hash expansion
+// that was not used in `==`.
+// For example:
+//
+//   struct Bad2 {
+//     int a, b;
+//     template <typename H>
+//     friend H AbslHashValue(H state, Bad2 x) {
+//       // Uses a and b.
+//       return H::combine(std::move(state), x.a, x.b);
+//     }
+//     friend bool operator==(Bad2 x, Bad2 y) {
+//       // Only uses a.
+//       return x.a == y.a;
+//     }
+//   };
+//
+// As for (3), breaking this usually means that there is state being passed to
+// the `==` operator that is not used in the hash expansion.
+// For example:
+//
+//   struct Bad3 {
+//     int a, b;
+//     template <typename H>
+//     friend H AbslHashValue(H state, Bad3 x) {
+//       // Only uses a.
+//       return H::combine(std::move(state), x.a);
+//     }
+//     friend bool operator==(Bad3 x, Bad3 y) {
+//       // Uses a and b.
+//       return x.a == y.a && x.b == y.b;
+//     }
+//   };
+//
+// Finally, a common way to break 4 is by combining dynamic ranges without
+// combining the size of the range.
+// For example:
+//
+//   struct Bad4 {
+//     int *p, size;
+//     template <typename H>
+//     friend H AbslHashValue(H state, Bad4 x) {
+//       return H::combine_contiguous(std::move(state), x.p, x.size);
+//     }
+//     friend bool operator==(Bad4 x, Bad4 y) {
+//       return std::equal(x.p, x.p + x.size, y.p, y.p + y.size);
+//     }
+//   };
+//
+// An easy solution to this is to combine the size after combining the range,
+// like so:
+//
+//   template <typename H>
+//   friend H AbslHashValue(H state, Bad4 x) {
+//     return H::combine(
+//         H::combine_contiguous(std::move(state), x.p, x.size), x.size);
+//   }
+//
+template <int&... ExplicitBarrier, typename Container>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values);
+
+template <int&... ExplicitBarrier, typename Container, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals);
+
+template <int&..., typename T>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values);
+
+template <int&..., typename T, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
+                                      Eq equals);
+
+namespace hash_internal {
+
+struct PrintVisitor {
+  size_t index;
+  template <typename T>
+  std::string operator()(const T* value) const {
+    return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")");
+  }
+};
+
+template <typename Eq>
+struct EqVisitor {
+  Eq eq;
+  template <typename T, typename U>
+  bool operator()(const T* t, const U* u) const {
+    return eq(*t, *u);
+  }
+};
+
+struct ExpandVisitor {
+  template <typename T>
+  SpyHashState operator()(const T* value) const {
+    return SpyHashState::combine(SpyHashState(), *value);
+  }
+};
+
+template <typename Container, typename Eq>
+ABSL_MUST_USE_RESULT testing::AssertionResult
+VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
+  using V = typename Container::value_type;
+
+  struct Info {
+    const V& value;
+    size_t index;
+    std::string ToString() const {
+      return absl::visit(PrintVisitor{index}, value);
+    }
+    SpyHashState expand() const { return absl::visit(ExpandVisitor{}, value); }
+  };
+
+  using EqClass = std::vector<Info>;
+  std::vector<EqClass> classes;
+
+  // Gather the values in equivalence classes.
+  size_t i = 0;
+  for (const auto& value : values) {
+    EqClass* c = nullptr;
+    for (auto& eqclass : classes) {
+      if (absl::visit(EqVisitor<Eq>{equals}, value, eqclass[0].value)) {
+        c = &eqclass;
+        break;
+      }
+    }
+    if (c == nullptr) {
+      classes.emplace_back();
+      c = &classes.back();
+    }
+    c->push_back({value, i});
+    ++i;
+
+    // Verify potential errors captured by SpyHashState.
+    if (auto error = c->back().expand().error()) {
+      return testing::AssertionFailure() << *error;
+    }
+  }
+
+  if (classes.size() < 2) {
+    return testing::AssertionFailure()
+           << "At least two equivalence classes are expected.";
+  }
+
+  // We assume that equality is correctly implemented.
+  // Now we verify that AbslHashValue is also correctly implemented.
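+  // Within each class every expansion must be deterministic and equal to the
+  // class's expected expansion; across classes, expansions must be unequal
+  // and neither may be a suffix of the other.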
+ + for (const auto& c : classes) { + // All elements of the equivalence class must have the same hash expansion. + const SpyHashState expected = c[0].expand(); + for (const Info& v : c) { + if (v.expand() != v.expand()) { + return testing::AssertionFailure() + << "Hash expansion for " << v.ToString() + << " is non-deterministic."; + } + if (v.expand() != expected) { + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << v.ToString() + << " evaluate as equal but have an unequal hash expansion."; + } + } + + // Elements from other classes must have different hash expansion. + for (const auto& c2 : classes) { + if (&c == &c2) continue; + const SpyHashState c2_hash = c2[0].expand(); + switch (SpyHashState::Compare(expected, c2_hash)) { + case SpyHashState::CompareResult::kEqual: + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << c2[0].ToString() + << " evaluate as unequal but have an equal hash expansion."; + case SpyHashState::CompareResult::kBSuffixA: + return testing::AssertionFailure() + << "Hash expansion of " << c2[0].ToString() + << " is a suffix of the hash expansion of " << c[0].ToString() + << "."; + case SpyHashState::CompareResult::kASuffixB: + return testing::AssertionFailure() + << "Hash expansion of " << c[0].ToString() + << " is a suffix of the hash expansion of " << c2[0].ToString() + << "."; + case SpyHashState::CompareResult::kUnequal: + break; + } + } + } + return testing::AssertionSuccess(); +} + +template <typename... T> +struct TypeSet { + template <typename U, bool = disjunction<std::is_same<T, U>...>::value> + struct Insert { + using type = TypeSet<U, T...>; + }; + template <typename U> + struct Insert<U, true> { + using type = TypeSet; + }; + + template <template <typename...> class C> + using apply = C<T...>; +}; + +template <typename... T> +struct MakeTypeSet : TypeSet<>{}; +template <typename T, typename... Ts> +struct MakeTypeSet<T, Ts...> : MakeTypeSet<Ts...>::template Insert<T>::type {}; + +template <typename... T> +using VariantForTypes = typename MakeTypeSet< + const typename std::decay<T>::type*...>::template apply<absl::variant>; + +template <typename Container> +struct ContainerAsVector { + using V = absl::variant<const typename Container::value_type*>; + using Out = std::vector<V>; + + static Out Do(const Container& values) { + Out out; + for (const auto& v : values) out.push_back(&v); + return out; + } +}; + +template <typename... T> +struct ContainerAsVector<std::tuple<T...>> { + using V = VariantForTypes<T...>; + using Out = std::vector<V>; + + template <size_t... 
I> + static Out DoImpl(const std::tuple<T...>& tuple, absl::index_sequence<I...>) { + return Out{&std::get<I>(tuple)...}; + } + + static Out Do(const std::tuple<T...>& values) { + return DoImpl(values, absl::index_sequence_for<T...>()); + } +}; + +template <> +struct ContainerAsVector<std::tuple<>> { + static std::vector<VariantForTypes<int>> Do(std::tuple<>) { return {}; } +}; + +struct DefaultEquals { + template <typename T, typename U> + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +} // namespace hash_internal + +template <int&..., typename Container> +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(const Container& values) { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector<Container>::Do(values), + hash_internal::DefaultEquals{}); +} + +template <int&..., typename Container, typename Eq> +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector<Container>::Do(values), + equals); +} + +template <int&..., typename T> +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values) { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values), + hash_internal::DefaultEquals{}); +} + +template <int&..., typename T, typename Eq> +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values, + Eq equals) { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values), + equals); +} + +} // namespace absl + +#endif // ABSL_HASH_HASH_TESTING_H_ diff --git a/absl/hash/internal/city.cc b/absl/hash/internal/city.cc new file mode 100644 index 00000000..591017ab --- /dev/null +++ b/absl/hash/internal/city.cc @@ -0,0 +1,589 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file provides CityHash64() and related functions. +// +// It's probably possible to create even faster hash functions by +// writing a program that systematically explores some of the space of +// possible hash functions, by using SIMD instructions, or by +// compromising on hash quality. 
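+//
+// Usage sketch (`buf`, `len`, and `seed` stand in for the caller's data; they
+// are not defined here):
+//
+//   uint64_t h = absl::hash_internal::CityHash64(buf, len);
+//   uint64_t hs = absl::hash_internal::CityHash64WithSeed(buf, len, seed);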
+ +#include "absl/hash/internal/city.h" + +#include <string.h> // for memcpy and memset +#include <algorithm> + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/optimization.h" + +namespace absl { +namespace hash_internal { + +#ifdef ABSL_IS_BIG_ENDIAN +#define uint32_in_expected_order(x) (absl::gbswap_32(x)) +#define uint64_in_expected_order(x) (absl::gbswap_64(x)) +#else +#define uint32_in_expected_order(x) (x) +#define uint64_in_expected_order(x) (x) +#endif + +static uint64_t Fetch64(const char *p) { + return uint64_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +static uint32_t Fetch32(const char *p) { + return uint32_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +// Some primes between 2^63 and 2^64 for various uses. +static const uint64_t k0 = 0xc3a5c85c97cb3127ULL; +static const uint64_t k1 = 0xb492b66fbe98f273ULL; +static const uint64_t k2 = 0x9ae16a3b2f90404fULL; + +// Magic numbers for 32-bit hashing. Copied from Murmur3. +static const uint32_t c1 = 0xcc9e2d51; +static const uint32_t c2 = 0x1b873593; + +// A 32-bit to 32-bit integer hash copied from Murmur3. +static uint32_t fmix(uint32_t h) { + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +static uint32_t Rotate32(uint32_t val, int shift) { + // Avoid shifting by 32: doing so yields an undefined result. + return shift == 0 ? val : ((val >> shift) | (val << (32 - shift))); +} + +#undef PERMUTE3 +#define PERMUTE3(a, b, c) \ + do { \ + std::swap(a, b); \ + std::swap(a, c); \ + } while (0) + +static uint32_t Mur(uint32_t a, uint32_t h) { + // Helper from Murmur3 for combining two 32-bit values. + a *= c1; + a = Rotate32(a, 17); + a *= c2; + h ^= a; + h = Rotate32(h, 19); + return h * 5 + 0xe6546b64; +} + +static uint32_t Hash32Len13to24(const char *s, size_t len) { + uint32_t a = Fetch32(s - 4 + (len >> 1)); + uint32_t b = Fetch32(s + 4); + uint32_t c = Fetch32(s + len - 8); + uint32_t d = Fetch32(s + (len >> 1)); + uint32_t e = Fetch32(s); + uint32_t f = Fetch32(s + len - 4); + uint32_t h = len; + + return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h))))))); +} + +static uint32_t Hash32Len0to4(const char *s, size_t len) { + uint32_t b = 0; + uint32_t c = 9; + for (size_t i = 0; i < len; i++) { + signed char v = s[i]; + b = b * c1 + v; + c ^= b; + } + return fmix(Mur(b, Mur(len, c))); +} + +static uint32_t Hash32Len5to12(const char *s, size_t len) { + uint32_t a = len, b = len * 5, c = 9, d = b; + a += Fetch32(s); + b += Fetch32(s + len - 4); + c += Fetch32(s + ((len >> 1) & 4)); + return fmix(Mur(c, Mur(b, Mur(a, d)))); +} + +uint32_t CityHash32(const char *s, size_t len) { + if (len <= 24) { + return len <= 12 + ? (len <= 4 ? 
Hash32Len0to4(s, len) : Hash32Len5to12(s, len)) + : Hash32Len13to24(s, len); + } + + // len > 24 + uint32_t h = len, g = c1 * len, f = g; + uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2; + uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2; + uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2; + uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2; + uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2; + h ^= a0; + h = Rotate32(h, 19); + h = h * 5 + 0xe6546b64; + h ^= a2; + h = Rotate32(h, 19); + h = h * 5 + 0xe6546b64; + g ^= a1; + g = Rotate32(g, 19); + g = g * 5 + 0xe6546b64; + g ^= a3; + g = Rotate32(g, 19); + g = g * 5 + 0xe6546b64; + f += a4; + f = Rotate32(f, 19); + f = f * 5 + 0xe6546b64; + size_t iters = (len - 1) / 20; + do { + uint32_t a0 = Rotate32(Fetch32(s) * c1, 17) * c2; + uint32_t a1 = Fetch32(s + 4); + uint32_t a2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2; + uint32_t a3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2; + uint32_t a4 = Fetch32(s + 16); + h ^= a0; + h = Rotate32(h, 18); + h = h * 5 + 0xe6546b64; + f += a1; + f = Rotate32(f, 19); + f = f * c1; + g += a2; + g = Rotate32(g, 18); + g = g * 5 + 0xe6546b64; + h ^= a3 + a1; + h = Rotate32(h, 19); + h = h * 5 + 0xe6546b64; + g ^= a4; + g = absl::gbswap_32(g) * 5; + h += a4 * 5; + h = absl::gbswap_32(h); + f += a0; + PERMUTE3(f, h, g); + s += 20; + } while (--iters != 0); + g = Rotate32(g, 11) * c1; + g = Rotate32(g, 17) * c1; + f = Rotate32(f, 11) * c1; + f = Rotate32(f, 17) * c1; + h = Rotate32(h + g, 19); + h = h * 5 + 0xe6546b64; + h = Rotate32(h, 17) * c1; + h = Rotate32(h + f, 19); + h = h * 5 + 0xe6546b64; + h = Rotate32(h, 17) * c1; + return h; +} + +// Bitwise right rotate. Normally this will compile to a single +// instruction, especially if the shift is a manifest constant. +static uint64_t Rotate(uint64_t val, int shift) { + // Avoid shifting by 64: doing so yields an undefined result. + return shift == 0 ? val : ((val >> shift) | (val << (64 - shift))); +} + +static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); } + +static uint64_t HashLen16(uint64_t u, uint64_t v) { + return Hash128to64(uint128(u, v)); +} + +static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) { + // Murmur-inspired hashing. + uint64_t a = (u ^ v) * mul; + a ^= (a >> 47); + uint64_t b = (v ^ a) * mul; + b ^= (b >> 47); + b *= mul; + return b; +} + +static uint64_t HashLen0to16(const char *s, size_t len) { + if (len >= 8) { + uint64_t mul = k2 + len * 2; + uint64_t a = Fetch64(s) + k2; + uint64_t b = Fetch64(s + len - 8); + uint64_t c = Rotate(b, 37) * mul + a; + uint64_t d = (Rotate(a, 25) + b) * mul; + return HashLen16(c, d, mul); + } + if (len >= 4) { + uint64_t mul = k2 + len * 2; + uint64_t a = Fetch32(s); + return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul); + } + if (len > 0) { + uint8_t a = s[0]; + uint8_t b = s[len >> 1]; + uint8_t c = s[len - 1]; + uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8); + uint32_t z = len + (static_cast<uint32_t>(c) << 2); + return ShiftMix(y * k2 ^ z * k0) * k2; + } + return k2; +} + +// This probably works well for 16-byte strings as well, but it may be overkill +// in that case. 
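+// (CityHash64 routes inputs of 16 bytes or fewer to HashLen0to16 above, so in
+// practice this function only sees 17- to 32-byte inputs.)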
+static uint64_t HashLen17to32(const char *s, size_t len) { + uint64_t mul = k2 + len * 2; + uint64_t a = Fetch64(s) * k1; + uint64_t b = Fetch64(s + 8); + uint64_t c = Fetch64(s + len - 8) * mul; + uint64_t d = Fetch64(s + len - 16) * k2; + return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d, + a + Rotate(b + k2, 18) + c, mul); +} + +// Return a 16-byte hash for 48 bytes. Quick and dirty. +// Callers do best to use "random-looking" values for a and b. +static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(uint64_t w, uint64_t x, + uint64_t y, uint64_t z, + uint64_t a, uint64_t b) { + a += w; + b = Rotate(b + a + z, 21); + uint64_t c = a; + a += x; + a += y; + b += Rotate(a, 44); + return std::make_pair(a + z, b + c); +} + +// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty. +static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s, uint64_t a, + uint64_t b) { + return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16), + Fetch64(s + 24), a, b); +} + +// Return an 8-byte hash for 33 to 64 bytes. +static uint64_t HashLen33to64(const char *s, size_t len) { + uint64_t mul = k2 + len * 2; + uint64_t a = Fetch64(s) * k2; + uint64_t b = Fetch64(s + 8); + uint64_t c = Fetch64(s + len - 24); + uint64_t d = Fetch64(s + len - 32); + uint64_t e = Fetch64(s + 16) * k2; + uint64_t f = Fetch64(s + 24) * 9; + uint64_t g = Fetch64(s + len - 8); + uint64_t h = Fetch64(s + len - 16) * mul; + uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9; + uint64_t v = ((a + g) ^ d) + f + 1; + uint64_t w = absl::gbswap_64((u + v) * mul) + h; + uint64_t x = Rotate(e + f, 42) + c; + uint64_t y = (absl::gbswap_64((v + w) * mul) + g) * mul; + uint64_t z = e + f + c; + a = absl::gbswap_64((x + z) * mul + y) + b; + b = ShiftMix((z + a) * mul + d + h) * mul; + return b + x; +} + +uint64_t CityHash64(const char *s, size_t len) { + if (len <= 32) { + if (len <= 16) { + return HashLen0to16(s, len); + } else { + return HashLen17to32(s, len); + } + } else if (len <= 64) { + return HashLen33to64(s, len); + } + + // For strings over 64 bytes we hash the end first, and then as we + // loop we keep 56 bytes of state: v, w, x, y, and z. + uint64_t x = Fetch64(s + len - 40); + uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56); + uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24)); + std::pair<uint64_t, uint64_t> v = WeakHashLen32WithSeeds(s + len - 64, len, z); + std::pair<uint64_t, uint64_t> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x); + x = x * k1 + Fetch64(s); + + // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks. + len = (len - 1) & ~static_cast<size_t>(63); + do { + x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1; + y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1; + x ^= w.second; + y += v.first + Fetch64(s + 40); + z = Rotate(z + w.first, 33) * k1; + v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first); + w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16)); + std::swap(z, x); + s += 64; + len -= 64; + } while (len != 0); + return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z, + HashLen16(v.second, w.second) + x); +} + +uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) { + return CityHash64WithSeeds(s, len, k2, seed); +} + +uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0, + uint64_t seed1) { + return HashLen16(CityHash64(s, len) - seed0, seed1); +} + +// A subroutine for CityHash128(). 
Returns a decent 128-bit hash for strings +// of any length representable in signed long. Based on City and Murmur. +static uint128 CityMurmur(const char *s, size_t len, uint128 seed) { + uint64_t a = Uint128Low64(seed); + uint64_t b = Uint128High64(seed); + uint64_t c = 0; + uint64_t d = 0; + int64_t l = len - 16; + if (l <= 0) { // len <= 16 + a = ShiftMix(a * k1) * k1; + c = b * k1 + HashLen0to16(s, len); + d = ShiftMix(a + (len >= 8 ? Fetch64(s) : c)); + } else { // len > 16 + c = HashLen16(Fetch64(s + len - 8) + k1, a); + d = HashLen16(b + len, c + Fetch64(s + len - 16)); + a += d; + do { + a ^= ShiftMix(Fetch64(s) * k1) * k1; + a *= k1; + b ^= a; + c ^= ShiftMix(Fetch64(s + 8) * k1) * k1; + c *= k1; + d ^= c; + s += 16; + l -= 16; + } while (l > 0); + } + a = HashLen16(a, c); + b = HashLen16(d, b); + return uint128(a ^ b, HashLen16(b, a)); +} + +uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed) { + if (len < 128) { + return CityMurmur(s, len, seed); + } + + // We expect len >= 128 to be the common case. Keep 56 bytes of state: + // v, w, x, y, and z. + std::pair<uint64_t, uint64_t> v, w; + uint64_t x = Uint128Low64(seed); + uint64_t y = Uint128High64(seed); + uint64_t z = len * k1; + v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s); + v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8); + w.first = Rotate(y + z, 35) * k1 + x; + w.second = Rotate(x + Fetch64(s + 88), 53) * k1; + + // This is the same inner loop as CityHash64(), manually unrolled. + do { + x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1; + y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1; + x ^= w.second; + y += v.first + Fetch64(s + 40); + z = Rotate(z + w.first, 33) * k1; + v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first); + w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16)); + std::swap(z, x); + s += 64; + x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1; + y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1; + x ^= w.second; + y += v.first + Fetch64(s + 40); + z = Rotate(z + w.first, 33) * k1; + v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first); + w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16)); + std::swap(z, x); + s += 64; + len -= 128; + } while (ABSL_PREDICT_TRUE(len >= 128)); + x += Rotate(v.first + z, 49) * k0; + y = y * k0 + Rotate(w.second, 37); + z = z * k0 + Rotate(w.first, 27); + w.first *= 9; + v.first *= k0; + // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s. + for (size_t tail_done = 0; tail_done < len;) { + tail_done += 32; + y = Rotate(x + y, 42) * k0 + v.second; + w.first += Fetch64(s + len - tail_done + 16); + x = x * k0 + w.first; + z += w.second + Fetch64(s + len - tail_done); + w.second += v.first; + v = WeakHashLen32WithSeeds(s + len - tail_done, v.first + z, v.second); + v.first *= k0; + } + // At this point our 56 bytes of state should contain more than + // enough information for a strong 128-bit hash. We use two + // different 56-byte-to-8-byte hashes to get a 16-byte final result. + x = HashLen16(x, v.first); + y = HashLen16(y + z, w.first); + return uint128(HashLen16(x + v.second, w.second) + y, + HashLen16(x + w.second, y + v.second)); +} + +uint128 CityHash128(const char *s, size_t len) { + return len >= 16 + ? 
CityHash128WithSeed(s + 16, len - 16, + uint128(Fetch64(s), Fetch64(s + 8) + k0)) + : CityHash128WithSeed(s, len, uint128(k0, k1)); +} +} // namespace hash_internal +} // namespace absl + +#ifdef __SSE4_2__ +#include <nmmintrin.h> +#include "absl/hash/internal/city_crc.h" + +namespace absl { +namespace hash_internal { + +// Requires len >= 240. +static void CityHashCrc256Long(const char *s, size_t len, uint32_t seed, + uint64_t *result) { + uint64_t a = Fetch64(s + 56) + k0; + uint64_t b = Fetch64(s + 96) + k0; + uint64_t c = result[0] = HashLen16(b, len); + uint64_t d = result[1] = Fetch64(s + 120) * k0 + len; + uint64_t e = Fetch64(s + 184) + seed; + uint64_t f = 0; + uint64_t g = 0; + uint64_t h = c + d; + uint64_t x = seed; + uint64_t y = 0; + uint64_t z = 0; + + // 240 bytes of input per iter. + size_t iters = len / 240; + len -= iters * 240; + do { +#undef CHUNK +#define CHUNK(r) \ + PERMUTE3(x, z, y); \ + b += Fetch64(s); \ + c += Fetch64(s + 8); \ + d += Fetch64(s + 16); \ + e += Fetch64(s + 24); \ + f += Fetch64(s + 32); \ + a += b; \ + h += f; \ + b += c; \ + f += d; \ + g += e; \ + e += z; \ + g += x; \ + z = _mm_crc32_u64(z, b + g); \ + y = _mm_crc32_u64(y, e + h); \ + x = _mm_crc32_u64(x, f + a); \ + e = Rotate(e, r); \ + c += e; \ + s += 40 + + CHUNK(0); + PERMUTE3(a, h, c); + CHUNK(33); + PERMUTE3(a, h, f); + CHUNK(0); + PERMUTE3(b, h, f); + CHUNK(42); + PERMUTE3(b, h, d); + CHUNK(0); + PERMUTE3(b, h, e); + CHUNK(33); + PERMUTE3(a, h, e); + } while (--iters > 0); + + while (len >= 40) { + CHUNK(29); + e ^= Rotate(a, 20); + h += Rotate(b, 30); + g ^= Rotate(c, 40); + f += Rotate(d, 34); + PERMUTE3(c, h, g); + len -= 40; + } + if (len > 0) { + s = s + len - 40; + CHUNK(33); + e ^= Rotate(a, 43); + h += Rotate(b, 42); + g ^= Rotate(c, 41); + f += Rotate(d, 40); + } + result[0] ^= h; + result[1] ^= g; + g += h; + a = HashLen16(a, g + z); + x += y << 32; + b += x; + c = HashLen16(c, z) + h; + d = HashLen16(d, e + result[0]); + g += e; + h += HashLen16(x, f); + e = HashLen16(a, d) + g; + z = HashLen16(b, c) + a; + y = HashLen16(g, h) + c; + result[0] = e + z + y + x; + a = ShiftMix((a + y) * k0) * k0 + b; + result[1] += a + result[0]; + a = ShiftMix(a * k0) * k0 + c; + result[2] = a + result[1]; + a = ShiftMix((a + e) * k0) * k0; + result[3] = a + result[2]; +} + +// Requires len < 240. 
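+// Zero-pads the input to exactly 240 bytes and defers to CityHashCrc256Long,
+// folding the original length into the seed so that padded buffers with equal
+// bytes but different original lengths still hash differently.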
+static void CityHashCrc256Short(const char *s, size_t len, uint64_t *result) { + char buf[240]; + memcpy(buf, s, len); + memset(buf + len, 0, 240 - len); + CityHashCrc256Long(buf, 240, ~static_cast<uint32_t>(len), result); +} + +void CityHashCrc256(const char *s, size_t len, uint64_t *result) { + if (ABSL_PREDICT_TRUE(len >= 240)) { + CityHashCrc256Long(s, len, 0, result); + } else { + CityHashCrc256Short(s, len, result); + } +} + +uint128 CityHashCrc128WithSeed(const char *s, size_t len, uint128 seed) { + if (len <= 900) { + return CityHash128WithSeed(s, len, seed); + } else { + uint64_t result[4]; + CityHashCrc256(s, len, result); + uint64_t u = Uint128High64(seed) + result[0]; + uint64_t v = Uint128Low64(seed) + result[1]; + return uint128(HashLen16(u, v + result[2]), + HashLen16(Rotate(v, 32), u * k0 + result[3])); + } +} + +uint128 CityHashCrc128(const char *s, size_t len) { + if (len <= 900) { + return CityHash128(s, len); + } else { + uint64_t result[4]; + CityHashCrc256(s, len, result); + return uint128(result[2], result[3]); + } +} + +} // namespace hash_internal +} // namespace absl + +#endif diff --git a/absl/hash/internal/city.h b/absl/hash/internal/city.h new file mode 100644 index 00000000..55b37b87 --- /dev/null +++ b/absl/hash/internal/city.h @@ -0,0 +1,108 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// http://code.google.com/p/cityhash/ +// +// This file provides a few functions for hashing strings. All of them are +// high-quality functions in the sense that they pass standard tests such +// as Austin Appleby's SMHasher. They are also fast. +// +// For 64-bit x86 code, on short strings, we don't know of anything faster than +// CityHash64 that is of comparable quality. We believe our nearest competitor +// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash +// tables and most other hashing (excluding cryptography). +// +// For 64-bit x86 code, on long strings, the picture is more complicated. +// On many recent Intel CPUs, such as Nehalem, Westmere, Sandy Bridge, etc., +// CityHashCrc128 appears to be faster than all competitors of comparable +// quality. CityHash128 is also good but not quite as fast. We believe our +// nearest competitor is Bob Jenkins' Spooky. We don't have great data for +// other 64-bit CPUs, but for long strings we know that Spooky is slightly +// faster than CityHash on some relatively recent AMD x86-64 CPUs, for example. +// Note that CityHashCrc128 is declared in citycrc.h. +// +// For 32-bit x86 code, we don't know of anything faster than CityHash32 that +// is of comparable quality. We believe our nearest competitor is Murmur3A. +// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.) +// +// Functions in the CityHash family are not suitable for cryptography. +// +// Please see CityHash's README file for more details on our performance +// measurements and so on. 
+// +// WARNING: This code has been only lightly tested on big-endian platforms! +// It is known to work well on little-endian platforms that have a small penalty +// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs. +// It should work on all 32-bit and 64-bit platforms that allow unaligned reads; +// bug reports are welcome. +// +// By the way, for some hash functions, given strings a and b, the hash +// of a+b is easily derived from the hashes of a and b. This property +// doesn't hold for any hash functions in this file. + +#ifndef ABSL_HASH_INTERNAL_CITY_H_ +#define ABSL_HASH_INTERNAL_CITY_H_ + +#include <stdint.h> +#include <stdlib.h> // for size_t. +#include <utility> + + +namespace absl { +namespace hash_internal { + +typedef std::pair<uint64_t, uint64_t> uint128; + +inline uint64_t Uint128Low64(const uint128 &x) { return x.first; } +inline uint64_t Uint128High64(const uint128 &x) { return x.second; } + +// Hash function for a byte array. +uint64_t CityHash64(const char *s, size_t len); + +// Hash function for a byte array. For convenience, a 64-bit seed is also +// hashed into the result. +uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed); + +// Hash function for a byte array. For convenience, two seeds are also +// hashed into the result. +uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0, + uint64_t seed1); + +// Hash function for a byte array. +uint128 CityHash128(const char *s, size_t len); + +// Hash function for a byte array. For convenience, a 128-bit seed is also +// hashed into the result. +uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed); + +// Hash function for a byte array. Most useful in 32-bit binaries. +uint32_t CityHash32(const char *s, size_t len); + +// Hash 128 input bits down to 64 bits of output. +// This is intended to be a reasonably good hash function. +inline uint64_t Hash128to64(const uint128 &x) { + // Murmur-inspired hashing. + const uint64_t kMul = 0x9ddfea08eb382d69ULL; + uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul; + a ^= (a >> 47); + uint64_t b = (Uint128High64(x) ^ a) * kMul; + b ^= (b >> 47); + b *= kMul; + return b; +} + +} // namespace hash_internal +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_CITY_H_ diff --git a/absl/hash/internal/city_crc.h b/absl/hash/internal/city_crc.h new file mode 100644 index 00000000..6be6557d --- /dev/null +++ b/absl/hash/internal/city_crc.h @@ -0,0 +1,41 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file declares the subset of the CityHash functions that require +// _mm_crc32_u64(). See the CityHash README for details. +// +// Functions in the CityHash family are not suitable for cryptography. + +#ifndef ABSL_HASH_INTERNAL_CITY_CRC_H_ +#define ABSL_HASH_INTERNAL_CITY_CRC_H_ + +#include "absl/hash/internal/city.h" + +namespace absl { +namespace hash_internal { + +// Hash function for a byte array. 
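+// Note: the CityHashCrc* functions declared here are defined only when
+// their translation unit is compiled with SSE4.2 enabled (they are guarded
+// by __SSE4_2__ and use _mm_crc32_u64()).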
+uint128 CityHashCrc128(const char *s, size_t len); + +// Hash function for a byte array. For convenience, a 128-bit seed is also +// hashed into the result. +uint128 CityHashCrc128WithSeed(const char *s, size_t len, uint128 seed); + +// Hash function for a byte array. Sets result[0] ... result[3]. +void CityHashCrc256(const char *s, size_t len, uint64_t *result); + +} // namespace hash_internal +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_CITY_CRC_H_ diff --git a/absl/hash/internal/city_test.cc b/absl/hash/internal/city_test.cc new file mode 100644 index 00000000..678da53d --- /dev/null +++ b/absl/hash/internal/city_test.cc @@ -0,0 +1,1812 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/hash/internal/city.h" + +#include <string.h> +#include <cstdio> +#include <iostream> +#include "gtest/gtest.h" +#ifdef __SSE4_2__ +#include "absl/hash/internal/city_crc.h" +#endif + +namespace absl { +namespace hash_internal { + +static const uint64_t k0 = 0xc3a5c85c97cb3127ULL; +static const uint64_t kSeed0 = 1234567; +static const uint64_t kSeed1 = k0; +static const uint128 kSeed128(kSeed0, kSeed1); +static const int kDataSize = 1 << 20; +static const int kTestSize = 300; + +static char data[kDataSize]; + +// Initialize data to pseudorandom values. 
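+// (A small xorshift-multiply generator keyed by k0; byte i of data is taken
+// from the upper bits of b after i has been mixed in.)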
+void setup() { + uint64_t a = 9; + uint64_t b = 777; + for (int i = 0; i < kDataSize; i++) { + a += b; + b += a; + a = (a ^ (a >> 41)) * k0; + b = (b ^ (b >> 41)) * k0 + i; + uint8_t u = b >> 37; + memcpy(data + i, &u, 1); // uint8_t -> char + } +} + +#define C(x) 0x##x##ULL +static const uint64_t testdata[kTestSize][16] = { + {C(9ae16a3b2f90404f), C(75106db890237a4a), C(3feac5f636039766), + C(3df09dfc64c09a2b), C(3cb540c392e51e29), C(6b56343feac0663), + C(5b7bc50fd8e8ad92), C(3df09dfc64c09a2b), C(3cb540c392e51e29), + C(6b56343feac0663), C(5b7bc50fd8e8ad92), C(95162f24e6a5f930), + C(6808bdf4f1eb06e0), C(b3b1f3a67b624d82), C(c9a62f12bd4cd80b), + C(dc56d17a)}, + {C(541150e87f415e96), C(1aef0d24b3148a1a), C(bacc300e1e82345a), + C(c3cdc41e1df33513), C(2c138ff2596d42f6), C(f58e9082aed3055f), + C(162e192b2957163d), C(c3cdc41e1df33513), C(2c138ff2596d42f6), + C(f58e9082aed3055f), C(162e192b2957163d), C(fb99e85e0d16f90c), + C(608462c15bdf27e8), C(e7d2c5c943572b62), C(1baaa9327642798c), + C(99929334)}, + {C(f3786a4b25827c1), C(34ee1a2bf767bd1c), C(2f15ca2ebfb631f2), + C(3149ba1dac77270d), C(70e2e076e30703c), C(59bcc9659bc5296), + C(9ecbc8132ae2f1d7), C(3149ba1dac77270d), C(70e2e076e30703c), + C(59bcc9659bc5296), C(9ecbc8132ae2f1d7), C(a01d30789bad7cf2), + C(ae03fe371981a0e0), C(127e3883b8788934), C(d0ac3d4c0a6fca32), + C(4252edb7)}, + {C(ef923a7a1af78eab), C(79163b1e1e9a9b18), C(df3b2aca6e1e4a30), + C(2193fb7620cbf23b), C(8b6a8ff06cda8302), C(1a44469afd3e091f), + C(8b0449376612506), C(2193fb7620cbf23b), C(8b6a8ff06cda8302), + C(1a44469afd3e091f), C(8b0449376612506), C(e9d9d41c32ad91d1), + C(b44ab09f58e3c608), C(19e9175f9fcf784), C(839b3c9581b4a480), C(ebc34f3c)}, + {C(11df592596f41d88), C(843ec0bce9042f9c), C(cce2ea1e08b1eb30), + C(4d09e42f09cc3495), C(666236631b9f253b), C(d28b3763cd02b6a3), + C(43b249e57c4d0c1b), C(4d09e42f09cc3495), C(666236631b9f253b), + C(d28b3763cd02b6a3), C(43b249e57c4d0c1b), C(3887101c8adea101), + C(8a9355d4efc91df0), C(3e610944cc9fecfd), C(5bf9eb60b08ac0ce), + C(26f2b463)}, + {C(831f448bdc5600b3), C(62a24be3120a6919), C(1b44098a41e010da), + C(dc07df53b949c6b), C(d2b11b2081aeb002), C(d212b02c1b13f772), + C(c0bed297b4be1912), C(dc07df53b949c6b), C(d2b11b2081aeb002), + C(d212b02c1b13f772), C(c0bed297b4be1912), C(682d3d2ad304e4af), + C(40e9112a655437a1), C(268b09f7ee09843f), C(6b9698d43859ca47), + C(b042c047)}, + {C(3eca803e70304894), C(d80de767e4a920a), C(a51cfbb292efd53d), + C(d183dcda5f73edfa), C(3a93cbf40f30128c), C(1a92544d0b41dbda), + C(aec2c4bee81975e1), C(d183dcda5f73edfa), C(3a93cbf40f30128c), + C(1a92544d0b41dbda), C(aec2c4bee81975e1), C(5f91814d1126ba4b), + C(f8ac57eee87fcf1f), C(c55c644a5d0023cd), C(adb761e827825ff2), + C(e73bb0a8)}, + {C(1b5a063fb4c7f9f1), C(318dbc24af66dee9), C(10ef7b32d5c719af), + C(b140a02ef5c97712), C(b7d00ef065b51b33), C(635121d532897d98), + C(532daf21b312a6d6), C(b140a02ef5c97712), C(b7d00ef065b51b33), + C(635121d532897d98), C(532daf21b312a6d6), C(c0b09b75d943910), + C(8c84dfb5ef2a8e96), C(e5c06034b0353433), C(3170faf1c33a45dd), + C(91dfdd75)}, + {C(a0f10149a0e538d6), C(69d008c20f87419f), C(41b36376185b3e9e), + C(26b6689960ccf81d), C(55f23b27bb9efd94), C(3a17f6166dd765db), + C(c891a8a62931e782), C(26b6689960ccf81d), C(55f23b27bb9efd94), + C(3a17f6166dd765db), C(c891a8a62931e782), C(23852dc37ddd2607), + C(8b7f1b1ec897829e), C(d1d69452a54eed8a), C(56431f2bd766ec24), + C(c87f95de)}, + {C(fb8d9c70660b910b), C(a45b0cc3476bff1b), C(b28d1996144f0207), + C(98ec31113e5e35d2), C(5e4aeb853f1b9aa7), C(bcf5c8fe4465b7c8), + C(b1ea3a8243996f15), 
C(98ec31113e5e35d2), C(5e4aeb853f1b9aa7), + C(bcf5c8fe4465b7c8), C(b1ea3a8243996f15), C(cabbccedb6407571), + C(d1e40a84c445ec3a), C(33302aa908cf4039), C(9f15f79211b5cdf8), + C(3f5538ef)}, + {C(236827beae282a46), C(e43970221139c946), C(4f3ac6faa837a3aa), + C(71fec0f972248915), C(2170ec2061f24574), C(9eb346b6caa36e82), + C(2908f0fdbca48e73), C(71fec0f972248915), C(2170ec2061f24574), + C(9eb346b6caa36e82), C(2908f0fdbca48e73), C(8101c99f07c64abb), + C(b9f4b02b1b6a96a7), C(583a2b10cd222f88), C(199dae4cf9db24c), C(70eb1a1f)}, + {C(c385e435136ecf7c), C(d9d17368ff6c4a08), C(1b31eed4e5251a67), + C(df01a322c43a6200), C(298b65a1714b5a7e), C(933b83f0aedf23c), + C(157bcb44d63f765a), C(df01a322c43a6200), C(298b65a1714b5a7e), + C(933b83f0aedf23c), C(157bcb44d63f765a), C(d6e9fc7a272d8b51), + C(3ee5073ef1a9b777), C(63149e31fac02c59), C(2f7979ff636ba1d8), + C(cfd63b83)}, + {C(e3f6828b6017086d), C(21b4d1900554b3b0), C(bef38be1809e24f1), + C(d93251758985ee6c), C(32a9e9f82ba2a932), C(3822aacaa95f3329), + C(db349b2f90a490d8), C(d93251758985ee6c), C(32a9e9f82ba2a932), + C(3822aacaa95f3329), C(db349b2f90a490d8), C(8d49194a894a19ca), + C(79a78b06e42738e6), C(7e0f1eda3d390c66), C(1c291d7e641100a5), + C(894a52ef)}, + {C(851fff285561dca0), C(4d1277d73cdf416f), C(28ccffa61010ebe2), + C(77a4ccacd131d9ee), C(e1d08eeb2f0e29aa), C(70b9e3051383fa45), + C(582d0120425caba), C(77a4ccacd131d9ee), C(e1d08eeb2f0e29aa), + C(70b9e3051383fa45), C(582d0120425caba), C(a740eef1846e4564), + C(572dddb74ac3ae00), C(fdb5ca9579163bbd), C(a649b9b799c615d2), + C(9cde6a54)}, + {C(61152a63595a96d9), C(d1a3a91ef3a7ba45), C(443b6bb4a493ad0c), + C(a154296d11362d06), C(d0f0bf1f1cb02fc1), C(ccb87e09309f90d1), + C(b24a8e4881911101), C(a154296d11362d06), C(d0f0bf1f1cb02fc1), + C(ccb87e09309f90d1), C(b24a8e4881911101), C(1a481b4528559f58), + C(bf837a3150896995), C(4989ef6b941a3757), C(2e725ab72d0b2948), + C(6c4898d5)}, + {C(44473e03be306c88), C(30097761f872472a), C(9fd1b669bfad82d7), + C(3bab18b164396783), C(47e385ff9d4c06f), C(18062081bf558df), + C(63416eb68f104a36), C(3bab18b164396783), C(47e385ff9d4c06f), + C(18062081bf558df), C(63416eb68f104a36), C(4abda1560c47ac80), + C(1ea0e63dc6587aee), C(33ec79d92ebc1de), C(94f9dccef771e048), C(13e1978e)}, + {C(3ead5f21d344056), C(fb6420393cfb05c3), C(407932394cbbd303), + C(ac059617f5906673), C(94d50d3dcd3069a7), C(2b26c3b92dea0f0), + C(99b7374cc78fc3fb), C(ac059617f5906673), C(94d50d3dcd3069a7), + C(2b26c3b92dea0f0), C(99b7374cc78fc3fb), C(1a8e3c73cdd40ee8), + C(cbb5fca06747f45b), C(ceec44238b291841), C(28bf35cce9c90a25), C(51b4ba8)}, + {C(6abbfde37ee03b5b), C(83febf188d2cc113), C(cda7b62d94d5b8ee), + C(a4375590b8ae7c82), C(168fd42f9ecae4ff), C(23bbde43de2cb214), + C(a8c333112a243c8c), C(a4375590b8ae7c82), C(168fd42f9ecae4ff), + C(23bbde43de2cb214), C(a8c333112a243c8c), C(10ac012e8c518b49), + C(64a44605d8b29458), C(a67e701d2a679075), C(3a3a20f43ec92303), + C(b6b06e40)}, + {C(943e7ed63b3c080), C(1ef207e9444ef7f8), C(ef4a9f9f8c6f9b4a), + C(6b54fc38d6a84108), C(32f4212a47a4665), C(6b5a9a8f64ee1da6), + C(9f74e86c6da69421), C(6b54fc38d6a84108), C(32f4212a47a4665), + C(6b5a9a8f64ee1da6), C(9f74e86c6da69421), C(946dd0cb30c1a08e), + C(fdf376956907eaaa), C(a59074c6eec03028), C(b1a3abcf283f34ac), C(240a2f2)}, + {C(d72ce05171ef8a1a), C(c6bd6bd869203894), C(c760e6396455d23a), + C(f86af0b40dcce7b), C(8d3c15d613394d3c), C(491e400491cd4ece), + C(7c19d3530ea3547f), C(f86af0b40dcce7b), C(8d3c15d613394d3c), + C(491e400491cd4ece), C(7c19d3530ea3547f), C(1362963a1dc32af9), + C(fb9bc11762e1385c), C(9e164ef1f5376083), 
C(6c15819b5e828a7e), + C(5dcefc30)}, + {C(4182832b52d63735), C(337097e123eea414), C(b5a72ca0456df910), + C(7ebc034235bc122f), C(d9a7783d4edd8049), C(5f8b04a15ae42361), + C(fc193363336453dd), C(7ebc034235bc122f), C(d9a7783d4edd8049), + C(5f8b04a15ae42361), C(fc193363336453dd), C(9b6c50224ef8c4f8), + C(ba225c7942d16c3f), C(6f6d55226a73c412), C(abca061fe072152a), + C(7a48b105)}, + {C(d6cdae892584a2cb), C(58de0fa4eca17dcd), C(43df30b8f5f1cb00), + C(9e4ea5a4941e097d), C(547e048d5a9daaba), C(eb6ecbb0b831d185), + C(e0168df5fad0c670), C(9e4ea5a4941e097d), C(547e048d5a9daaba), + C(eb6ecbb0b831d185), C(e0168df5fad0c670), C(afa9705f98c2c96a), + C(749436f48137a96b), C(759c041fc21df486), C(b23bf400107aa2ec), + C(fd55007b)}, + {C(5c8e90bc267c5ee4), C(e9ae044075d992d9), C(f234cbfd1f0a1e59), + C(ce2744521944f14c), C(104f8032f99dc152), C(4e7f425bfac67ca7), + C(9461b911a1c6d589), C(ce2744521944f14c), C(104f8032f99dc152), + C(4e7f425bfac67ca7), C(9461b911a1c6d589), C(5e5ecc726db8b60d), + C(cce68b0586083b51), C(8a7f8e54a9cba0fc), C(42f010181d16f049), + C(6b95894c)}, + {C(bbd7f30ac310a6f3), C(b23b570d2666685f), C(fb13fb08c9814fe7), + C(4ee107042e512374), C(1e2c8c0d16097e13), C(210c7500995aa0e6), + C(6c13190557106457), C(4ee107042e512374), C(1e2c8c0d16097e13), + C(210c7500995aa0e6), C(6c13190557106457), C(a99b31c96777f381), + C(8312ae8301d386c0), C(ed5042b2a4fa96a3), C(d71d1bb23907fe97), + C(3360e827)}, + {C(36a097aa49519d97), C(8204380a73c4065), C(77c2004bdd9e276a), + C(6ee1f817ce0b7aee), C(e9dcb3507f0596ca), C(6bc63c666b5100e2), + C(e0b056f1821752af), C(6ee1f817ce0b7aee), C(e9dcb3507f0596ca), + C(6bc63c666b5100e2), C(e0b056f1821752af), C(8ea1114e60292678), + C(904b80b46becc77), C(46cd9bb6e9dff52f), C(4c91e3b698355540), C(45177e0b)}, + {C(dc78cb032c49217), C(112464083f83e03a), C(96ae53e28170c0f5), + C(d367ff54952a958), C(cdad930657371147), C(aa24dc2a9573d5fe), + C(eb136daa89da5110), C(d367ff54952a958), C(cdad930657371147), + C(aa24dc2a9573d5fe), C(eb136daa89da5110), C(de623005f6d46057), + C(b50c0c92b95e9b7f), C(a8aa54050b81c978), C(573fb5c7895af9b5), + C(7c6fffe4)}, + {C(441593e0da922dfe), C(936ef46061469b32), C(204a1921197ddd87), + C(50d8a70e7a8d8f56), C(256d150ae75dab76), C(e81f4c4a1989036a), + C(d0f8db365f9d7e00), C(50d8a70e7a8d8f56), C(256d150ae75dab76), + C(e81f4c4a1989036a), C(d0f8db365f9d7e00), C(753d686677b14522), + C(9f76e0cb6f2d0a66), C(ab14f95988ec0d39), C(97621d9da9c9812f), + C(bbc78da4)}, + {C(2ba3883d71cc2133), C(72f2bbb32bed1a3c), C(27e1bd96d4843251), + C(a90f761e8db1543a), C(c339e23c09703cd8), C(f0c6624c4b098fd3), + C(1bae2053e41fa4d9), C(a90f761e8db1543a), C(c339e23c09703cd8), + C(f0c6624c4b098fd3), C(1bae2053e41fa4d9), C(3589e273c22ba059), + C(63798246e5911a0b), C(18e710ec268fc5dc), C(714a122de1d074f3), + C(c5c25d39)}, + {C(f2b6d2adf8423600), C(7514e2f016a48722), C(43045743a50396ba), + C(23dacb811652ad4f), C(c982da480e0d4c7d), C(3a9c8ed5a399d0a9), + C(951b8d084691d4e4), C(23dacb811652ad4f), C(c982da480e0d4c7d), + C(3a9c8ed5a399d0a9), C(951b8d084691d4e4), C(d9f87b4988cff2f7), + C(217a191d986aa3bc), C(6ad23c56b480350), C(dd78673938ceb2e7), C(b6e5d06e)}, + {C(38fffe7f3680d63c), C(d513325255a7a6d1), C(31ed47790f6ca62f), + C(c801faaa0a2e331f), C(491dbc58279c7f88), C(9c0178848321c97a), + C(9d934f814f4d6a3c), C(c801faaa0a2e331f), C(491dbc58279c7f88), + C(9c0178848321c97a), C(9d934f814f4d6a3c), C(606a3e4fc8763192), + C(bc15cb36a677ee84), C(52d5904157e1fe71), C(1588dd8b1145b79b), + C(6178504e)}, + {C(b7477bf0b9ce37c6), C(63b1c580a7fd02a4), C(f6433b9f10a5dac), + C(68dd76db9d64eca7), C(36297682b64b67), 
C(42b192d71f414b7a), + C(79692cef44fa0206), C(68dd76db9d64eca7), C(36297682b64b67), + C(42b192d71f414b7a), C(79692cef44fa0206), C(f0979252f4776d07), + C(4b87cd4f1c9bbf52), C(51b84bbc6312c710), C(150720fbf85428a7), + C(bd4c3637)}, + {C(55bdb0e71e3edebd), C(c7ab562bcf0568bc), C(43166332f9ee684f), + C(b2e25964cd409117), C(a010599d6287c412), C(fa5d6461e768dda2), + C(cb3ce74e8ec4f906), C(b2e25964cd409117), C(a010599d6287c412), + C(fa5d6461e768dda2), C(cb3ce74e8ec4f906), C(6120abfd541a2610), + C(aa88b148cc95794d), C(2686ca35df6590e3), C(c6b02d18616ce94d), + C(6e7ac474)}, + {C(782fa1b08b475e7), C(fb7138951c61b23b), C(9829105e234fb11e), + C(9a8c431f500ef06e), C(d848581a580b6c12), C(fecfe11e13a2bdb4), + C(6c4fa0273d7db08c), C(9a8c431f500ef06e), C(d848581a580b6c12), + C(fecfe11e13a2bdb4), C(6c4fa0273d7db08c), C(482f43bf5ae59fcb), + C(f651fbca105d79e6), C(f09f78695d865817), C(7a99d0092085cf47), + C(1fb4b518)}, + {C(c5dc19b876d37a80), C(15ffcff666cfd710), C(e8c30c72003103e2), + C(7870765b470b2c5d), C(78a9103ff960d82), C(7bb50ffc9fac74b3), + C(477e70ab2b347db2), C(7870765b470b2c5d), C(78a9103ff960d82), + C(7bb50ffc9fac74b3), C(477e70ab2b347db2), C(a625238bdf7c07cf), + C(1128d515174809f5), C(b0f1647e82f45873), C(17792d1c4f222c39), + C(31d13d6d)}, + {C(5e1141711d2d6706), C(b537f6dee8de6933), C(3af0a1fbbe027c54), + C(ea349dbc16c2e441), C(38a7455b6a877547), C(5f97b9750e365411), + C(e8cde7f93af49a3), C(ea349dbc16c2e441), C(38a7455b6a877547), + C(5f97b9750e365411), C(e8cde7f93af49a3), C(ba101925ec1f7e26), + C(d5e84cab8192c71e), C(e256427726fdd633), C(a4f38e2c6116890d), + C(26fa72e3)}, + {C(782edf6da001234f), C(f48cbd5c66c48f3), C(808754d1e64e2a32), + C(5d9dde77353b1a6d), C(11f58c54581fa8b1), C(da90fa7c28c37478), + C(5e9a2eafc670a88a), C(5d9dde77353b1a6d), C(11f58c54581fa8b1), + C(da90fa7c28c37478), C(5e9a2eafc670a88a), C(e35e1bc172e011ef), + C(bf9255a4450ae7fe), C(55f85194e26bc55f), C(4f327873e14d0e54), + C(6a7433bf)}, + {C(d26285842ff04d44), C(8f38d71341eacca9), C(5ca436f4db7a883c), + C(bf41e5376b9f0eec), C(2252d21eb7e1c0e9), C(f4b70a971855e732), + C(40c7695aa3662afd), C(bf41e5376b9f0eec), C(2252d21eb7e1c0e9), + C(f4b70a971855e732), C(40c7695aa3662afd), C(770fe19e16ab73bb), + C(d603ebda6393d749), C(e58c62439aa50dbd), C(96d51e5a02d2d7cf), + C(4e6df758)}, + {C(c6ab830865a6bae6), C(6aa8e8dd4b98815c), C(efe3846713c371e5), + C(a1924cbf0b5f9222), C(7f4872369c2b4258), C(cd6da30530f3ea89), + C(b7f8b9a704e6cea1), C(a1924cbf0b5f9222), C(7f4872369c2b4258), + C(cd6da30530f3ea89), C(b7f8b9a704e6cea1), C(fa06ff40433fd535), + C(fb1c36fe8f0737f1), C(bb7050561171f80), C(b1bc23235935d897), C(d57f63ea)}, + {C(44b3a1929232892), C(61dca0e914fc217), C(a607cc142096b964), + C(f7dbc8433c89b274), C(2f5f70581c9b7d32), C(39bf5e5fec82dcca), + C(8ade56388901a619), C(f7dbc8433c89b274), C(2f5f70581c9b7d32), + C(39bf5e5fec82dcca), C(8ade56388901a619), C(c1c6a725caab3ea9), + C(c1c7906c2f80b898), C(9c3871a04cc884e6), C(df01813cbbdf217f), + C(52ef73b3)}, + {C(4b603d7932a8de4f), C(fae64c464b8a8f45), C(8fafab75661d602a), + C(8ffe870ef4adc087), C(65bea2be41f55b54), C(82f3503f636aef1), + C(5f78a282378b6bb0), C(8ffe870ef4adc087), C(65bea2be41f55b54), + C(82f3503f636aef1), C(5f78a282378b6bb0), C(7bf2422c0beceddb), + C(9d238d4780114bd), C(7ad198311906597f), C(ec8f892c0422aca3), C(3cb36c3)}, + {C(4ec0b54cf1566aff), C(30d2c7269b206bf4), C(77c22e82295e1061), + C(3df9b04434771542), C(feddce785ccb661f), C(a644aff716928297), + C(dd46aee73824b4ed), C(3df9b04434771542), C(feddce785ccb661f), + C(a644aff716928297), C(dd46aee73824b4ed), C(bf8d71879da29b02), + 
C(fc82dccbfc8022a0), C(31bfcd0d9f48d1d3), C(c64ee24d0e7b5f8b), + C(72c39bea)}, + {C(ed8b7a4b34954ff7), C(56432de31f4ee757), C(85bd3abaa572b155), + C(7d2c38a926dc1b88), C(5245b9eb4cd6791d), C(fb53ab03b9ad0855), + C(3664026c8fc669d7), C(7d2c38a926dc1b88), C(5245b9eb4cd6791d), + C(fb53ab03b9ad0855), C(3664026c8fc669d7), C(45024d5080bc196), + C(b236ebec2cc2740), C(27231ad0e3443be4), C(145780b63f809250), C(a65aa25c)}, + {C(5d28b43694176c26), C(714cc8bc12d060ae), C(3437726273a83fe6), + C(864b1b28ec16ea86), C(6a78a5a4039ec2b9), C(8e959533e35a766), + C(347b7c22b75ae65f), C(864b1b28ec16ea86), C(6a78a5a4039ec2b9), + C(8e959533e35a766), C(347b7c22b75ae65f), C(5005892bb61e647c), + C(fe646519b4a1894d), C(cd801026f74a8a53), C(8713463e9a1ab9ce), + C(74740539)}, + {C(6a1ef3639e1d202e), C(919bc1bd145ad928), C(30f3f7e48c28a773), + C(2e8c49d7c7aaa527), C(5e2328fc8701db7c), C(89ef1afca81f7de8), + C(b1857db11985d296), C(2e8c49d7c7aaa527), C(5e2328fc8701db7c), + C(89ef1afca81f7de8), C(b1857db11985d296), C(17763d695f616115), + C(b8f7bf1fcdc8322c), C(cf0c61938ab07a27), C(1122d3e6edb4e866), + C(c3ae3c26)}, + {C(159f4d9e0307b111), C(3e17914a5675a0c), C(af849bd425047b51), + C(3b69edadf357432b), C(3a2e311c121e6bf2), C(380fad1e288d57e5), + C(bf7c7e8ef0e3b83a), C(3b69edadf357432b), C(3a2e311c121e6bf2), + C(380fad1e288d57e5), C(bf7c7e8ef0e3b83a), C(92966d5f4356ae9b), + C(2a03fc66c4d6c036), C(2516d8bddb0d5259), C(b3ffe9737ff5090), C(f29db8a2)}, + {C(cc0a840725a7e25b), C(57c69454396e193a), C(976eaf7eee0b4540), + C(cd7a46850b95e901), C(c57f7d060dda246f), C(6b9406ead64079bf), + C(11b28e20a573b7bd), C(cd7a46850b95e901), C(c57f7d060dda246f), + C(6b9406ead64079bf), C(11b28e20a573b7bd), C(2d6db356e9369ace), + C(dc0afe10fba193), C(5cdb10885dbbfce), C(5c700e205782e35a), C(1ef4cbf4)}, + {C(a2b27ee22f63c3f1), C(9ebde0ce1b3976b2), C(2fe6a92a257af308), + C(8c1df927a930af59), C(a462f4423c9e384e), C(236542255b2ad8d9), + C(595d201a2c19d5bc), C(8c1df927a930af59), C(a462f4423c9e384e), + C(236542255b2ad8d9), C(595d201a2c19d5bc), C(22c87d4604a67f3), + C(585a06eb4bc44c4f), C(b4175a7ac7eabcd8), C(a457d3eeba14ab8c), + C(a9be6c41)}, + {C(d8f2f234899bcab3), C(b10b037297c3a168), C(debea2c510ceda7f), + C(9498fefb890287ce), C(ae68c2be5b1a69a6), C(6189dfba34ed656c), + C(91658f95836e5206), C(9498fefb890287ce), C(ae68c2be5b1a69a6), + C(6189dfba34ed656c), C(91658f95836e5206), C(c0bb4fff32aecd4d), + C(94125f505a50eef9), C(6ac406e7cfbce5bb), C(344a4b1dcdb7f5d8), C(fa31801)}, + {C(584f28543864844f), C(d7cee9fc2d46f20d), C(a38dca5657387205), + C(7a0b6dbab9a14e69), C(c6d0a9d6b0e31ac4), C(a674d85812c7cf6), + C(63538c0351049940), C(7a0b6dbab9a14e69), C(c6d0a9d6b0e31ac4), + C(a674d85812c7cf6), C(63538c0351049940), C(9710e5f0bc93d1d), + C(c2bea5bd7c54ddd4), C(48739af2bed0d32d), C(ba2c4e09e21fba85), + C(8331c5d8)}, + {C(a94be46dd9aa41af), C(a57e5b7723d3f9bd), C(34bf845a52fd2f), + C(843b58463c8df0ae), C(74b258324e916045), C(bdd7353230eb2b38), + C(fad31fced7abade5), C(843b58463c8df0ae), C(74b258324e916045), + C(bdd7353230eb2b38), C(fad31fced7abade5), C(2436aeafb0046f85), + C(65bc9af9e5e33161), C(92733b1b3ae90628), C(f48143eaf78a7a89), + C(e9876db8)}, + {C(9a87bea227491d20), C(a468657e2b9c43e7), C(af9ba60db8d89ef7), + C(cc76f429ea7a12bb), C(5f30eaf2bb14870a), C(434e824cb3e0cd11), + C(431a4d382e39d16e), C(cc76f429ea7a12bb), C(5f30eaf2bb14870a), + C(434e824cb3e0cd11), C(431a4d382e39d16e), C(9e51f913c4773a8), + C(32ab1925823d0add), C(99c61b54c1d8f69d), C(38cfb80f02b43b1f), + C(27b0604e)}, + {C(27688c24958d1a5c), C(e3b4a1c9429cf253), C(48a95811f70d64bc), + 
C(328063229db22884), C(67e9c95f8ba96028), C(7c6bf01c60436075), + C(fa55161e7d9030b2), C(328063229db22884), C(67e9c95f8ba96028), + C(7c6bf01c60436075), C(fa55161e7d9030b2), C(dadbc2f0dab91681), + C(da39d7a4934ca11), C(162e845d24c1b45c), C(eb5b9dcd8c6ed31b), C(dcec07f2)}, + {C(5d1d37790a1873ad), C(ed9cd4bcc5fa1090), C(ce51cde05d8cd96a), + C(f72c26e624407e66), C(a0eb541bdbc6d409), C(c3f40a2f40b3b213), + C(6a784de68794492d), C(f72c26e624407e66), C(a0eb541bdbc6d409), + C(c3f40a2f40b3b213), C(6a784de68794492d), C(10a38a23dbef7937), + C(6a5560f853252278), C(c3387bbf3c7b82ba), C(fbee7c12eb072805), + C(cff0a82a)}, + {C(1f03fd18b711eea9), C(566d89b1946d381a), C(6e96e83fc92563ab), + C(405f66cf8cae1a32), C(d7261740d8f18ce6), C(fea3af64a413d0b2), + C(d64d1810e83520fe), C(405f66cf8cae1a32), C(d7261740d8f18ce6), + C(fea3af64a413d0b2), C(d64d1810e83520fe), C(e1334a00a580c6e8), + C(454049e1b52c15f), C(8895d823d9778247), C(efa7f2e88b826618), C(fec83621)}, + {C(f0316f286cf527b6), C(f84c29538de1aa5a), C(7612ed3c923d4a71), + C(d4eccebe9393ee8a), C(2eb7867c2318cc59), C(1ce621fd700fe396), + C(686450d7a346878a), C(d4eccebe9393ee8a), C(2eb7867c2318cc59), + C(1ce621fd700fe396), C(686450d7a346878a), C(75a5f37579f8b4cb), + C(500cc16eb6541dc7), C(b7b02317b539d9a6), C(3519ddff5bc20a29), C(743d8dc)}, + {C(297008bcb3e3401d), C(61a8e407f82b0c69), C(a4a35bff0524fa0e), + C(7a61d8f552a53442), C(821d1d8d8cfacf35), C(7cc06361b86d0559), + C(119b617a8c2be199), C(7a61d8f552a53442), C(821d1d8d8cfacf35), + C(7cc06361b86d0559), C(119b617a8c2be199), C(2996487da6721759), + C(61a901376070b91d), C(d88dee12ae9c9b3c), C(5665491be1fa53a7), + C(64d41d26)}, + {C(43c6252411ee3be), C(b4ca1b8077777168), C(2746dc3f7da1737f), + C(2247a4b2058d1c50), C(1b3fa184b1d7bcc0), C(deb85613995c06ed), + C(cbe1d957485a3ccd), C(2247a4b2058d1c50), C(1b3fa184b1d7bcc0), + C(deb85613995c06ed), C(cbe1d957485a3ccd), C(dfe241f8f33c96b6), + C(6597eb05019c2109), C(da344b2a63a219cf), C(79b8e3887612378a), + C(acd90c81)}, + {C(ce38a9a54fad6599), C(6d6f4a90b9e8755e), C(c3ecc79ff105de3f), + C(e8b9ee96efa2d0e), C(90122905c4ab5358), C(84f80c832d71979c), + C(229310f3ffbbf4c6), C(e8b9ee96efa2d0e), C(90122905c4ab5358), + C(84f80c832d71979c), C(229310f3ffbbf4c6), C(cc9eb42100cd63a7), + C(7a283f2f3da7b9f), C(359b061d314e7a72), C(d0d959720028862), C(7c746a4b)}, + {C(270a9305fef70cf), C(600193999d884f3a), C(f4d49eae09ed8a1), + C(2e091b85660f1298), C(bfe37fae1cdd64c9), C(8dddfbab930f6494), + C(2ccf4b08f5d417a), C(2e091b85660f1298), C(bfe37fae1cdd64c9), + C(8dddfbab930f6494), C(2ccf4b08f5d417a), C(365c2ee85582fe6), + C(dee027bcd36db62a), C(b150994d3c7e5838), C(fdfd1a0e692e436d), + C(b1047e99)}, + {C(e71be7c28e84d119), C(eb6ace59932736e6), C(70c4397807ba12c5), + C(7a9d77781ac53509), C(4489c3ccfda3b39c), C(fa722d4f243b4964), + C(25f15800bffdd122), C(7a9d77781ac53509), C(4489c3ccfda3b39c), + C(fa722d4f243b4964), C(25f15800bffdd122), C(ed85e4157fbd3297), + C(aab1967227d59efd), C(2199631212eb3839), C(3e4c19359aae1cc2), + C(d1fd1068)}, + {C(b5b58c24b53aaa19), C(d2a6ab0773dd897f), C(ef762fe01ecb5b97), + C(9deefbcfa4cab1f1), C(b58f5943cd2492ba), C(a96dcc4d1f4782a7), + C(102b62a82309dde5), C(9deefbcfa4cab1f1), C(b58f5943cd2492ba), + C(a96dcc4d1f4782a7), C(102b62a82309dde5), C(35fe52684763b338), + C(afe2616651eaad1f), C(43e38715bdfa05e7), C(83c9ba83b5ec4a40), + C(56486077)}, + {C(44dd59bd301995cf), C(3ccabd76493ada1a), C(540db4c87d55ef23), + C(cfc6d7adda35797), C(14c7d1f32332cf03), C(2d553ffbff3be99d), + C(c91c4ee0cb563182), C(cfc6d7adda35797), C(14c7d1f32332cf03), + C(2d553ffbff3be99d), 
C(c91c4ee0cb563182), C(9aa5e507f49136f0), + C(760c5dd1a82c4888), C(beea7e974a1cfb5c), C(640b247774fe4bf7), + C(6069be80)}, + {C(b4d4789eb6f2630b), C(bf6973263ce8ef0e), C(d1c75c50844b9d3), + C(bce905900c1ec6ea), C(c30f304f4045487d), C(a5c550166b3a142b), + C(2f482b4e35327287), C(bce905900c1ec6ea), C(c30f304f4045487d), + C(a5c550166b3a142b), C(2f482b4e35327287), C(15b21ddddf355438), + C(496471fa3006bab), C(2a8fd458d06c1a32), C(db91e8ae812f0b8d), C(2078359b)}, + {C(12807833c463737c), C(58e927ea3b3776b4), C(72dd20ef1c2f8ad0), + C(910b610de7a967bf), C(801bc862120f6bf5), C(9653efeed5897681), + C(f5367ff83e9ebbb3), C(910b610de7a967bf), C(801bc862120f6bf5), + C(9653efeed5897681), C(f5367ff83e9ebbb3), C(cf56d489afd1b0bf), + C(c7c793715cae3de8), C(631f91d64abae47c), C(5f1f42fb14a444a2), + C(9ea21004)}, + {C(e88419922b87176f), C(bcf32f41a7ddbf6f), C(d6ebefd8085c1a0f), + C(d1d44fe99451ef72), C(ec951ba8e51e3545), C(c0ca86b360746e96), + C(aa679cc066a8040b), C(d1d44fe99451ef72), C(ec951ba8e51e3545), + C(c0ca86b360746e96), C(aa679cc066a8040b), C(51065861ece6ffc1), + C(76777368a2997e11), C(87f278f46731100c), C(bbaa4140bdba4527), + C(9c9cfe88)}, + {C(105191e0ec8f7f60), C(5918dbfcca971e79), C(6b285c8a944767b9), + C(d3e86ac4f5eccfa4), C(e5399df2b106ca1), C(814aadfacd217f1d), + C(2754e3def1c405a9), C(d3e86ac4f5eccfa4), C(e5399df2b106ca1), + C(814aadfacd217f1d), C(2754e3def1c405a9), C(99290323b9f06e74), + C(a9782e043f271461), C(13c8b3b8c275a860), C(6038d620e581e9e7), + C(b70a6ddd)}, + {C(a5b88bf7399a9f07), C(fca3ddfd96461cc4), C(ebe738fdc0282fc6), + C(69afbc800606d0fb), C(6104b97a9db12df7), C(fcc09198bb90bf9f), + C(c5e077e41a65ba91), C(69afbc800606d0fb), C(6104b97a9db12df7), + C(fcc09198bb90bf9f), C(c5e077e41a65ba91), C(db261835ee8aa08e), + C(db0ee662e5796dc9), C(fc1880ecec499e5f), C(648866fbe1502034), + C(dea37298)}, + {C(d08c3f5747d84f50), C(4e708b27d1b6f8ac), C(70f70fd734888606), + C(909ae019d761d019), C(368bf4aab1b86ef9), C(308bd616d5460239), + C(4fd33269f76783ea), C(909ae019d761d019), C(368bf4aab1b86ef9), + C(308bd616d5460239), C(4fd33269f76783ea), C(7d53b37c19713eab), + C(6bba6eabda58a897), C(91abb50efc116047), C(4e902f347e0e0e35), + C(8f480819)}, + {C(2f72d12a40044b4b), C(889689352fec53de), C(f03e6ad87eb2f36), + C(ef79f28d874b9e2d), C(b512089e8e63b76c), C(24dc06833bf193a9), + C(3c23308ba8e99d7e), C(ef79f28d874b9e2d), C(b512089e8e63b76c), + C(24dc06833bf193a9), C(3c23308ba8e99d7e), C(5ceff7b85cacefb7), + C(ef390338898cd73), C(b12967d7d2254f54), C(de874cbd8aef7b75), C(30b3b16)}, + {C(aa1f61fdc5c2e11e), C(c2c56cd11277ab27), C(a1e73069fdf1f94f), + C(8184bab36bb79df0), C(c81929ce8655b940), C(301b11bf8a4d8ce8), + C(73126fd45ab75de9), C(8184bab36bb79df0), C(c81929ce8655b940), + C(301b11bf8a4d8ce8), C(73126fd45ab75de9), C(4bd6f76e4888229a), + C(9aae355b54a756d5), C(ca3de9726f6e99d5), C(83f80cac5bc36852), + C(f31bc4e8)}, + {C(9489b36fe2246244), C(3355367033be74b8), C(5f57c2277cbce516), + C(bc61414f9802ecaf), C(8edd1e7a50562924), C(48f4ab74a35e95f2), + C(cc1afcfd99a180e7), C(bc61414f9802ecaf), C(8edd1e7a50562924), + C(48f4ab74a35e95f2), C(cc1afcfd99a180e7), C(517dd5e3acf66110), + C(7dd3ad9e8978b30d), C(1f6d5dfc70de812b), C(947daaba6441aaf3), + C(419f953b)}, + {C(358d7c0476a044cd), C(e0b7b47bcbd8854f), C(ffb42ec696705519), + C(d45e44c263e95c38), C(df61db53923ae3b1), C(f2bc948cc4fc027c), + C(8a8000c6066772a3), C(d45e44c263e95c38), C(df61db53923ae3b1), + C(f2bc948cc4fc027c), C(8a8000c6066772a3), C(9fd93c942d31fa17), + C(d7651ecebe09cbd3), C(68682cefb6a6f165), C(541eb99a2dcee40e), + C(20e9e76d)}, + 
{C(b0c48df14275265a), C(9da4448975905efa), C(d716618e414ceb6d), + C(30e888af70df1e56), C(4bee54bd47274f69), C(178b4059e1a0afe5), + C(6e2c96b7f58e5178), C(30e888af70df1e56), C(4bee54bd47274f69), + C(178b4059e1a0afe5), C(6e2c96b7f58e5178), C(bb429d3b9275e9bc), + C(c198013f09cafdc6), C(ec0a6ee4fb5de348), C(744e1e8ed2eb1eb0), + C(646f0ff8)}, + {C(daa70bb300956588), C(410ea6883a240c6d), C(f5c8239fb5673eb3), + C(8b1d7bb4903c105f), C(cfb1c322b73891d4), C(5f3b792b22f07297), + C(fd64061f8be86811), C(8b1d7bb4903c105f), C(cfb1c322b73891d4), + C(5f3b792b22f07297), C(fd64061f8be86811), C(1d2db712921cfc2b), + C(cd1b2b2f2cee18ae), C(6b6f8790dc7feb09), C(46c179efa3f0f518), + C(eeb7eca8)}, + {C(4ec97a20b6c4c7c2), C(5913b1cd454f29fd), C(a9629f9daf06d685), + C(852c9499156a8f3), C(3a180a6abfb79016), C(9fc3c4764037c3c9), + C(2890c42fc0d972cf), C(852c9499156a8f3), C(3a180a6abfb79016), + C(9fc3c4764037c3c9), C(2890c42fc0d972cf), C(1f92231d4e537651), + C(fab8bb07aa54b7b9), C(e05d2d771c485ed4), C(d50b34bf808ca731), C(8112bb9)}, + {C(5c3323628435a2e8), C(1bea45ce9e72a6e3), C(904f0a7027ddb52e), + C(939f31de14dcdc7b), C(a68fdf4379df068), C(f169e1f0b835279d), + C(7498e432f9619b27), C(939f31de14dcdc7b), C(a68fdf4379df068), + C(f169e1f0b835279d), C(7498e432f9619b27), C(1aa2a1f11088e785), + C(d6ad72f45729de78), C(9a63814157c80267), C(55538e35c648e435), + C(85a6d477)}, + {C(c1ef26bea260abdb), C(6ee423f2137f9280), C(df2118b946ed0b43), + C(11b87fb1b900cc39), C(e33e59b90dd815b1), C(aa6cb5c4bafae741), + C(739699951ca8c713), C(11b87fb1b900cc39), C(e33e59b90dd815b1), + C(aa6cb5c4bafae741), C(739699951ca8c713), C(2b4389a967310077), + C(1d5382568a31c2c9), C(55d1e787fbe68991), C(277c254bc31301e7), + C(56f76c84)}, + {C(6be7381b115d653a), C(ed046190758ea511), C(de6a45ffc3ed1159), + C(a64760e4041447d0), C(e3eac49f3e0c5109), C(dd86c4d4cb6258e2), + C(efa9857afd046c7f), C(a64760e4041447d0), C(e3eac49f3e0c5109), + C(dd86c4d4cb6258e2), C(efa9857afd046c7f), C(fab793dae8246f16), + C(c9e3b121b31d094c), C(a2a0f55858465226), C(dba6f0ff39436344), + C(9af45d55)}, + {C(ae3eece1711b2105), C(14fd3f4027f81a4a), C(abb7e45177d151db), + C(501f3e9b18861e44), C(465201170074e7d8), C(96d5c91970f2cb12), + C(40fd28c43506c95d), C(501f3e9b18861e44), C(465201170074e7d8), + C(96d5c91970f2cb12), C(40fd28c43506c95d), C(e86c4b07802aaff3), + C(f317d14112372a70), C(641b13e587711650), C(4915421ab1090eaa), + C(d1c33760)}, + {C(376c28588b8fb389), C(6b045e84d8491ed2), C(4e857effb7d4e7dc), + C(154dd79fd2f984b4), C(f11171775622c1c3), C(1fbe30982e78e6f0), + C(a460a15dcf327e44), C(154dd79fd2f984b4), C(f11171775622c1c3), + C(1fbe30982e78e6f0), C(a460a15dcf327e44), C(f359e0900cc3d582), + C(7e11070447976d00), C(324e6daf276ea4b5), C(7aa6e2df0cc94fa2), + C(c56bbf69)}, + {C(58d943503bb6748f), C(419c6c8e88ac70f6), C(586760cbf3d3d368), + C(b7e164979d5ccfc1), C(12cb4230d26bf286), C(f1bf910d44bd84cb), + C(b32c24c6a40272), C(b7e164979d5ccfc1), C(12cb4230d26bf286), + C(f1bf910d44bd84cb), C(b32c24c6a40272), C(11ed12e34c48c039), + C(b0c2538e51d0a6ac), C(4269bb773e1d553a), C(e35a9dbabd34867), C(abecfb9b)}, + {C(dfff5989f5cfd9a1), C(bcee2e7ea3a96f83), C(681c7874adb29017), + C(3ff6c8ac7c36b63a), C(48bc8831d849e326), C(30b078e76b0214e2), + C(42954e6ad721b920), C(3ff6c8ac7c36b63a), C(48bc8831d849e326), + C(30b078e76b0214e2), C(42954e6ad721b920), C(f9aeb33d164b4472), + C(7b353b110831dbdc), C(16f64c82f44ae17b), C(b71244cc164b3b2b), + C(8de13255)}, + {C(7fb19eb1a496e8f5), C(d49e5dfdb5c0833f), C(c0d5d7b2f7c48dc7), + C(1a57313a32f22dde), C(30af46e49850bf8b), C(aa0fe8d12f808f83), + 
C(443e31d70873bb6b), C(1a57313a32f22dde), C(30af46e49850bf8b), + C(aa0fe8d12f808f83), C(443e31d70873bb6b), C(bbeb67c49c9fdc13), + C(18f1e2a88f59f9d5), C(fb1b05038e5def11), C(d0450b5ce4c39c52), + C(a98ee299)}, + {C(5dba5b0dadccdbaa), C(4ba8da8ded87fcdc), C(f693fdd25badf2f0), + C(e9029e6364286587), C(ae69f49ecb46726c), C(18e002679217c405), + C(bd6d66e85332ae9f), C(e9029e6364286587), C(ae69f49ecb46726c), + C(18e002679217c405), C(bd6d66e85332ae9f), C(6bf330b1c353dd2a), + C(74e9f2e71e3a4152), C(3f85560b50f6c413), C(d33a52a47eaed2b4), + C(3015f556)}, + {C(688bef4b135a6829), C(8d31d82abcd54e8e), C(f95f8a30d55036d7), + C(3d8c90e27aa2e147), C(2ec937ce0aa236b4), C(89b563996d3a0b78), + C(39b02413b23c3f08), C(3d8c90e27aa2e147), C(2ec937ce0aa236b4), + C(89b563996d3a0b78), C(39b02413b23c3f08), C(8d475a2e64faf2d2), + C(48567f7dca46ecaf), C(254cda08d5f87a6d), C(ec6ae9f729c47039), + C(5a430e29)}, + {C(d8323be05433a412), C(8d48fa2b2b76141d), C(3d346f23978336a5), + C(4d50c7537562033f), C(57dc7625b61dfe89), C(9723a9f4c08ad93a), + C(5309596f48ab456b), C(4d50c7537562033f), C(57dc7625b61dfe89), + C(9723a9f4c08ad93a), C(5309596f48ab456b), C(7e453088019d220f), + C(8776067ba6ab9714), C(67e1d06bd195de39), C(74a1a32f8994b918), + C(2797add0)}, + {C(3b5404278a55a7fc), C(23ca0b327c2d0a81), C(a6d65329571c892c), + C(45504801e0e6066b), C(86e6c6d6152a3d04), C(4f3db1c53eca2952), + C(d24d69b3e9ef10f3), C(45504801e0e6066b), C(86e6c6d6152a3d04), + C(4f3db1c53eca2952), C(d24d69b3e9ef10f3), C(93a0de2219e66a70), + C(8932c7115ccb1f8a), C(5ef503fdf2841a8c), C(38064dd9efa80a41), + C(27d55016)}, + {C(2a96a3f96c5e9bbc), C(8caf8566e212dda8), C(904de559ca16e45e), + C(f13bc2d9c2fe222e), C(be4ccec9a6cdccfd), C(37b2cbdd973a3ac9), + C(7b3223cd9c9497be), C(f13bc2d9c2fe222e), C(be4ccec9a6cdccfd), + C(37b2cbdd973a3ac9), C(7b3223cd9c9497be), C(d5904440f376f889), + C(62b13187699c473c), C(4751b89251f26726), C(9500d84fa3a61ba8), + C(84945a82)}, + {C(22bebfdcc26d18ff), C(4b4d8dcb10807ba1), C(40265eee30c6b896), + C(3752b423073b119a), C(377dc5eb7c662bdb), C(2b9f07f93a6c25b9), + C(96f24ede2bdc0718), C(3752b423073b119a), C(377dc5eb7c662bdb), + C(2b9f07f93a6c25b9), C(96f24ede2bdc0718), C(f7699b12c31417bd), + C(17b366f401c58b2), C(bf60188d5f437b37), C(484436e56df17f04), C(3ef7e224)}, + {C(627a2249ec6bbcc2), C(c0578b462a46735a), C(4974b8ee1c2d4f1f), + C(ebdbb918eb6d837f), C(8fb5f218dd84147c), C(c77dd1f881df2c54), + C(62eac298ec226dc3), C(ebdbb918eb6d837f), C(8fb5f218dd84147c), + C(c77dd1f881df2c54), C(62eac298ec226dc3), C(43eded83c4b60bd0), + C(9a0a403b5487503b), C(25f305d9147f0bda), C(3ad417f511bc1e64), + C(35ed8dc8)}, + {C(3abaf1667ba2f3e0), C(ee78476b5eeadc1), C(7e56ac0a6ca4f3f4), + C(f1b9b413df9d79ed), C(a7621b6fd02db503), C(d92f7ba9928a4ffe), + C(53f56babdcae96a6), C(f1b9b413df9d79ed), C(a7621b6fd02db503), + C(d92f7ba9928a4ffe), C(53f56babdcae96a6), C(5302b89fc48713ab), + C(d03e3b04dbe7a2f2), C(fa74ef8af6d376a7), C(103c8cdea1050ef2), + C(6a75e43d)}, + {C(3931ac68c5f1b2c9), C(efe3892363ab0fb0), C(40b707268337cd36), + C(a53a6b64b1ac85c9), C(d50e7f86ee1b832b), C(7bab08fdd26ba0a4), + C(7587743c18fe2475), C(a53a6b64b1ac85c9), C(d50e7f86ee1b832b), + C(7bab08fdd26ba0a4), C(7587743c18fe2475), C(e3b5d5d490cf5761), + C(dfc053f7d065edd5), C(42ffd8d5fb70129f), C(599ca38677cccdc3), + C(235d9805)}, + {C(b98fb0606f416754), C(46a6e5547ba99c1e), C(c909d82112a8ed2), + C(dbfaae9642b3205a), C(f676a1339402bcb9), C(f4f12a5b1ac11f29), + C(7db8bad81249dee4), C(dbfaae9642b3205a), C(f676a1339402bcb9), + C(f4f12a5b1ac11f29), C(7db8bad81249dee4), C(b26e46f2da95922e), + 
C(2aaedd5e12e3c611), C(a0e2d9082966074), C(c64da8a167add63d), C(f7d69572)}, + {C(7f7729a33e58fcc4), C(2e4bc1e7a023ead4), C(e707008ea7ca6222), + C(47418a71800334a0), C(d10395d8fc64d8a4), C(8257a30062cb66f), + C(6786f9b2dc1ff18a), C(47418a71800334a0), C(d10395d8fc64d8a4), + C(8257a30062cb66f), C(6786f9b2dc1ff18a), C(5633f437bb2f180f), + C(e5a3a405737d22d6), C(ca0ff1ef6f7f0b74), C(d0ae600684b16df8), + C(bacd0199)}, + {C(42a0aa9ce82848b3), C(57232730e6bee175), C(f89bb3f370782031), + C(caa33cf9b4f6619c), C(b2c8648ad49c209f), C(9e89ece0712db1c0), + C(101d8274a711a54b), C(caa33cf9b4f6619c), C(b2c8648ad49c209f), + C(9e89ece0712db1c0), C(101d8274a711a54b), C(538e79f1e70135cd), + C(e1f5a76f983c844e), C(653c082fd66088fc), C(1b9c9b464b654958), + C(e428f50e)}, + {C(6b2c6d38408a4889), C(de3ef6f68fb25885), C(20754f456c203361), + C(941f5023c0c943f9), C(dfdeb9564fd66f24), C(2140cec706b9d406), + C(7b22429b131e9c72), C(941f5023c0c943f9), C(dfdeb9564fd66f24), + C(2140cec706b9d406), C(7b22429b131e9c72), C(94215c22eb940f45), + C(d28b9ed474f7249a), C(6f25e88f2fbf9f56), C(b6718f9e605b38ac), + C(81eaaad3)}, + {C(930380a3741e862a), C(348d28638dc71658), C(89dedcfd1654ea0d), + C(7e7f61684080106), C(837ace9794582976), C(5ac8ca76a357eb1b), + C(32b58308625661fb), C(7e7f61684080106), C(837ace9794582976), + C(5ac8ca76a357eb1b), C(32b58308625661fb), C(c09705c4572025d9), + C(f9187f6af0291303), C(1c0edd8ee4b02538), C(e6cb105daa0578a), C(addbd3e3)}, + {C(94808b5d2aa25f9a), C(cec72968128195e0), C(d9f4da2bdc1e130f), + C(272d8dd74f3006cc), C(ec6c2ad1ec03f554), C(4ad276b249a5d5dd), + C(549a22a17c0cde12), C(272d8dd74f3006cc), C(ec6c2ad1ec03f554), + C(4ad276b249a5d5dd), C(549a22a17c0cde12), C(602119cb824d7cde), + C(f4d3cef240ef35fa), C(e889895e01911bc7), C(785a7e5ac20e852b), + C(e66dbca0)}, + {C(b31abb08ae6e3d38), C(9eb9a95cbd9e8223), C(8019e79b7ee94ea9), + C(7b2271a7a3248e22), C(3b4f700e5a0ba523), C(8ebc520c227206fe), + C(da3f861490f5d291), C(7b2271a7a3248e22), C(3b4f700e5a0ba523), + C(8ebc520c227206fe), C(da3f861490f5d291), C(d08a689f9f3aa60e), + C(547c1b97a068661f), C(4b15a67fa29172f0), C(eaf40c085191d80f), + C(afe11fd5)}, + {C(dccb5534a893ea1a), C(ce71c398708c6131), C(fe2396315457c164), + C(3f1229f4d0fd96fb), C(33130aa5fa9d43f2), C(e42693d5b34e63ab), + C(2f4ef2be67f62104), C(3f1229f4d0fd96fb), C(33130aa5fa9d43f2), + C(e42693d5b34e63ab), C(2f4ef2be67f62104), C(372e5153516e37b9), + C(af9ec142ab12cc86), C(777920c09345e359), C(e7c4a383bef8adc6), + C(a71a406f)}, + {C(6369163565814de6), C(8feb86fb38d08c2f), C(4976933485cc9a20), + C(7d3e82d5ba29a90d), C(d5983cc93a9d126a), C(37e9dfd950e7b692), + C(80673be6a7888b87), C(7d3e82d5ba29a90d), C(d5983cc93a9d126a), + C(37e9dfd950e7b692), C(80673be6a7888b87), C(57f732dc600808bc), + C(59477199802cc78b), C(f824810eb8f2c2de), C(c4a3437f05b3b61c), + C(9d90eaf5)}, + {C(edee4ff253d9f9b3), C(96ef76fb279ef0ad), C(a4d204d179db2460), + C(1f3dcdfa513512d6), C(4dc7ec07283117e4), C(4438bae88ae28bf9), + C(aa7eae72c9244a0d), C(1f3dcdfa513512d6), C(4dc7ec07283117e4), + C(4438bae88ae28bf9), C(aa7eae72c9244a0d), C(b9aedc8d3ecc72df), + C(b75a8eb090a77d62), C(6b15677f9cd91507), C(51d8282cb3a9ddbf), + C(6665db10)}, + {C(941993df6e633214), C(929bc1beca5b72c6), C(141fc52b8d55572d), + C(b3b782ad308f21ed), C(4f2676485041dee0), C(bfe279aed5cb4bc8), + C(2a62508a467a22ff), C(b3b782ad308f21ed), C(4f2676485041dee0), + C(bfe279aed5cb4bc8), C(2a62508a467a22ff), C(e74d29eab742385d), + C(56b05cd90ecfc293), C(c603728ea73f8844), C(8638fcd21bc692c4), + C(9c977cbf)}, + {C(859838293f64cd4c), C(484403b39d44ad79), C(bf674e64d64b9339), 
+ C(44d68afda9568f08), C(478568ed51ca1d65), C(679c204ad3d9e766), + C(b28e788878488dc1), C(44d68afda9568f08), C(478568ed51ca1d65), + C(679c204ad3d9e766), C(b28e788878488dc1), C(d001a84d3a84fae6), + C(d376958fe4cb913e), C(17435277e36c86f0), C(23657b263c347aa6), + C(ee83ddd4)}, + {C(c19b5648e0d9f555), C(328e47b2b7562993), C(e756b92ba4bd6a51), + C(c3314e362764ddb8), C(6481c084ee9ec6b5), C(ede23fb9a251771), + C(bd617f2643324590), C(c3314e362764ddb8), C(6481c084ee9ec6b5), + C(ede23fb9a251771), C(bd617f2643324590), C(d2d30c9b95e030f5), + C(8a517312ffc5795e), C(8b1f325033bd535e), C(3ee6e867e03f2892), C(26519cc)}, + {C(f963b63b9006c248), C(9e9bf727ffaa00bc), C(c73bacc75b917e3a), + C(2c6aa706129cc54c), C(17a706f59a49f086), C(c7c1eec455217145), + C(6adfdc6e07602d42), C(2c6aa706129cc54c), C(17a706f59a49f086), + C(c7c1eec455217145), C(6adfdc6e07602d42), C(fb75fca30d848dd2), + C(5228c9ed14653ed4), C(953958910153b1a2), C(a430103a24f42a5d), + C(a485a53f)}, + {C(6a8aa0852a8c1f3b), C(c8f1e5e206a21016), C(2aa554aed1ebb524), + C(fc3e3c322cd5d89b), C(b7e3911dc2bd4ebb), C(fcd6da5e5fae833a), + C(51ed3c41f87f9118), C(fc3e3c322cd5d89b), C(b7e3911dc2bd4ebb), + C(fcd6da5e5fae833a), C(51ed3c41f87f9118), C(f31750cbc19c420a), + C(186dab1abada1d86), C(ca7f88cb894b3cd7), C(2859eeb1c373790c), + C(f62bc412)}, + {C(740428b4d45e5fb8), C(4c95a4ce922cb0a5), C(e99c3ba78feae796), + C(914f1ea2fdcebf5c), C(9566453c07cd0601), C(9841bf66d0462cd), + C(79140c1c18536aeb), C(914f1ea2fdcebf5c), C(9566453c07cd0601), + C(9841bf66d0462cd), C(79140c1c18536aeb), C(a963b930b05820c2), + C(6a7d9fa0c8c45153), C(64214c40d07cf39b), C(7057daf1d806c014), + C(8975a436)}, + {C(658b883b3a872b86), C(2f0e303f0f64827a), C(975337e23dc45e1), + C(99468a917986162b), C(7b31434aac6e0af0), C(f6915c1562c7d82f), + C(e4071d82a6dd71db), C(99468a917986162b), C(7b31434aac6e0af0), + C(f6915c1562c7d82f), C(e4071d82a6dd71db), C(5f5331f077b5d996), + C(7b314ba21b747a4f), C(5a73cb9521da17f5), C(12ed435fae286d86), + C(94ff7f41)}, + {C(6df0a977da5d27d4), C(891dd0e7cb19508), C(fd65434a0b71e680), + C(8799e4740e573c50), C(9e739b52d0f341e8), C(cdfd34ba7d7b03eb), + C(5061812ce6c88499), C(8799e4740e573c50), C(9e739b52d0f341e8), + C(cdfd34ba7d7b03eb), C(5061812ce6c88499), C(612b8d8f2411dc5c), + C(878bd883d29c7787), C(47a846727182bb), C(ec4949508c8b3b9a), C(760aa031)}, + {C(a900275464ae07ef), C(11f2cfda34beb4a3), C(9abf91e5a1c38e4), + C(8063d80ab26f3d6d), C(4177b4b9b4f0393f), C(6de42ba8672b9640), + C(d0bccdb72c51c18), C(8063d80ab26f3d6d), C(4177b4b9b4f0393f), + C(6de42ba8672b9640), C(d0bccdb72c51c18), C(af3f611b7f22cf12), + C(3863c41492645755), C(928c7a616a8f14f9), C(a82c78eb2eadc58b), + C(3bda76df)}, + {C(810bc8aa0c40bcb0), C(448a019568d01441), C(f60ec52f60d3aeae), + C(52c44837aa6dfc77), C(15d8d8fccdd6dc5b), C(345b793ccfa93055), + C(932160fe802ca975), C(52c44837aa6dfc77), C(15d8d8fccdd6dc5b), + C(345b793ccfa93055), C(932160fe802ca975), C(a624b0dd93fc18cd), + C(d955b254c2037f1e), C(e540533d370a664c), C(2ba4ec12514e9d7), C(498e2e65)}, + {C(22036327deb59ed7), C(adc05ceb97026a02), C(48bff0654262672b), + C(c791b313aba3f258), C(443c7757a4727bee), C(e30e4b2372171bdf), + C(f3db986c4156f3cb), C(c791b313aba3f258), C(443c7757a4727bee), + C(e30e4b2372171bdf), C(f3db986c4156f3cb), C(a939aefab97c6e15), + C(dbeb8acf1d5b0e6c), C(1e0eab667a795bba), C(80dd539902df4d50), + C(d38deb48)}, + {C(7d14dfa9772b00c8), C(595735efc7eeaed7), C(29872854f94c3507), + C(bc241579d8348401), C(16dc832804d728f0), C(e9cc71ae64e3f09e), + C(bef634bc978bac31), C(bc241579d8348401), C(16dc832804d728f0), + C(e9cc71ae64e3f09e), 
C(bef634bc978bac31), C(7f64b1fa2a9129e), + C(71d831bd530ac7f3), C(c7ad0a8a6d5be6f1), C(82a7d3a815c7aaab), + C(82b3fb6b)}, + {C(2d777cddb912675d), C(278d7b10722a13f9), C(f5c02bfb7cc078af), + C(4283001239888836), C(f44ca39a6f79db89), C(ed186122d71bcc9f), + C(8620017ab5f3ba3b), C(4283001239888836), C(f44ca39a6f79db89), + C(ed186122d71bcc9f), C(8620017ab5f3ba3b), C(e787472187f176c), + C(267e64c4728cf181), C(f1ba4b3007c15e30), C(8e3a75d5b02ecfc0), + C(e500e25f)}, + {C(f2ec98824e8aa613), C(5eb7e3fb53fe3bed), C(12c22860466e1dd4), + C(374dd4288e0b72e5), C(ff8916db706c0df4), C(cb1a9e85de5e4b8d), + C(d4d12afb67a27659), C(374dd4288e0b72e5), C(ff8916db706c0df4), + C(cb1a9e85de5e4b8d), C(d4d12afb67a27659), C(feb69095d1ba175a), + C(e2003aab23a47fad), C(8163a3ecab894b49), C(46d356674ce041f6), + C(bd2bb07c)}, + {C(5e763988e21f487f), C(24189de8065d8dc5), C(d1519d2403b62aa0), + C(9136456740119815), C(4d8ff7733b27eb83), C(ea3040bc0c717ef8), + C(7617ab400dfadbc), C(9136456740119815), C(4d8ff7733b27eb83), + C(ea3040bc0c717ef8), C(7617ab400dfadbc), C(fb336770c10b17a1), + C(6123b68b5b31f151), C(1e147d5f295eccf2), C(9ecbb1333556f977), + C(3a2b431d)}, + {C(48949dc327bb96ad), C(e1fd21636c5c50b4), C(3f6eb7f13a8712b4), + C(14cf7f02dab0eee8), C(6d01750605e89445), C(4f1cf4006e613b78), + C(57c40c4db32bec3b), C(14cf7f02dab0eee8), C(6d01750605e89445), + C(4f1cf4006e613b78), C(57c40c4db32bec3b), C(1fde5a347f4a326e), + C(cb5a54308adb0e3f), C(14994b2ba447a23c), C(7067d0abb4257b68), + C(7322a83d)}, + {C(b7c4209fb24a85c5), C(b35feb319c79ce10), C(f0d3de191833b922), + C(570d62758ddf6397), C(5e0204fb68a7b800), C(4383a9236f8b5a2b), + C(7bc1a64641d803a4), C(570d62758ddf6397), C(5e0204fb68a7b800), + C(4383a9236f8b5a2b), C(7bc1a64641d803a4), C(5434d61285099f7a), + C(d49449aacdd5dd67), C(97855ba0e9a7d75d), C(da67328062f3a62f), + C(a645ca1c)}, + {C(9c9e5be0943d4b05), C(b73dc69e45201cbb), C(aab17180bfe5083d), + C(c738a77a9a55f0e2), C(705221addedd81df), C(fd9bd8d397abcfa3), + C(8ccf0004aa86b795), C(c738a77a9a55f0e2), C(705221addedd81df), + C(fd9bd8d397abcfa3), C(8ccf0004aa86b795), C(2bb5db2280068206), + C(8c22d29f307a01d), C(274a22de02f473c8), C(b8791870f4268182), C(8909a45a)}, + {C(3898bca4dfd6638d), C(f911ff35efef0167), C(24bdf69e5091fc88), + C(9b82567ab6560796), C(891b69462b41c224), C(8eccc7e4f3af3b51), + C(381e54c3c8f1c7d0), C(9b82567ab6560796), C(891b69462b41c224), + C(8eccc7e4f3af3b51), C(381e54c3c8f1c7d0), C(c80fbc489a558a55), + C(1ba88e062a663af7), C(af7b1ef1c0116303), C(bd20e1a5a6b1a0cd), + C(bd30074c)}, + {C(5b5d2557400e68e7), C(98d610033574cee), C(dfd08772ce385deb), + C(3c13e894365dc6c2), C(26fc7bbcda3f0ef), C(dbb71106cdbfea36), + C(785239a742c6d26d), C(3c13e894365dc6c2), C(26fc7bbcda3f0ef), + C(dbb71106cdbfea36), C(785239a742c6d26d), C(f810c415ae05b2f4), + C(bb9b9e7398526088), C(70128f1bf830a32b), C(bcc73f82b6410899), + C(c17cf001)}, + {C(a927ed8b2bf09bb6), C(606e52f10ae94eca), C(71c2203feb35a9ee), + C(6e65ec14a8fb565), C(34bff6f2ee5a7f79), C(2e329a5be2c011b), + C(73161c93331b14f9), C(6e65ec14a8fb565), C(34bff6f2ee5a7f79), + C(2e329a5be2c011b), C(73161c93331b14f9), C(15d13f2408aecf88), + C(9f5b61b8a4b55b31), C(8fe25a43b296dba6), C(bdad03b7300f284e), + C(26ffd25a)}, + {C(8d25746414aedf28), C(34b1629d28b33d3a), C(4d5394aea5f82d7b), + C(379f76458a3c8957), C(79dd080f9843af77), C(c46f0a7847f60c1d), + C(af1579c5797703cc), C(379f76458a3c8957), C(79dd080f9843af77), + C(c46f0a7847f60c1d), C(af1579c5797703cc), C(8b7d31f338755c14), + C(2eff97679512aaa8), C(df07d68e075179ed), C(c8fa6c7a729e7f1f), + C(f1d8ce3c)}, + {C(b5bbdb73458712f2), 
C(1ff887b3c2a35137), C(7f7231f702d0ace9), + C(1e6f0910c3d25bd8), C(ad9e250862102467), C(1c842a07abab30cd), + C(cd8124176bac01ac), C(1e6f0910c3d25bd8), C(ad9e250862102467), + C(1c842a07abab30cd), C(cd8124176bac01ac), C(ea6ebe7a79b67edc), + C(73f598ac9db26713), C(4f4e72d7460b8fc), C(365dc4b9fdf13f21), C(3ee8fb17)}, + {C(3d32a26e3ab9d254), C(fc4070574dc30d3a), C(f02629579c2b27c9), + C(b1cf09b0184a4834), C(5c03db48eb6cc159), C(f18c7fcf34d1df47), + C(dfb043419ecf1fa9), C(b1cf09b0184a4834), C(5c03db48eb6cc159), + C(f18c7fcf34d1df47), C(dfb043419ecf1fa9), C(dcd78d13f9ca658f), + C(4355d408ffe8e49f), C(81eefee908b593b4), C(590c213c20e981a3), + C(a77acc2a)}, + {C(9371d3c35fa5e9a5), C(42967cf4d01f30), C(652d1eeae704145c), + C(ceaf1a0d15234f15), C(1450a54e45ba9b9), C(65e9c1fd885aa932), + C(354d4bc034ba8cbe), C(ceaf1a0d15234f15), C(1450a54e45ba9b9), + C(65e9c1fd885aa932), C(354d4bc034ba8cbe), C(8fd4ff484c08fb4b), + C(bf46749866f69ba0), C(cf1c21ede82c9477), C(4217548c43da109), C(f4556dee)}, + {C(cbaa3cb8f64f54e0), C(76c3b48ee5c08417), C(9f7d24e87e61ce9), + C(85b8e53f22e19507), C(bb57137739ca486b), C(c77f131cca38f761), + C(c56ac3cf275be121), C(85b8e53f22e19507), C(bb57137739ca486b), + C(c77f131cca38f761), C(c56ac3cf275be121), C(9ec1a6c9109d2685), + C(3dad0922e76afdb0), C(fd58cbf952958103), C(7b04c908e78639a1), + C(de287a64)}, + {C(b2e23e8116c2ba9f), C(7e4d9c0060101151), C(3310da5e5028f367), + C(adc52dddb76f6e5e), C(4aad4e925a962b68), C(204b79b7f7168e64), + C(df29ed6671c36952), C(adc52dddb76f6e5e), C(4aad4e925a962b68), + C(204b79b7f7168e64), C(df29ed6671c36952), C(e02927cac396d210), + C(5d500e71742b638a), C(5c9998af7f27b124), C(3fba9a2573dc2f7), C(878e55b9)}, + {C(8aa77f52d7868eb9), C(4d55bd587584e6e2), C(d2db37041f495f5), + C(ce030d15b5fe2f4), C(86b4a7a0780c2431), C(ee070a9ae5b51db7), + C(edc293d9595be5d8), C(ce030d15b5fe2f4), C(86b4a7a0780c2431), + C(ee070a9ae5b51db7), C(edc293d9595be5d8), C(3dfc5ec108260a2b), + C(8afe28c7123bf4e2), C(da82ef38023a7a5f), C(3e1f77b0174b77c3), C(7648486)}, + {C(858fea922c7fe0c3), C(cfe8326bf733bc6f), C(4e5e2018cf8f7dfc), + C(64fd1bc011e5bab7), C(5c9e858728015568), C(97ac42c2b00b29b1), + C(7f89caf08c109aee), C(64fd1bc011e5bab7), C(5c9e858728015568), + C(97ac42c2b00b29b1), C(7f89caf08c109aee), C(9a8af34fd0e9dacf), + C(bbc54161aa1507e0), C(7cda723ccbbfe5ee), C(2c289d839fb93f58), + C(57ac0fb1)}, + {C(46ef25fdec8392b1), C(e48d7b6d42a5cd35), C(56a6fe1c175299ca), + C(fdfa836b41dcef62), C(2f8db8030e847e1b), C(5ba0a49ac4f9b0f8), + C(dae897ed3e3fce44), C(fdfa836b41dcef62), C(2f8db8030e847e1b), + C(5ba0a49ac4f9b0f8), C(dae897ed3e3fce44), C(9c432e31aef626e7), + C(9a36e1c6cd6e3dd), C(5095a167c34d19d), C(a70005cfa6babbea), C(d01967ca)}, + {C(8d078f726b2df464), C(b50ee71cdcabb299), C(f4af300106f9c7ba), + C(7d222caae025158a), C(cc028d5fd40241b9), C(dd42515b639e6f97), + C(e08e86531a58f87f), C(7d222caae025158a), C(cc028d5fd40241b9), + C(dd42515b639e6f97), C(e08e86531a58f87f), C(d93612c835b37d7b), + C(91dd61729b2fa7f4), C(ba765a1bdda09db7), C(55258b451b2b1297), + C(96ecdf74)}, + {C(35ea86e6960ca950), C(34fe1fe234fc5c76), C(a00207a3dc2a72b7), + C(80395e48739e1a67), C(74a67d8f7f43c3d7), C(dd2bdd1d62246c6e), + C(a1f44298ba80acf6), C(80395e48739e1a67), C(74a67d8f7f43c3d7), + C(dd2bdd1d62246c6e), C(a1f44298ba80acf6), C(ad86d86c187bf38), + C(26feea1f2eee240d), C(ed7f1fd066b23897), C(a768cf1e0fbb502), C(779f5506)}, + {C(8aee9edbc15dd011), C(51f5839dc8462695), C(b2213e17c37dca2d), + C(133b299a939745c5), C(796e2aac053f52b3), C(e8d9fe1521a4a222), + C(819a8863e5d1c290), C(133b299a939745c5), 
C(796e2aac053f52b3), + C(e8d9fe1521a4a222), C(819a8863e5d1c290), C(c0737f0fe34d36ad), + C(e6d6d4a267a5cc31), C(98300a7911674c23), C(bef189661c257098), + C(3c94c2de)}, + {C(c3e142ba98432dda), C(911d060cab126188), C(b753fbfa8365b844), + C(fd1a9ba5e71b08a2), C(7ac0dc2ed7778533), C(b543161ff177188a), + C(492fc08a6186f3f4), C(fd1a9ba5e71b08a2), C(7ac0dc2ed7778533), + C(b543161ff177188a), C(492fc08a6186f3f4), C(fc4745f516afd3b6), + C(88c30370a53080e), C(65a1bb34abc465e2), C(abbd14662911c8b3), C(39f98faf)}, + {C(123ba6b99c8cd8db), C(448e582672ee07c4), C(cebe379292db9e65), + C(938f5bbab544d3d6), C(d2a95f9f2d376d73), C(68b2f16149e81aa3), + C(ad7e32f82d86c79d), C(938f5bbab544d3d6), C(d2a95f9f2d376d73), + C(68b2f16149e81aa3), C(ad7e32f82d86c79d), C(4574015ae8626ce2), + C(455aa6137386a582), C(658ad2542e8ec20), C(e31d7be2ca35d00), C(7af31199)}, + {C(ba87acef79d14f53), C(b3e0fcae63a11558), C(d5ac313a593a9f45), + C(eea5f5a9f74af591), C(578710bcc36fbea2), C(7a8393432188931d), + C(705cfc5ec7cc172), C(eea5f5a9f74af591), C(578710bcc36fbea2), + C(7a8393432188931d), C(705cfc5ec7cc172), C(da85ebe5fc427976), + C(bfa5c7a454df54c8), C(4632b72a81bf66d2), C(5dd72877db539ee2), + C(e341a9d6)}, + {C(bcd3957d5717dc3), C(2da746741b03a007), C(873816f4b1ece472), + C(2b826f1a2c08c289), C(da50f56863b55e74), C(b18712f6b3eed83b), + C(bdc7cc05ab4c685f), C(2b826f1a2c08c289), C(da50f56863b55e74), + C(b18712f6b3eed83b), C(bdc7cc05ab4c685f), C(9e45fb833d1b0af), + C(d7213081db29d82e), C(d2a6b6c6a09ed55e), C(98a7686cba323ca9), + C(ca24aeeb)}, + {C(61442ff55609168e), C(6447c5fc76e8c9cf), C(6a846de83ae15728), + C(effc2663cffc777f), C(93214f8f463afbed), C(a156ef06066f4e4e), + C(a407b6ed8769d51e), C(effc2663cffc777f), C(93214f8f463afbed), + C(a156ef06066f4e4e), C(a407b6ed8769d51e), C(bb2f9ed29745c02a), + C(981eecd435b36ad9), C(461a5a05fb9cdff4), C(bd6cb2a87b9f910c), + C(b2252b57)}, + {C(dbe4b1b2d174757f), C(506512da18712656), C(6857f3e0b8dd95f), + C(5a4fc2728a9bb671), C(ebb971522ec38759), C(1a5a093e6cf1f72b), + C(729b057fe784f504), C(5a4fc2728a9bb671), C(ebb971522ec38759), + C(1a5a093e6cf1f72b), C(729b057fe784f504), C(71fcbf42a767f9cf), + C(114cfe772da6cdd), C(60cdf9cb629d9d7a), C(e270d10ad088b24e), C(72c81da1)}, + {C(531e8e77b363161c), C(eece0b43e2dae030), C(8294b82c78f34ed1), + C(e777b1fd580582f2), C(7b880f58da112699), C(562c6b189a6333f4), + C(139d64f88a611d4), C(e777b1fd580582f2), C(7b880f58da112699), + C(562c6b189a6333f4), C(139d64f88a611d4), C(53d8ef17eda64fa4), + C(bf3eded14dc60a04), C(2b5c559cf5ec07c5), C(8895f7339d03a48a), + C(6b9fce95)}, + {C(f71e9c926d711e2b), C(d77af2853a4ceaa1), C(9aa0d6d76a36fae7), + C(dd16cd0fbc08393), C(29a414a5d8c58962), C(72793d8d1022b5b2), + C(2e8e69cf7cbffdf0), C(dd16cd0fbc08393), C(29a414a5d8c58962), + C(72793d8d1022b5b2), C(2e8e69cf7cbffdf0), C(3721c0473aa99c9a), + C(1cff4ed9c31cd91c), C(4990735033cc482b), C(7fdf8c701c72f577), + C(19399857)}, + {C(cb20ac28f52df368), C(e6705ee7880996de), C(9b665cc3ec6972f2), + C(4260e8c254e9924b), C(f197a6eb4591572d), C(8e867ff0fb7ab27c), + C(f95502fb503efaf3), C(4260e8c254e9924b), C(f197a6eb4591572d), + C(8e867ff0fb7ab27c), C(f95502fb503efaf3), C(30c41876b08e3e22), + C(958e2419e3cd22f4), C(f0f3aa1fe119a107), C(481662310a379100), + C(3c57a994)}, + {C(e4a794b4acb94b55), C(89795358057b661b), C(9c4cdcec176d7a70), + C(4890a83ee435bc8b), C(d8c1c00fceb00914), C(9e7111ba234f900f), + C(eb8dbab364d8b604), C(4890a83ee435bc8b), C(d8c1c00fceb00914), + C(9e7111ba234f900f), C(eb8dbab364d8b604), C(b3261452963eebb), + C(6cf94b02792c4f95), C(d88fa815ef1e8fc), C(2d687af66604c73), 
C(c053e729)}, + {C(cb942e91443e7208), C(e335de8125567c2a), C(d4d74d268b86df1f), + C(8ba0fdd2ffc8b239), C(f413b366c1ffe02f), C(c05b2717c59a8a28), + C(981188eab4fcc8fb), C(8ba0fdd2ffc8b239), C(f413b366c1ffe02f), + C(c05b2717c59a8a28), C(981188eab4fcc8fb), C(e563f49a1d9072ba), + C(3c6a3aa4a26367dc), C(ba0db13448653f34), C(31065d756074d7d6), + C(51cbbba7)}, + {C(ecca7563c203f7ba), C(177ae2423ef34bb2), C(f60b7243400c5731), + C(cf1edbfe7330e94e), C(881945906bcb3cc6), C(4acf0293244855da), + C(65ae042c1c2a28c2), C(cf1edbfe7330e94e), C(881945906bcb3cc6), + C(4acf0293244855da), C(65ae042c1c2a28c2), C(b25fa0a1cab33559), + C(d98e8daa28124131), C(fce17f50b9c351b3), C(3f995ccf7386864b), + C(1acde79a)}, + {C(1652cb940177c8b5), C(8c4fe7d85d2a6d6d), C(f6216ad097e54e72), + C(f6521b912b368ae6), C(a9fe4eff81d03e73), C(d6f623629f80d1a3), + C(2b9604f32cb7dc34), C(f6521b912b368ae6), C(a9fe4eff81d03e73), + C(d6f623629f80d1a3), C(2b9604f32cb7dc34), C(2a43d84dcf59c7e2), + C(d0a197c70c5dae0b), C(6e84d4bbc71d76a0), C(c7e94620378c6cb2), + C(2d160d13)}, + {C(31fed0fc04c13ce8), C(3d5d03dbf7ff240a), C(727c5c9b51581203), + C(6b5ffc1f54fecb29), C(a8e8e7ad5b9a21d9), C(c4d5a32cd6aac22d), + C(d7e274ad22d4a79a), C(6b5ffc1f54fecb29), C(a8e8e7ad5b9a21d9), + C(c4d5a32cd6aac22d), C(d7e274ad22d4a79a), C(368841ea5731a112), + C(feaf7bc2e73ca48f), C(636fb272e9ea1f6), C(5d9cb7580c3f6207), C(787f5801)}, + {C(e7b668947590b9b3), C(baa41ad32938d3fa), C(abcbc8d4ca4b39e4), + C(381ee1b7ea534f4e), C(da3759828e3de429), C(3e015d76729f9955), + C(cbbec51a6485fbde), C(381ee1b7ea534f4e), C(da3759828e3de429), + C(3e015d76729f9955), C(cbbec51a6485fbde), C(9b86605281f20727), + C(fc6fcf508676982a), C(3b135f7a813a1040), C(d3a4706bea1db9c9), + C(c9629828)}, + {C(1de2119923e8ef3c), C(6ab27c096cf2fe14), C(8c3658edca958891), + C(4cc8ed3ada5f0f2), C(4a496b77c1f1c04e), C(9085b0a862084201), + C(a1894bde9e3dee21), C(4cc8ed3ada5f0f2), C(4a496b77c1f1c04e), + C(9085b0a862084201), C(a1894bde9e3dee21), C(367fb472dc5b277d), + C(7d39ccca16fc6745), C(763f988d70db9106), C(a8b66f7fecb70f02), + C(be139231)}, + {C(1269df1e69e14fa7), C(992f9d58ac5041b7), C(e97fcf695a7cbbb4), + C(e5d0549802d15008), C(424c134ecd0db834), C(6fc44fd91be15c6c), + C(a1a5ef95d50e537d), C(e5d0549802d15008), C(424c134ecd0db834), + C(6fc44fd91be15c6c), C(a1a5ef95d50e537d), C(d1e3daf5d05f5308), + C(4c7f81600eaa1327), C(109d1b8d1f9d0d2b), C(871e8699e0aeb862), + C(7df699ef)}, + {C(820826d7aba567ff), C(1f73d28e036a52f3), C(41c4c5a73f3b0893), + C(aa0d74d4a98db89b), C(36fd486d07c56e1d), C(d0ad23cbb6660d8a), + C(1264a84665b35e19), C(aa0d74d4a98db89b), C(36fd486d07c56e1d), + C(d0ad23cbb6660d8a), C(1264a84665b35e19), C(789682bf7d781b33), + C(6bfa6abd2fb5722d), C(6779cb3623d33900), C(435ca5214e1ee5f0), + C(8ce6b96d)}, + {C(ffe0547e4923cef9), C(3534ed49b9da5b02), C(548a273700fba03d), + C(28ac84ca70958f7e), C(d8ae575a68faa731), C(2aaaee9b9dcffd4c), + C(6c7faab5c285c6da), C(28ac84ca70958f7e), C(d8ae575a68faa731), + C(2aaaee9b9dcffd4c), C(6c7faab5c285c6da), C(45d94235f99ba78f), + C(ab5ea16f39497f5b), C(fb4d6c86fccbdca3), C(8104e6310a5fd2c7), + C(6f9ed99c)}, + {C(72da8d1b11d8bc8b), C(ba94b56b91b681c6), C(4e8cc51bd9b0fc8c), + C(43505ed133be672a), C(e8f2f9d973c2774e), C(677b9b9c7cad6d97), + C(4e1f5d56ef17b906), C(43505ed133be672a), C(e8f2f9d973c2774e), + C(677b9b9c7cad6d97), C(4e1f5d56ef17b906), C(eea3a6038f983767), + C(87109f077f86db01), C(ecc1ca41f74d61cc), C(34a87e86e83bed17), + C(e0244796)}, + {C(d62ab4e3f88fc797), C(ea86c7aeb6283ae4), C(b5b93e09a7fe465), + C(4344a1a0134afe2), C(ff5c17f02b62341d), 
C(3214c6a587ce4644), + C(a905e7ed0629d05c), C(4344a1a0134afe2), C(ff5c17f02b62341d), + C(3214c6a587ce4644), C(a905e7ed0629d05c), C(b5c72690cd716e82), + C(7c6097649e6ebe7b), C(7ceee8c6e56a4dcd), C(80ca849dc53eb9e4), + C(4ccf7e75)}, + {C(d0f06c28c7b36823), C(1008cb0874de4bb8), C(d6c7ff816c7a737b), + C(489b697fe30aa65f), C(4da0fb621fdc7817), C(dc43583b82c58107), + C(4b0261debdec3cd6), C(489b697fe30aa65f), C(4da0fb621fdc7817), + C(dc43583b82c58107), C(4b0261debdec3cd6), C(a9748d7b6c0e016c), + C(7e8828f7ba4b034b), C(da0fa54348a2512a), C(ebf9745c0962f9ad), + C(915cef86)}, + {C(99b7042460d72ec6), C(2a53e5e2b8e795c2), C(53a78132d9e1b3e3), + C(c043e67e6fc64118), C(ff0abfe926d844d3), C(f2a9fe5db2e910fe), + C(ce352cdc84a964dd), C(c043e67e6fc64118), C(ff0abfe926d844d3), + C(f2a9fe5db2e910fe), C(ce352cdc84a964dd), C(b89bc028aa5e6063), + C(a354e7fdac04459c), C(68d6547e6e980189), C(c968dddfd573773e), + C(5cb59482)}, + {C(4f4dfcfc0ec2bae5), C(841233148268a1b8), C(9248a76ab8be0d3), + C(334c5a25b5903a8c), C(4c94fef443122128), C(743e7d8454655c40), + C(1ab1e6d1452ae2cd), C(334c5a25b5903a8c), C(4c94fef443122128), + C(743e7d8454655c40), C(1ab1e6d1452ae2cd), C(fec766de4a8e476c), + C(cc0929da9567e71b), C(5f9ef5b5f150c35a), C(87659cabd649768f), + C(6ca3f532)}, + {C(fe86bf9d4422b9ae), C(ebce89c90641ef9c), C(1c84e2292c0b5659), + C(8bde625a10a8c50d), C(eb8271ded1f79a0b), C(14dc6844f0de7a3c), + C(f85b2f9541e7e6da), C(8bde625a10a8c50d), C(eb8271ded1f79a0b), + C(14dc6844f0de7a3c), C(f85b2f9541e7e6da), C(2fe22cfd1683b961), + C(ea1d75c5b7aa01ca), C(9eef60a44876bb95), C(950c818e505c6f7f), + C(e24f3859)}, + {C(a90d81060932dbb0), C(8acfaa88c5fbe92b), C(7c6f3447e90f7f3f), + C(dd52fc14c8dd3143), C(1bc7508516e40628), C(3059730266ade626), + C(ffa526822f391c2), C(dd52fc14c8dd3143), C(1bc7508516e40628), + C(3059730266ade626), C(ffa526822f391c2), C(e25232d7afc8a406), + C(d2b8a5a3f3b5f670), C(6630f33edb7dfe32), C(c71250ba68c4ea86), + C(adf5a9c7)}, + {C(17938a1b0e7f5952), C(22cadd2f56f8a4be), C(84b0d1183d5ed7c1), + C(c1336b92fef91bf6), C(80332a3945f33fa9), C(a0f68b86f726ff92), + C(a3db5282cf5f4c0b), C(c1336b92fef91bf6), C(80332a3945f33fa9), + C(a0f68b86f726ff92), C(a3db5282cf5f4c0b), C(82640b6fc4916607), + C(2dc2a3aa1a894175), C(8b4c852bdee7cc9), C(10b9d0a08b55ff83), C(32264b75)}, + {C(de9e0cb0e16f6e6d), C(238e6283aa4f6594), C(4fb9c914c2f0a13b), + C(497cb912b670f3b), C(d963a3f02ff4a5b6), C(4fccefae11b50391), + C(42ba47db3f7672f), C(497cb912b670f3b), C(d963a3f02ff4a5b6), + C(4fccefae11b50391), C(42ba47db3f7672f), C(1d6b655a1889feef), + C(5f319abf8fafa19f), C(715c2e49deb14620), C(8d9153082ecdcea4), + C(a64b3376)}, + {C(6d4b876d9b146d1a), C(aab2d64ce8f26739), C(d315f93600e83fe5), + C(2fe9fabdbe7fdd4), C(755db249a2d81a69), C(f27929f360446d71), + C(79a1bf957c0c1b92), C(2fe9fabdbe7fdd4), C(755db249a2d81a69), + C(f27929f360446d71), C(79a1bf957c0c1b92), C(3c8a28d4c936c9cd), + C(df0d3d13b2c6a902), C(c76702dd97cd2edd), C(1aa220f7be16517), C(d33890e)}, + {C(e698fa3f54e6ea22), C(bd28e20e7455358c), C(9ace161f6ea76e66), + C(d53fb7e3c93a9e4), C(737ae71b051bf108), C(7ac71feb84c2df42), + C(3d8075cd293a15b4), C(d53fb7e3c93a9e4), C(737ae71b051bf108), + C(7ac71feb84c2df42), C(3d8075cd293a15b4), C(bf8cee5e095d8a7c), + C(e7086b3c7608143a), C(e55b0c2fa938d70c), C(fffb5f58e643649c), + C(926d4b63)}, + {C(7bc0deed4fb349f7), C(1771aff25dc722fa), C(19ff0644d9681917), + C(cf7d7f25bd70cd2c), C(9464ed9baeb41b4f), C(b9064f5c3cb11b71), + C(237e39229b012b20), C(cf7d7f25bd70cd2c), C(9464ed9baeb41b4f), + C(b9064f5c3cb11b71), C(237e39229b012b20), C(dd54d3f5d982dffe), + 
C(7fc7562dbfc81dbf), C(5b0dd1924f70945), C(f1760537d8261135), C(d51ba539)}, + {C(db4b15e88533f622), C(256d6d2419b41ce9), C(9d7c5378396765d5), + C(9040e5b936b8661b), C(276e08fa53ac27fd), C(8c944d39c2bdd2cc), + C(e2514c9802a5743c), C(9040e5b936b8661b), C(276e08fa53ac27fd), + C(8c944d39c2bdd2cc), C(e2514c9802a5743c), C(e82107b11ac90386), + C(7d6a22bc35055e6), C(fd6ea9d1c438d8ae), C(be6015149e981553), C(7f37636d)}, + {C(922834735e86ecb2), C(363382685b88328e), C(e9c92960d7144630), + C(8431b1bfd0a2379c), C(90383913aea283f9), C(a6163831eb4924d2), + C(5f3921b4f9084aee), C(8431b1bfd0a2379c), C(90383913aea283f9), + C(a6163831eb4924d2), C(5f3921b4f9084aee), C(7a70061a1473e579), + C(5b19d80dcd2c6331), C(6196b97931faad27), C(869bf6828e237c3f), + C(b98026c0)}, + {C(30f1d72c812f1eb8), C(b567cd4a69cd8989), C(820b6c992a51f0bc), + C(c54677a80367125e), C(3204fbdba462e606), C(8563278afc9eae69), + C(262147dd4bf7e566), C(c54677a80367125e), C(3204fbdba462e606), + C(8563278afc9eae69), C(262147dd4bf7e566), C(2178b63e7ee2d230), + C(e9c61ad81f5bff26), C(9af7a81b3c501eca), C(44104a3859f0238f), + C(b877767e)}, + {C(168884267f3817e9), C(5b376e050f637645), C(1c18314abd34497a), + C(9598f6ab0683fcc2), C(1c805abf7b80e1ee), C(dec9ac42ee0d0f32), + C(8cd72e3912d24663), C(9598f6ab0683fcc2), C(1c805abf7b80e1ee), + C(dec9ac42ee0d0f32), C(8cd72e3912d24663), C(1f025d405f1c1d87), + C(bf7b6221e1668f8f), C(52316f64e692dbb0), C(7bf43df61ec51b39), C(aefae77)}, + {C(82e78596ee3e56a7), C(25697d9c87f30d98), C(7600a8342834924d), + C(6ba372f4b7ab268b), C(8c3237cf1fe243df), C(3833fc51012903df), + C(8e31310108c5683f), C(6ba372f4b7ab268b), C(8c3237cf1fe243df), + C(3833fc51012903df), C(8e31310108c5683f), C(126593715c2de429), + C(48ca8f35a3f54b90), C(b9322b632f4f8b0), C(926bb169b7337693), C(f686911)}, + {C(aa2d6cf22e3cc252), C(9b4dec4f5e179f16), C(76fb0fba1d99a99a), + C(9a62af3dbba140da), C(27857ea044e9dfc1), C(33abce9da2272647), + C(b22a7993aaf32556), C(9a62af3dbba140da), C(27857ea044e9dfc1), + C(33abce9da2272647), C(b22a7993aaf32556), C(bf8f88f8019bedf0), + C(ed2d7f01fb273905), C(6b45f15901b481cd), C(f88ebb413ba6a8d5), + C(3deadf12)}, + {C(7bf5ffd7f69385c7), C(fc077b1d8bc82879), C(9c04e36f9ed83a24), + C(82065c62e6582188), C(8ef787fd356f5e43), C(2922e53e36e17dfa), + C(9805f223d385010b), C(82065c62e6582188), C(8ef787fd356f5e43), + C(2922e53e36e17dfa), C(9805f223d385010b), C(692154f3491b787d), + C(e7e64700e414fbf), C(757d4d4ab65069a0), C(cd029446a8e348e2), C(ccf02a4e)}, + {C(e89c8ff9f9c6e34b), C(f54c0f669a49f6c4), C(fc3e46f5d846adef), + C(22f2aa3df2221cc), C(f66fea90f5d62174), C(b75defaeaa1dd2a7), + C(9b994cd9a7214fd5), C(22f2aa3df2221cc), C(f66fea90f5d62174), + C(b75defaeaa1dd2a7), C(9b994cd9a7214fd5), C(fac675a31804b773), + C(98bcb3b820c50fc6), C(e14af64d28cf0885), C(27466fbd2b360eb5), + C(176c1722)}, + {C(a18fbcdccd11e1f4), C(8248216751dfd65e), C(40c089f208d89d7c), + C(229b79ab69ae97d), C(a87aabc2ec26e582), C(be2b053721eb26d2), + C(10febd7f0c3d6fcb), C(229b79ab69ae97d), C(a87aabc2ec26e582), + C(be2b053721eb26d2), C(10febd7f0c3d6fcb), C(9cc5b9b2f6e3bf7b), + C(655d8495fe624a86), C(6381a9f3d1f2bd7e), C(79ebabbfc25c83e2), C(26f82ad)}, + {C(2d54f40cc4088b17), C(59d15633b0cd1399), C(a8cc04bb1bffd15b), + C(d332cdb073d8dc46), C(272c56466868cb46), C(7e7fcbe35ca6c3f3), + C(ee8f51e5a70399d4), C(d332cdb073d8dc46), C(272c56466868cb46), + C(7e7fcbe35ca6c3f3), C(ee8f51e5a70399d4), C(16737a9c7581fe7b), + C(ed04bf52f4b75dcb), C(9707ffb36bd30c1a), C(1390f236fdc0de3e), + C(b5244f42)}, + {C(69276946cb4e87c7), C(62bdbe6183be6fa9), C(3ba9773dac442a1a), + 
C(702e2afc7f5a1825), C(8c49b11ea8151fdc), C(caf3fef61f5a86fa), + C(ef0b2ee8649d7272), C(702e2afc7f5a1825), C(8c49b11ea8151fdc), + C(caf3fef61f5a86fa), C(ef0b2ee8649d7272), C(9e34a4e08d9441e1), + C(7bdc0cd64d5af533), C(a926b14d99e3d868), C(fca923a17788cce4), + C(49a689e5)}, + {C(668174a3f443df1d), C(407299392da1ce86), C(c2a3f7d7f2c5be28), + C(a590b202a7a5807b), C(968d2593f7ccb54e), C(9dd8d669e3e95dec), + C(ee0cc5dd58b6e93a), C(a590b202a7a5807b), C(968d2593f7ccb54e), + C(9dd8d669e3e95dec), C(ee0cc5dd58b6e93a), C(ac65d5a9466fb483), + C(221be538b2c9d806), C(5cbe9441784f9fd9), C(d4c7d5d6e3c122b8), C(59fcdd3)}, + {C(5e29be847bd5046), C(b561c7f19c8f80c3), C(5e5abd5021ccaeaf), + C(7432d63888e0c306), C(74bbceeed479cb71), C(6471586599575fdf), + C(6a859ad23365cba2), C(7432d63888e0c306), C(74bbceeed479cb71), + C(6471586599575fdf), C(6a859ad23365cba2), C(f9ceec84acd18dcc), + C(74a242ff1907437c), C(f70890194e1ee913), C(777dfcb4bb01f0ba), + C(4f4b04e9)}, + {C(cd0d79f2164da014), C(4c386bb5c5d6ca0c), C(8e771b03647c3b63), + C(69db23875cb0b715), C(ada8dd91504ae37f), C(46bf18dbf045ed6a), + C(e1b5f67b0645ab63), C(69db23875cb0b715), C(ada8dd91504ae37f), + C(46bf18dbf045ed6a), C(e1b5f67b0645ab63), C(877be8f5dcddff4), + C(6d471b5f9ca2e2d1), C(802c86d6f495b9bb), C(a1f9b9b22b3be704), + C(8b00f891)}, + {C(e0e6fc0b1628af1d), C(29be5fb4c27a2949), C(1c3f781a604d3630), + C(c4af7faf883033aa), C(9bd296c4e9453cac), C(ca45426c1f7e33f9), + C(a6bbdcf7074d40c5), C(c4af7faf883033aa), C(9bd296c4e9453cac), + C(ca45426c1f7e33f9), C(a6bbdcf7074d40c5), C(e13a005d7142733b), + C(c02b7925c5eeefaf), C(d39119a60441e2d5), C(3c24c710df8f4d43), + C(16e114f3)}, + {C(2058927664adfd93), C(6e8f968c7963baa5), C(af3dced6fff7c394), + C(42e34cf3d53c7876), C(9cddbb26424dc5e), C(64f6340a6d8eddad), + C(2196e488eb2a3a4b), C(42e34cf3d53c7876), C(9cddbb26424dc5e), + C(64f6340a6d8eddad), C(2196e488eb2a3a4b), C(c9e9da25911a16fd), + C(e21b4683f3e196a8), C(cb80bf1a4c6fdbb4), C(53792e9b3c3e67f8), + C(d6b6dadc)}, + {C(dc107285fd8e1af7), C(a8641a0609321f3f), C(db06e89ffdc54466), + C(bcc7a81ed5432429), C(b6d7bdc6ad2e81f1), C(93605ec471aa37db), + C(a2a73f8a85a8e397), C(bcc7a81ed5432429), C(b6d7bdc6ad2e81f1), + C(93605ec471aa37db), C(a2a73f8a85a8e397), C(10a012b8ca7ac24b), + C(aac5fd63351595cf), C(5bb4c648a226dea0), C(9d11ecb2b5c05c5f), + C(897e20ac)}, + {C(fbba1afe2e3280f1), C(755a5f392f07fce), C(9e44a9a15402809a), + C(6226a32e25099848), C(ea895661ecf53004), C(4d7e0158db2228b9), + C(e5a7d82922f69842), C(6226a32e25099848), C(ea895661ecf53004), + C(4d7e0158db2228b9), C(e5a7d82922f69842), C(2cea7713b69840ca), + C(18de7b9ae938375b), C(f127cca08f3cc665), C(b1c22d727665ad2), C(f996e05d)}, + {C(bfa10785ddc1011b), C(b6e1c4d2f670f7de), C(517d95604e4fcc1f), + C(ca6552a0dfb82c73), C(b024cdf09e34ba07), C(66cd8c5a95d7393b), + C(e3939acf790d4a74), C(ca6552a0dfb82c73), C(b024cdf09e34ba07), + C(66cd8c5a95d7393b), C(e3939acf790d4a74), C(97827541a1ef051e), + C(ac2fce47ebe6500c), C(b3f06d3bddf3bd6a), C(1d74afb25e1ce5fe), + C(c4306af6)}, + {C(534cc35f0ee1eb4e), C(b703820f1f3b3dce), C(884aa164cf22363), + C(f14ef7f47d8a57a3), C(80d1f86f2e061d7c), C(401d6c2f151b5a62), + C(e988460224108944), C(f14ef7f47d8a57a3), C(80d1f86f2e061d7c), + C(401d6c2f151b5a62), C(e988460224108944), C(7804d4135f68cd19), + C(5487b4b39e69fe8e), C(8cc5999015358a27), C(8f3729b61c2d5601), + C(6dcad433)}, + {C(7ca6e3933995dac), C(fd118c77daa8188), C(3aceb7b5e7da6545), + C(c8389799445480db), C(5389f5df8aacd50d), C(d136581f22fab5f), + C(c2f31f85991da417), C(c8389799445480db), C(5389f5df8aacd50d), + C(d136581f22fab5f), 
C(c2f31f85991da417), C(aefbf9ff84035a43), + C(8accbaf44adadd7c), C(e57f3657344b67f5), C(21490e5e8abdec51), + C(3c07374d)}, + {C(f0d6044f6efd7598), C(e044d6ba4369856e), C(91968e4f8c8a1a4c), + C(70bd1968996bffc2), C(4c613de5d8ab32ac), C(fe1f4f97206f79d8), + C(ac0434f2c4e213a9), C(70bd1968996bffc2), C(4c613de5d8ab32ac), + C(fe1f4f97206f79d8), C(ac0434f2c4e213a9), C(7490e9d82cfe22ca), + C(5fbbf7f987454238), C(c39e0dc8368ce949), C(22201d3894676c71), + C(f0f4602c)}, + {C(3d69e52049879d61), C(76610636ea9f74fe), C(e9bf5602f89310c0), + C(8eeb177a86053c11), C(e390122c345f34a2), C(1e30e47afbaaf8d6), + C(7b892f68e5f91732), C(8eeb177a86053c11), C(e390122c345f34a2), + C(1e30e47afbaaf8d6), C(7b892f68e5f91732), C(b87922525fa44158), + C(f440a1ee1a1a766b), C(ee8efad279d08c5c), C(421f910c5b60216e), + C(3e1ea071)}, + {C(79da242a16acae31), C(183c5f438e29d40), C(6d351710ae92f3de), + C(27233b28b5b11e9b), C(c7dfe8988a942700), C(570ed11c4abad984), + C(4b4c04632f48311a), C(27233b28b5b11e9b), C(c7dfe8988a942700), + C(570ed11c4abad984), C(4b4c04632f48311a), C(12f33235442cbf9), + C(a35315ca0b5b8cdb), C(d8abde62ead5506b), C(fc0fcf8478ad5266), + C(67580f0c)}, + {C(461c82656a74fb57), C(d84b491b275aa0f7), C(8f262cb29a6eb8b2), + C(49fa3070bc7b06d0), C(f12ed446bd0c0539), C(6d43ac5d1dd4b240), + C(7609524fe90bec93), C(49fa3070bc7b06d0), C(f12ed446bd0c0539), + C(6d43ac5d1dd4b240), C(7609524fe90bec93), C(391c2b2e076ec241), + C(f5e62deda7839f7b), C(3c7b3186a10d870f), C(77ef4f2cba4f1005), + C(4e109454)}, + {C(53c1a66d0b13003), C(731f060e6fe797fc), C(daa56811791371e3), + C(57466046cf6896ed), C(8ac37e0e8b25b0c6), C(3e6074b52ad3cf18), + C(aa491ce7b45db297), C(57466046cf6896ed), C(8ac37e0e8b25b0c6), + C(3e6074b52ad3cf18), C(aa491ce7b45db297), C(f7a9227c5e5e22c3), + C(3d92e0841e29ce28), C(2d30da5b2859e59d), C(ff37fa1c9cbfafc2), + C(88a474a7)}, + {C(d3a2efec0f047e9), C(1cabce58853e58ea), C(7a17b2eae3256be4), + C(c2dcc9758c910171), C(cb5cddaeff4ddb40), C(5d7cc5869baefef1), + C(9644c5853af9cfeb), C(c2dcc9758c910171), C(cb5cddaeff4ddb40), + C(5d7cc5869baefef1), C(9644c5853af9cfeb), C(255c968184694ee1), + C(4e4d726eda360927), C(7d27dd5b6d100377), C(9a300e2020ddea2c), C(5b5bedd)}, + {C(43c64d7484f7f9b2), C(5da002b64aafaeb7), C(b576c1e45800a716), + C(3ee84d3d5b4ca00b), C(5cbc6d701894c3f9), C(d9e946f5ae1ca95), + C(24ca06e67f0b1833), C(3ee84d3d5b4ca00b), C(5cbc6d701894c3f9), + C(d9e946f5ae1ca95), C(24ca06e67f0b1833), C(3413d46b4152650e), + C(cbdfdbc2ab516f9c), C(2aad8acb739e0c6c), C(2bfc950d9f9fa977), + C(1aaddfa7)}, + {C(a7dec6ad81cf7fa1), C(180c1ab708683063), C(95e0fd7008d67cff), + C(6b11c5073687208), C(7e0a57de0d453f3), C(e48c267d4f646867), + C(2168e9136375f9cb), C(6b11c5073687208), C(7e0a57de0d453f3), + C(e48c267d4f646867), C(2168e9136375f9cb), C(64da194aeeea7fdf), + C(a3b9f01fa5885678), C(c316f8ee2eb2bd17), C(a7e4d80f83e4427f), + C(5be07fd8)}, + {C(5408a1df99d4aff), C(b9565e588740f6bd), C(abf241813b08006e), + C(7da9e81d89fda7ad), C(274157cabe71440d), C(2c22d9a480b331f7), + C(e835c8ac746472d5), C(7da9e81d89fda7ad), C(274157cabe71440d), + C(2c22d9a480b331f7), C(e835c8ac746472d5), C(2038ce817a201ae4), + C(46f3289dfe1c5e40), C(435578a42d4b7c56), C(f96d9f409fcf561), C(cbca8606)}, + {C(a8b27a6bcaeeed4b), C(aec1eeded6a87e39), C(9daf246d6fed8326), + C(d45a938b79f54e8f), C(366b219d6d133e48), C(5b14be3c25c49405), + C(fdd791d48811a572), C(d45a938b79f54e8f), C(366b219d6d133e48), + C(5b14be3c25c49405), C(fdd791d48811a572), C(3de67b8d9e95d335), + C(903c01307cfbeed5), C(af7d65f32274f1d1), C(4dba141b5fc03c42), + C(bde64d01)}, + {C(9a952a8246fdc269), 
C(d0dcfcac74ef278c), C(250f7139836f0f1f), + C(c83d3c5f4e5f0320), C(694e7adeb2bf32e5), C(7ad09538a3da27f5), + C(2b5c18f934aa5303), C(c83d3c5f4e5f0320), C(694e7adeb2bf32e5), + C(7ad09538a3da27f5), C(2b5c18f934aa5303), C(c4dad7703d34326e), + C(825569e2bcdc6a25), C(b83d267709ca900d), C(44ed05151f5d74e6), + C(ee90cf33)}, + {C(c930841d1d88684f), C(5eb66eb18b7f9672), C(e455d413008a2546), + C(bc271bc0df14d647), C(b071100a9ff2edbb), C(2b1a4c1cc31a119a), + C(b5d7caa1bd946cef), C(bc271bc0df14d647), C(b071100a9ff2edbb), + C(2b1a4c1cc31a119a), C(b5d7caa1bd946cef), C(e02623ae10f4aadd), + C(d79f600389cd06fd), C(1e8da7965303e62b), C(86f50e10eeab0925), + C(4305c3ce)}, + {C(94dc6971e3cf071a), C(994c7003b73b2b34), C(ea16e85978694e5), + C(336c1b59a1fc19f6), C(c173acaecc471305), C(db1267d24f3f3f36), + C(e9a5ee98627a6e78), C(336c1b59a1fc19f6), C(c173acaecc471305), + C(db1267d24f3f3f36), C(e9a5ee98627a6e78), C(718f334204305ae5), + C(e3b53c148f98d22c), C(a184012df848926), C(6e96386127d51183), C(4b3a1d76)}, + {C(7fc98006e25cac9), C(77fee0484cda86a7), C(376ec3d447060456), + C(84064a6dcf916340), C(fbf55a26790e0ebb), C(2e7f84151c31a5c2), + C(9f7f6d76b950f9bf), C(84064a6dcf916340), C(fbf55a26790e0ebb), + C(2e7f84151c31a5c2), C(9f7f6d76b950f9bf), C(125e094fbee2b146), + C(5706aa72b2eef7c2), C(1c4a2daa905ee66e), C(83d48029b5451694), + C(a8bb6d80)}, + {C(bd781c4454103f6), C(612197322f49c931), C(b9cf17fd7e5462d5), + C(e38e526cd3324364), C(85f2b63a5b5e840a), C(485d7cef5aaadd87), + C(d2b837a462f6db6d), C(e38e526cd3324364), C(85f2b63a5b5e840a), + C(485d7cef5aaadd87), C(d2b837a462f6db6d), C(3e41cef031520d9a), + C(82df73902d7f67e), C(3ba6fd54c15257cb), C(22f91f079be42d40), C(1f9fa607)}, + {C(da60e6b14479f9df), C(3bdccf69ece16792), C(18ebf45c4fecfdc9), + C(16818ee9d38c6664), C(5519fa9a1e35a329), C(cbd0001e4b08ed8), + C(41a965e37a0c731b), C(16818ee9d38c6664), C(5519fa9a1e35a329), + C(cbd0001e4b08ed8), C(41a965e37a0c731b), C(66e7b5dcca1ca28f), + C(963b2d993614347d), C(9b6fc6f41d411106), C(aaaecaccf7848c0c), + C(8d0e4ed2)}, + {C(4ca56a348b6c4d3), C(60618537c3872514), C(2fbb9f0e65871b09), + C(30278016830ddd43), C(f046646d9012e074), C(c62a5804f6e7c9da), + C(98d51f5830e2bc1e), C(30278016830ddd43), C(f046646d9012e074), + C(c62a5804f6e7c9da), C(98d51f5830e2bc1e), C(7b2cbe5d37e3f29e), + C(7b8c3ed50bda4aa0), C(3ea60cc24639e038), C(f7706de9fb0b5801), + C(1bf31347)}, + {C(ebd22d4b70946401), C(6863602bf7139017), C(c0b1ac4e11b00666), + C(7d2782b82bd494b6), C(97159ba1c26b304b), C(42b3b0fd431b2ac2), + C(faa81f82691c830c), C(7d2782b82bd494b6), C(97159ba1c26b304b), + C(42b3b0fd431b2ac2), C(faa81f82691c830c), C(7cc6449234c7e185), + C(aeaa6fa643ca86a5), C(1412db1c0f2e0133), C(4df2fe3e4072934f), + C(1ae3fc5b)}, + {C(3cc4693d6cbcb0c), C(501689ea1c70ffa), C(10a4353e9c89e364), + C(58c8aba7475e2d95), C(3e2f291698c9427a), C(e8710d19c9de9e41), + C(65dda22eb04cf953), C(58c8aba7475e2d95), C(3e2f291698c9427a), + C(e8710d19c9de9e41), C(65dda22eb04cf953), C(d7729c48c250cffa), + C(ef76162b2ddfba4b), C(52371e17f4d51f6d), C(ddd002112ff0c833), + C(459c3930)}, + {C(38908e43f7ba5ef0), C(1ab035d4e7781e76), C(41d133e8c0a68ff7), + C(d1090893afaab8bc), C(96c4fe6922772807), C(4522426c2b4205eb), + C(efad99a1262e7e0d), C(d1090893afaab8bc), C(96c4fe6922772807), + C(4522426c2b4205eb), C(efad99a1262e7e0d), C(c7696029abdb465e), + C(4e18eaf03d517651), C(d006bced54c86ac8), C(4330326d1021860c), + C(e00c4184)}, + {C(34983ccc6aa40205), C(21802cad34e72bc4), C(1943e8fb3c17bb8), + C(fc947167f69c0da5), C(ae79cfdb91b6f6c1), C(7b251d04c26cbda3), + C(128a33a79060d25e), 
C(fc947167f69c0da5), C(ae79cfdb91b6f6c1), + C(7b251d04c26cbda3), C(128a33a79060d25e), C(1eca842dbfe018dd), + C(50a4cd2ee0ba9c63), C(c2f5c97d8399682f), C(3f929fc7cbe8ecbb), + C(ffc7a781)}, + {C(86215c45dcac9905), C(ea546afe851cae4b), C(d85b6457e489e374), + C(b7609c8e70386d66), C(36e6ccc278d1636d), C(2f873307c08e6a1c), + C(10f252a758505289), C(b7609c8e70386d66), C(36e6ccc278d1636d), + C(2f873307c08e6a1c), C(10f252a758505289), C(c8977646e81ab4b6), + C(8017b745cd80213b), C(960687db359bea0), C(ef4a470660799488), C(6a125480)}, + {C(420fc255c38db175), C(d503cd0f3c1208d1), C(d4684e74c825a0bc), + C(4c10537443152f3d), C(720451d3c895e25d), C(aff60c4d11f513fd), + C(881e8d6d2d5fb953), C(4c10537443152f3d), C(720451d3c895e25d), + C(aff60c4d11f513fd), C(881e8d6d2d5fb953), C(9dec034a043f1f55), + C(e27a0c22e7bfb39d), C(2220b959128324), C(53240272152dbd8b), C(88a1512b)}, + {C(1d7a31f5bc8fe2f9), C(4763991092dcf836), C(ed695f55b97416f4), + C(f265edb0c1c411d7), C(30e1e9ec5262b7e6), C(c2c3ba061ce7957a), + C(d975f93b89a16409), C(f265edb0c1c411d7), C(30e1e9ec5262b7e6), + C(c2c3ba061ce7957a), C(d975f93b89a16409), C(e9d703123f43450a), + C(41383fedfed67c82), C(6e9f43ecbbbd6004), C(c7ccd23a24e77b8), C(549bbbe5)}, + {C(94129a84c376a26e), C(c245e859dc231933), C(1b8f74fecf917453), + C(e9369d2e9007e74b), C(b1375915d1136052), C(926c2021fe1d2351), + C(1d943addaaa2e7e6), C(e9369d2e9007e74b), C(b1375915d1136052), + C(926c2021fe1d2351), C(1d943addaaa2e7e6), C(f5f515869c246738), + C(7e309cd0e1c0f2a0), C(153c3c36cf523e3b), C(4931c66872ea6758), + C(c133d38c)}, + {C(1d3a9809dab05c8d), C(adddeb4f71c93e8), C(ef342eb36631edb), + C(301d7a61c4b3dbca), C(861336c3f0552d61), C(12c6db947471300f), + C(a679ef0ed761deb9), C(301d7a61c4b3dbca), C(861336c3f0552d61), + C(12c6db947471300f), C(a679ef0ed761deb9), C(5f713b720efcd147), + C(37ac330a333aa6b), C(3309dc9ec1616eef), C(52301d7a908026b5), C(fcace348)}, + {C(90fa3ccbd60848da), C(dfa6e0595b569e11), C(e585d067a1f5135d), + C(6cef866ec295abea), C(c486c0d9214beb2d), C(d6e490944d5fe100), + C(59df3175d72c9f38), C(6cef866ec295abea), C(c486c0d9214beb2d), + C(d6e490944d5fe100), C(59df3175d72c9f38), C(3f23aeb4c04d1443), + C(9bf0515cd8d24770), C(958554f60ccaade2), C(5182863c90132fe8), + C(ed7b6f9a)}, + {C(2dbb4fc71b554514), C(9650e04b86be0f82), C(60f2304fba9274d3), + C(fcfb9443e997cab), C(f13310d96dec2772), C(709cad2045251af2), + C(afd0d30cc6376dad), C(fcfb9443e997cab), C(f13310d96dec2772), + C(709cad2045251af2), C(afd0d30cc6376dad), C(59d4bed30d550d0d), + C(58006d4e22d8aad1), C(eee12d2362d1f13b), C(35cf1d7faaf1d228), + C(6d907dda)}, + {C(b98bf4274d18374a), C(1b669fd4c7f9a19a), C(b1f5972b88ba2b7a), + C(73119c99e6d508be), C(5d4036a187735385), C(8fa66e192fd83831), + C(2abf64b6b592ed57), C(73119c99e6d508be), C(5d4036a187735385), + C(8fa66e192fd83831), C(2abf64b6b592ed57), C(d4501f95dd84b08c), + C(bf1552439c8bea02), C(4f56fe753ba7e0ba), C(4ca8d35cc058cfcd), + C(7a4d48d5)}, + {C(d6781d0b5e18eb68), C(b992913cae09b533), C(58f6021caaee3a40), + C(aafcb77497b5a20b), C(411819e5e79b77a3), C(bd779579c51c77ce), + C(58d11f5dcf5d075d), C(aafcb77497b5a20b), C(411819e5e79b77a3), + C(bd779579c51c77ce), C(58d11f5dcf5d075d), C(9eae76cde1cb4233), + C(32fe25a9bf657970), C(1c0c807948edb06a), C(b8f29a3dfaee254d), + C(e686f3db)}, + {C(226651cf18f4884c), C(595052a874f0f51c), C(c9b75162b23bab42), + C(3f44f873be4812ec), C(427662c1dbfaa7b2), C(a207ff9638fb6558), + C(a738d919e45f550f), C(3f44f873be4812ec), C(427662c1dbfaa7b2), + C(a207ff9638fb6558), C(a738d919e45f550f), C(cb186ea05717e7d6), + C(1ca7d68a5871fdc1), C(5d4c119ea8ef3750), 
C(72b6a10fa2ff9406), C(cce7c55)}, + {C(a734fb047d3162d6), C(e523170d240ba3a5), C(125a6972809730e8), + C(d396a297799c24a1), C(8fee992e3069bad5), C(2e3a01b0697ccf57), + C(ee9c7390bd901cfa), C(d396a297799c24a1), C(8fee992e3069bad5), + C(2e3a01b0697ccf57), C(ee9c7390bd901cfa), C(56f2d9da0af28af2), + C(3fdd37b2fe8437cb), C(3d13eeeb60d6aec0), C(2432ae62e800a5ce), C(f58b96b)}, + {C(c6df6364a24f75a3), C(c294e2c84c4f5df8), C(a88df65c6a89313b), + C(895fe8443183da74), C(c7f2f6f895a67334), C(a0d6b6a506691d31), + C(24f51712b459a9f0), C(895fe8443183da74), C(c7f2f6f895a67334), + C(a0d6b6a506691d31), C(24f51712b459a9f0), C(173a699481b9e088), + C(1dee9b77bcbf45d3), C(32b98a646a8667d0), C(3adcd4ee28f42a0e), + C(1bbf6f60)}, + {C(d8d1364c1fbcd10), C(2d7cc7f54832deaa), C(4e22c876a7c57625), + C(a3d5d1137d30c4bd), C(1e7d706a49bdfb9e), C(c63282b20ad86db2), + C(aec97fa07916bfd6), C(a3d5d1137d30c4bd), C(1e7d706a49bdfb9e), + C(c63282b20ad86db2), C(aec97fa07916bfd6), C(7c9ba3e52d44f73e), + C(af62fd245811185d), C(8a9d2dacd8737652), C(bd2cce277d5fbec0), + C(ce5e0cc2)}, + {C(aae06f9146db885f), C(3598736441e280d9), C(fba339b117083e55), + C(b22bf08d9f8aecf7), C(c182730de337b922), C(2b9adc87a0450a46), + C(192c29a9cfc00aad), C(b22bf08d9f8aecf7), C(c182730de337b922), + C(2b9adc87a0450a46), C(192c29a9cfc00aad), C(9fd733f1d84a59d9), + C(d86bd5c9839ace15), C(af20b57303172876), C(9f63cb7161b5364c), + C(584cfd6f)}, + {C(8955ef07631e3bcc), C(7d70965ea3926f83), C(39aed4134f8b2db6), + C(882efc2561715a9c), C(ef8132a18a540221), C(b20a3c87a8c257c1), + C(f541b8628fad6c23), C(882efc2561715a9c), C(ef8132a18a540221), + C(b20a3c87a8c257c1), C(f541b8628fad6c23), C(9552aed57a6e0467), + C(4d9fdd56867611a7), C(c330279bf23b9eab), C(44dbbaea2fcb8eba), + C(8f9bbc33)}, + {C(ad611c609cfbe412), C(d3c00b18bf253877), C(90b2172e1f3d0bfd), + C(371a98b2cb084883), C(33a2886ee9f00663), C(be9568818ed6e6bd), + C(f244a0fa2673469a), C(371a98b2cb084883), C(33a2886ee9f00663), + C(be9568818ed6e6bd), C(f244a0fa2673469a), C(b447050bd3e559e9), + C(d3b695dae7a13383), C(ded0bb65be471188), C(ca3c7a2b78922cae), + C(d7640d95)}, + {C(d5339adc295d5d69), C(b633cc1dcb8b586a), C(ee84184cf5b1aeaf), + C(89f3aab99afbd636), C(f420e004f8148b9a), C(6818073faa797c7c), + C(dd3b4e21cbbf42ca), C(89f3aab99afbd636), C(f420e004f8148b9a), + C(6818073faa797c7c), C(dd3b4e21cbbf42ca), C(6a2b7db261164844), + C(cbead63d1895852a), C(93d37e1eae05e2f9), C(5d06db2703fbc3ae), C(3d12a2b)}, + {C(40d0aeff521375a8), C(77ba1ad7ecebd506), C(547c6f1a7d9df427), + C(21c2be098327f49b), C(7e035065ac7bbef5), C(6d7348e63023fb35), + C(9d427dc1b67c3830), C(21c2be098327f49b), C(7e035065ac7bbef5), + C(6d7348e63023fb35), C(9d427dc1b67c3830), C(4e3d018a43858341), + C(cf924bb44d6b43c5), C(4618b6a26e3446ae), C(54d3013fac3ed469), + C(aaeafed0)}, + {C(8b2d54ae1a3df769), C(11e7adaee3216679), C(3483781efc563e03), + C(9d097dd3152ab107), C(51e21d24126e8563), C(cba56cac884a1354), + C(39abb1b595f0a977), C(9d097dd3152ab107), C(51e21d24126e8563), + C(cba56cac884a1354), C(39abb1b595f0a977), C(81e6dd1c1109848f), + C(1644b209826d7b15), C(6ac67e4e4b4812f0), C(b3a9f5622c935bf7), + C(95b9b814)}, + {C(99c175819b4eae28), C(932e8ff9f7a40043), C(ec78dcab07ca9f7c), + C(c1a78b82ba815b74), C(458cbdfc82eb322a), C(17f4a192376ed8d7), + C(6f9e92968bc8ccef), C(c1a78b82ba815b74), C(458cbdfc82eb322a), + C(17f4a192376ed8d7), C(6f9e92968bc8ccef), C(93e098c333b39905), + C(d59b1cace44b7fdc), C(f7a64ed78c64c7c5), C(7c6eca5dd87ec1ce), + C(45fbe66e)}, + {C(2a418335779b82fc), C(af0295987849a76b), C(c12bc5ff0213f46e), + C(5aeead8d6cb25bb9), 
C(739315f7743ec3ff), C(9ab48d27111d2dcc), + C(5b87bd35a975929b), C(5aeead8d6cb25bb9), C(739315f7743ec3ff), + C(9ab48d27111d2dcc), C(5b87bd35a975929b), C(c3dd8d6d95a46bb3), + C(7bf9093215a4f483), C(cb557d6ed84285bd), C(daf58422f261fdb5), + C(b4baa7a8)}, + {C(3b1fc6a3d279e67d), C(70ea1e49c226396), C(25505adcf104697c), + C(ba1ffba29f0367aa), C(a20bec1dd15a8b6c), C(e9bf61d2dab0f774), + C(f4f35bf5870a049c), C(ba1ffba29f0367aa), C(a20bec1dd15a8b6c), + C(e9bf61d2dab0f774), C(f4f35bf5870a049c), C(26787efa5b92385), + C(3d9533590ce30b59), C(a4da3e40530a01d4), C(6395deaefb70067c), + C(83e962fe)}, + {C(d97eacdf10f1c3c9), C(b54f4654043a36e0), C(b128f6eb09d1234), + C(d8ad7ec84a9c9aa2), C(e256cffed11f69e6), C(2cf65e4958ad5bda), + C(cfbf9b03245989a7), C(d8ad7ec84a9c9aa2), C(e256cffed11f69e6), + C(2cf65e4958ad5bda), C(cfbf9b03245989a7), C(9fa51e6686cf4444), + C(9425c117a34609d5), C(b25f7e2c6f30e96), C(ea5477c3f2b5afd1), C(aac3531c)}, + {C(293a5c1c4e203cd4), C(6b3329f1c130cefe), C(f2e32f8ec76aac91), + C(361e0a62c8187bff), C(6089971bb84d7133), C(93df7741588dd50b), + C(c2a9b6abcd1d80b1), C(361e0a62c8187bff), C(6089971bb84d7133), + C(93df7741588dd50b), C(c2a9b6abcd1d80b1), C(4d2f86869d79bc59), + C(85cd24d8aa570ff), C(b0dcf6ef0e94bbb5), C(2037c69aa7a78421), C(2b1db7cc)}, + {C(4290e018ffaedde7), C(a14948545418eb5e), C(72d851b202284636), + C(4ec02f3d2f2b23f2), C(ab3580708aa7c339), C(cdce066fbab3f65), + C(d8ed3ecf3c7647b9), C(4ec02f3d2f2b23f2), C(ab3580708aa7c339), + C(cdce066fbab3f65), C(d8ed3ecf3c7647b9), C(6d2204b3e31f344a), + C(61a4d87f80ee61d7), C(446c43dbed4b728f), C(73130ac94f58747e), + C(cf00cd31)}, + {C(f919a59cbde8bf2f), C(a56d04203b2dc5a5), C(38b06753ac871e48), + C(c2c9fc637dbdfcfa), C(292ab8306d149d75), C(7f436b874b9ffc07), + C(a5b56b0129218b80), C(c2c9fc637dbdfcfa), C(292ab8306d149d75), + C(7f436b874b9ffc07), C(a5b56b0129218b80), C(9188f7bdc47ec050), + C(cfe9345d03a15ade), C(40b520fb2750c49e), C(c2e83d343968af2e), + C(7d3c43b8)}, + {C(1d70a3f5521d7fa4), C(fb97b3fdc5891965), C(299d49bbbe3535af), + C(e1a8286a7d67946e), C(52bd956f047b298), C(cbd74332dd4204ac), + C(12b5be7752721976), C(e1a8286a7d67946e), C(52bd956f047b298), + C(cbd74332dd4204ac), C(12b5be7752721976), C(278426e27f6204b6), + C(932ca7a7cd610181), C(41647321f0a5914d), C(48f4aa61a0ae80db), + C(cbd5fac6)}, + {C(6af98d7b656d0d7c), C(d2e99ae96d6b5c0c), C(f63bd1603ef80627), + C(bde51033ac0413f8), C(bc0272f691aec629), C(6204332651bebc44), + C(1cbf00de026ea9bd), C(bde51033ac0413f8), C(bc0272f691aec629), + C(6204332651bebc44), C(1cbf00de026ea9bd), C(b9c7ed6a75f3ff1e), + C(7e310b76a5808e4f), C(acbbd1aad5531885), C(fc245f2473adeb9c), + C(76d0fec4)}, + {C(395b7a8adb96ab75), C(582df7165b20f4a), C(e52bd30e9ff657f9), + C(6c71064996cbec8b), C(352c535edeefcb89), C(ac7f0aba15cd5ecd), + C(3aba1ca8353e5c60), C(6c71064996cbec8b), C(352c535edeefcb89), + C(ac7f0aba15cd5ecd), C(3aba1ca8353e5c60), C(5c30a288a80ce646), + C(c2940488b6617674), C(925f8cc66b370575), C(aa65d1283b9bb0ef), + C(405e3402)}, + {C(3822dd82c7df012f), C(b9029b40bd9f122b), C(fd25b988468266c4), + C(43e47bd5bab1e0ef), C(4a71f363421f282f), C(880b2f32a2b4e289), + C(1299d4eda9d3eadf), C(43e47bd5bab1e0ef), C(4a71f363421f282f), + C(880b2f32a2b4e289), C(1299d4eda9d3eadf), C(d713a40226f5564), + C(4d8d34fedc769406), C(a85001b29cd9cac3), C(cae92352a41fd2b0), + C(c732c481)}, + {C(79f7efe4a80b951a), C(dd3a3fddfc6c9c41), C(ab4c812f9e27aa40), + C(832954ec9d0de333), C(94c390aa9bcb6b8a), C(f3b32afdc1f04f82), + C(d229c3b72e4b9a74), C(832954ec9d0de333), C(94c390aa9bcb6b8a), + C(f3b32afdc1f04f82), 
C(d229c3b72e4b9a74), C(1d11860d7ed624a6), + C(cadee20b3441b984), C(75307079bf306f7b), C(87902aa3b9753ba4), + C(a8d123c9)}, + {C(ae6e59f5f055921a), C(e9d9b7bf68e82), C(5ce4e4a5b269cc59), + C(4960111789727567), C(149b8a37c7125ab6), C(78c7a13ab9749382), + C(1c61131260ca151a), C(4960111789727567), C(149b8a37c7125ab6), + C(78c7a13ab9749382), C(1c61131260ca151a), C(1e93276b35c309a0), + C(2618f56230acde58), C(af61130a18e4febf), C(7145deb18e89befe), + C(1e80ad7d)}, + {C(8959dbbf07387d36), C(b4658afce48ea35d), C(8f3f82437d8cb8d6), + C(6566d74954986ba5), C(99d5235cc82519a7), C(257a23805c2d825), + C(ad75ccb968e93403), C(6566d74954986ba5), C(99d5235cc82519a7), + C(257a23805c2d825), C(ad75ccb968e93403), C(b45bd4cf78e11f7f), + C(80c5536bdc487983), C(a4fd76ecbf018c8a), C(3b9dac78a7a70d43), + C(52aeb863)}, + {C(4739613234278a49), C(99ea5bcd340bf663), C(258640912e712b12), + C(c8a2827404991402), C(7ee5e78550f02675), C(2ec53952db5ac662), + C(1526405a9df6794b), C(c8a2827404991402), C(7ee5e78550f02675), + C(2ec53952db5ac662), C(1526405a9df6794b), C(eddc6271170c5e1f), + C(f5a85f986001d9d6), C(95427c677bf58d58), C(53ed666dfa85cb29), + C(ef7c0c18)}, + {C(420e6c926bc54841), C(96dbbf6f4e7c75cd), C(d8d40fa70c3c67bb), + C(3edbc10e4bfee91b), C(f0d681304c28ef68), C(77ea602029aaaf9c), + C(90f070bd24c8483c), C(3edbc10e4bfee91b), C(f0d681304c28ef68), + C(77ea602029aaaf9c), C(90f070bd24c8483c), C(28bc8e41e08ceb86), + C(1eb56e48a65691ef), C(9fea5301c9202f0e), C(3fcb65091aa9f135), + C(b6ad4b68)}, + {C(c8601bab561bc1b7), C(72b26272a0ff869a), C(56fdfc986d6bc3c4), + C(83707730cad725d4), C(c9ca88c3a779674a), C(e1c696fbbd9aa933), + C(723f3baab1c17a45), C(83707730cad725d4), C(c9ca88c3a779674a), + C(e1c696fbbd9aa933), C(723f3baab1c17a45), C(f82abc7a1d851682), + C(30683836818e857d), C(78bfa3e89a5ab23f), C(6928234482b31817), + C(c1e46b17)}, + {C(b2d294931a0e20eb), C(284ffd9a0815bc38), C(1f8a103aac9bbe6), + C(1ef8e98e1ea57269), C(5971116272f45a8b), C(187ad68ce95d8eac), + C(e94e93ee4e8ecaa6), C(1ef8e98e1ea57269), C(5971116272f45a8b), + C(187ad68ce95d8eac), C(e94e93ee4e8ecaa6), C(a0ff2a58611838b5), + C(b01e03849bfbae6f), C(d081e202e28ea3ab), C(51836bcee762bf13), + C(57b8df25)}, + {C(7966f53c37b6c6d7), C(8e6abcfb3aa2b88f), C(7f2e5e0724e5f345), + C(3eeb60c3f5f8143d), C(a25aec05c422a24f), C(b026b03ad3cca4db), + C(e6e030028cc02a02), C(3eeb60c3f5f8143d), C(a25aec05c422a24f), + C(b026b03ad3cca4db), C(e6e030028cc02a02), C(16fe679338b34bfc), + C(c1be385b5c8a9de4), C(65af5df6567530eb), C(ed3b303df4dc6335), + C(e9fa36d6)}, + {C(be9bb0abd03b7368), C(13bca93a3031be55), C(e864f4f52b55b472), + C(36a8d13a2cbb0939), C(254ac73907413230), C(73520d1522315a70), + C(8c9fdb5cf1e1a507), C(36a8d13a2cbb0939), C(254ac73907413230), + C(73520d1522315a70), C(8c9fdb5cf1e1a507), C(b3640570b926886), + C(fba2344ee87f7bab), C(de57341ab448df05), C(385612ee094fa977), + C(8f8daefc)}, + {C(a08d128c5f1649be), C(a8166c3dbbe19aad), C(cb9f914f829ec62c), + C(5b2b7ca856fad1c3), C(8093022d682e375d), C(ea5d163ba7ea231f), + C(d6181d012c0de641), C(5b2b7ca856fad1c3), C(8093022d682e375d), + C(ea5d163ba7ea231f), C(d6181d012c0de641), C(e7d40d0ab8b08159), + C(2e82320f51b3a67e), C(27c2e356ea0b63a3), C(58842d01a2b1d077), C(6e1bb7e)}, + {C(7c386f0ffe0465ac), C(530419c9d843dbf3), C(7450e3a4f72b8d8c), + C(48b218e3b721810d), C(d3757ac8609bc7fc), C(111ba02a88aefc8), + C(e86343137d3bfc2a), C(48b218e3b721810d), C(d3757ac8609bc7fc), + C(111ba02a88aefc8), C(e86343137d3bfc2a), C(44ad26b51661b507), + C(db1268670274f51e), C(62a5e75beae875f3), C(e266e7a44c5f28c6), + C(fd0076f0)}, + {C(bb362094e7ef4f8), 
C(ff3c2a48966f9725), C(55152803acd4a7fe), + C(15747d8c505ffd00), C(438a15f391312cd6), C(e46ca62c26d821f5), + C(be78d74c9f79cb44), C(15747d8c505ffd00), C(438a15f391312cd6), + C(e46ca62c26d821f5), C(be78d74c9f79cb44), C(a8aa19f3aa59f09a), + C(effb3cddab2c9267), C(d78e41ad97cb16a5), C(ace6821513527d32), + C(899b17b6)}, + {C(cd80dea24321eea4), C(52b4fdc8130c2b15), C(f3ea100b154bfb82), + C(d9ccef1d4be46988), C(5ede0c4e383a5e66), C(da69683716a54d1e), + C(bfc3fdf02d242d24), C(d9ccef1d4be46988), C(5ede0c4e383a5e66), + C(da69683716a54d1e), C(bfc3fdf02d242d24), C(20ed30274651b3f5), + C(4c659824169e86c6), C(637226dae5b52a0e), C(7e050dbd1c71dc7f), + C(e3e84e31)}, + {C(d599a04125372c3a), C(313136c56a56f363), C(1e993c3677625832), + C(2870a99c76a587a4), C(99f74cc0b182dda4), C(8a5e895b2f0ca7b6), + C(3d78882d5e0bb1dc), C(2870a99c76a587a4), C(99f74cc0b182dda4), + C(8a5e895b2f0ca7b6), C(3d78882d5e0bb1dc), C(f466123732a3e25e), + C(aca5e59716a40e50), C(261d2e7383d0e686), C(ce9362d6a42c15a7), + C(eef79b6b)}, + {C(dbbf541e9dfda0a), C(1479fceb6db4f844), C(31ab576b59062534), + C(a3335c417687cf3a), C(92ff114ac45cda75), C(c3b8a627384f13b5), + C(c4f25de33de8b3f7), C(a3335c417687cf3a), C(92ff114ac45cda75), + C(c3b8a627384f13b5), C(c4f25de33de8b3f7), C(eacbf520578c5964), + C(4cb19c5ab24f3215), C(e7d8a6f67f0c6e7), C(325c2413eb770ada), C(868e3315)}, + {C(c2ee3288be4fe2bf), C(c65d2f5ddf32b92), C(af6ecdf121ba5485), + C(c7cd48f7abf1fe59), C(ce600656ace6f53a), C(8a94a4381b108b34), + C(f9d1276c64bf59fb), C(c7cd48f7abf1fe59), C(ce600656ace6f53a), + C(8a94a4381b108b34), C(f9d1276c64bf59fb), C(219ce70ff5a112a5), + C(e6026c576e2d28d7), C(b8e467f25015e3a6), C(950cb904f37af710), + C(4639a426)}, + {C(d86603ced1ed4730), C(f9de718aaada7709), C(db8b9755194c6535), + C(d803e1eead47604c), C(ad00f7611970a71b), C(bc50036b16ce71f5), + C(afba96210a2ca7d6), C(d803e1eead47604c), C(ad00f7611970a71b), + C(bc50036b16ce71f5), C(afba96210a2ca7d6), C(28f7a7be1d6765f0), + C(97bd888b93938c68), C(6ad41d1b407ded49), C(b9bfec098dc543e4), + C(f3213646)}, + {C(915263c671b28809), C(a815378e7ad762fd), C(abec6dc9b669f559), + C(d17c928c5342477f), C(745130b795254ad5), C(8c5db926fe88f8ba), + C(742a95c953e6d974), C(d17c928c5342477f), C(745130b795254ad5), + C(8c5db926fe88f8ba), C(742a95c953e6d974), C(279db8057b5d3e96), + C(98168411565b4ec4), C(50a72c54fa1125fa), C(27766a635db73638), + C(17f148e9)}, + {C(2b67cdd38c307a5e), C(cb1d45bb5c9fe1c), C(800baf2a02ec18ad), + C(6531c1fe32bcb417), C(8c970d8df8cdbeb4), C(917ba5fc67e72b40), + C(4b65e4e263e0a426), C(6531c1fe32bcb417), C(8c970d8df8cdbeb4), + C(917ba5fc67e72b40), C(4b65e4e263e0a426), C(e0de33ce88a8b3a9), + C(f8ef98a437e16b08), C(a5162c0c7c5f7b62), C(dbdac43361b2b881), + C(bfd94880)}, + {C(2d107419073b9cd0), C(a96db0740cef8f54), C(ec41ee91b3ecdc1b), + C(ffe319654c8e7ebc), C(6a67b8f13ead5a72), C(6dd10a34f80d532f), + C(6e9cfaece9fbca4), C(ffe319654c8e7ebc), C(6a67b8f13ead5a72), + C(6dd10a34f80d532f), C(6e9cfaece9fbca4), C(b4468eb6a30aa7e9), + C(e87995bee483222a), C(d036c2c90c609391), C(853306e82fa32247), + C(bb1fa7f3)}, + {C(f3e9487ec0e26dfc), C(1ab1f63224e837fa), C(119983bb5a8125d8), + C(8950cfcf4bdf622c), C(8847dca82efeef2f), C(646b75b026708169), + C(21cab4b1687bd8b), C(8950cfcf4bdf622c), C(8847dca82efeef2f), + C(646b75b026708169), C(21cab4b1687bd8b), C(243b489a9eae6231), + C(5f3e634c4b779876), C(ff8abd1548eaf646), C(c7962f5f0151914b), C(88816b1)}, + {C(1160987c8fe86f7d), C(879e6db1481eb91b), C(d7dcb802bfe6885d), + C(14453b5cc3d82396), C(4ef700c33ed278bc), C(1639c72ffc00d12e), + C(fb140ee6155f700d), 
C(14453b5cc3d82396), C(4ef700c33ed278bc), + C(1639c72ffc00d12e), C(fb140ee6155f700d), C(2e6b5c96a6620862), + C(a1f136998cbe19c), C(74e058a3b6c5a712), C(93dcf6bd33928b17), C(5c2faeb3)}, + {C(eab8112c560b967b), C(97f550b58e89dbae), C(846ed506d304051f), + C(276aa37744b5a028), C(8c10800ee90ea573), C(e6e57d2b33a1e0b7), + C(91f83563cd3b9dda), C(276aa37744b5a028), C(8c10800ee90ea573), + C(e6e57d2b33a1e0b7), C(91f83563cd3b9dda), C(afbb4739570738a1), + C(440ba98da5d8f69), C(fde4e9b0eda20350), C(e67dfa5a2138fa1), C(51b5fc6f)}, + {C(1addcf0386d35351), C(b5f436561f8f1484), C(85d38e22181c9bb1), + C(ff5c03f003c1fefe), C(e1098670afe7ff6), C(ea445030cf86de19), + C(f155c68b5c2967f8), C(ff5c03f003c1fefe), C(e1098670afe7ff6), + C(ea445030cf86de19), C(f155c68b5c2967f8), C(95d31b145dbb2e9e), + C(914fe1ca3deb3265), C(6066020b1358ccc1), C(c74bb7e2dee15036), + C(33d94752)}, + {C(d445ba84bf803e09), C(1216c2497038f804), C(2293216ea2237207), + C(e2164451c651adfb), C(b2534e65477f9823), C(4d70691a69671e34), + C(15be4963dbde8143), C(e2164451c651adfb), C(b2534e65477f9823), + C(4d70691a69671e34), C(15be4963dbde8143), C(762e75c406c5e9a3), + C(7b7579f7e0356841), C(480533eb066dfce5), C(90ae14ea6bfeb4ae), + C(b0c92948)}, + {C(37235a096a8be435), C(d9b73130493589c2), C(3b1024f59378d3be), + C(ad159f542d81f04e), C(49626a97a946096), C(d8d3998bf09fd304), + C(d127a411eae69459), C(ad159f542d81f04e), C(49626a97a946096), + C(d8d3998bf09fd304), C(d127a411eae69459), C(8f3253c4eb785a7b), + C(4049062f37e62397), C(b9fa04d3b670e5c1), C(1211a7967ac9350f), + C(c7171590)}, + {C(763ad6ea2fe1c99d), C(cf7af5368ac1e26b), C(4d5e451b3bb8d3d4), + C(3712eb913d04e2f2), C(2f9500d319c84d89), C(4ac6eb21a8cf06f9), + C(7d1917afcde42744), C(3712eb913d04e2f2), C(2f9500d319c84d89), + C(4ac6eb21a8cf06f9), C(7d1917afcde42744), C(6b58604b5dd10903), + C(c4288dfbc1e319fc), C(230f75ca96817c6e), C(8894cba3b763756c), + C(240a67fb)}, + {C(ea627fc84cd1b857), C(85e372494520071f), C(69ec61800845780b), + C(a3c1c5ca1b0367), C(eb6933997272bb3d), C(76a72cb62692a655), + C(140bb5531edf756e), C(a3c1c5ca1b0367), C(eb6933997272bb3d), + C(76a72cb62692a655), C(140bb5531edf756e), C(8d0d8067d1c925f4), + C(7b3fa56d8d77a10c), C(2bd00287b0946d88), C(f08c8e4bd65b8970), + C(e1843cd5)}, + {C(1f2ffd79f2cdc0c8), C(726a1bc31b337aaa), C(678b7f275ef96434), + C(5aa82bfaa99d3978), C(c18f96cade5ce18d), C(38404491f9e34c03), + C(891fb8926ba0418c), C(5aa82bfaa99d3978), C(c18f96cade5ce18d), + C(38404491f9e34c03), C(891fb8926ba0418c), C(e5f69a6398114c15), + C(7b8ded3623bc6b1d), C(2f3e5c5da5ff70e8), C(1ab142addea6a9ec), + C(fda1452b)}, + {C(39a9e146ec4b3210), C(f63f75802a78b1ac), C(e2e22539c94741c3), + C(8b305d532e61226e), C(caeae80da2ea2e), C(88a6289a76ac684e), + C(8ce5b5f9df1cbd85), C(8b305d532e61226e), C(caeae80da2ea2e), + C(88a6289a76ac684e), C(8ce5b5f9df1cbd85), C(8ae1fc4798e00d57), + C(e7164b8fb364fc46), C(6a978c9bd3a66943), C(ef10d5ae4dd08dc), C(a2cad330)}, + {C(74cba303e2dd9d6d), C(692699b83289fad1), C(dfb9aa7874678480), + C(751390a8a5c41bdc), C(6ee5fbf87605d34), C(6ca73f610f3a8f7c), + C(e898b3c996570ad), C(751390a8a5c41bdc), C(6ee5fbf87605d34), + C(6ca73f610f3a8f7c), C(e898b3c996570ad), C(98168a5858fc7110), + C(6f987fa27aa0daa2), C(f25e3e180d4b36a3), C(d0b03495aeb1be8a), + C(53467e16)}, + {C(4cbc2b73a43071e0), C(56c5db4c4ca4e0b7), C(1b275a162f46bd3d), + C(b87a326e413604bf), C(d8f9a5fa214b03ab), C(8a8bb8265771cf88), + C(a655319054f6e70f), C(b87a326e413604bf), C(d8f9a5fa214b03ab), + C(8a8bb8265771cf88), C(a655319054f6e70f), C(b499cb8e65a9af44), + C(bee7fafcc8307491), C(5d2e55fa9b27cda2), 
C(63b120f5fb2d6ee5), + C(da14a8d0)}, + {C(875638b9715d2221), C(d9ba0615c0c58740), C(616d4be2dfe825aa), + C(5df25f13ea7bc284), C(165edfaafd2598fb), C(af7215c5c718c696), + C(e9f2f9ca655e769), C(5df25f13ea7bc284), C(165edfaafd2598fb), + C(af7215c5c718c696), C(e9f2f9ca655e769), C(e459cfcb565d3d2d), + C(41d032631be2418a), C(c505db05fd946f60), C(54990394a714f5de), + C(67333551)}, + {C(fb686b2782994a8d), C(edee60693756bb48), C(e6bc3cae0ded2ef5), + C(58eb4d03b2c3ddf5), C(6d2542995f9189f1), C(c0beec58a5f5fea2), + C(ed67436f42e2a78b), C(58eb4d03b2c3ddf5), C(6d2542995f9189f1), + C(c0beec58a5f5fea2), C(ed67436f42e2a78b), C(dfec763cdb2b5193), + C(724a8d5345bd2d6), C(94d4fd1b81457c23), C(28e87c50cdede453), C(a0ebd66e)}, + {C(ab21d81a911e6723), C(4c31b07354852f59), C(835da384c9384744), + C(7f759dddc6e8549a), C(616dd0ca022c8735), C(94717ad4bc15ceb3), + C(f66c7be808ab36e), C(7f759dddc6e8549a), C(616dd0ca022c8735), + C(94717ad4bc15ceb3), C(f66c7be808ab36e), C(af8286b550b2f4b7), + C(745bd217d20a9f40), C(c73bfb9c5430f015), C(55e65922666e3fc2), + C(4b769593)}, + {C(33d013cc0cd46ecf), C(3de726423aea122c), C(116af51117fe21a9), + C(f271ba474edc562d), C(e6596e67f9dd3ebd), C(c0a288edf808f383), + C(b3def70681c6babc), C(f271ba474edc562d), C(e6596e67f9dd3ebd), + C(c0a288edf808f383), C(b3def70681c6babc), C(7da7864e9989b095), + C(bf2f8718693cd8a1), C(264a9144166da776), C(61ad90676870beb6), + C(6aa75624)}, + {C(8ca92c7cd39fae5d), C(317e620e1bf20f1), C(4f0b33bf2194b97f), + C(45744afcf131dbee), C(97222392c2559350), C(498a19b280c6d6ed), + C(83ac2c36acdb8d49), C(45744afcf131dbee), C(97222392c2559350), + C(498a19b280c6d6ed), C(83ac2c36acdb8d49), C(7a69645c294daa62), + C(abe9d2be8275b3d2), C(39542019de371085), C(7f4efac8488cd6ad), + C(602a3f96)}, + {C(fdde3b03f018f43e), C(38f932946c78660), C(c84084ce946851ee), + C(b6dd09ba7851c7af), C(570de4e1bb13b133), C(c4e784eb97211642), + C(8285a7fcdcc7c58d), C(b6dd09ba7851c7af), C(570de4e1bb13b133), + C(c4e784eb97211642), C(8285a7fcdcc7c58d), C(d421f47990da899b), + C(8aed409c997eaa13), C(7a045929c2e29ccf), C(b373682a6202c86b), + C(cd183c4d)}, + {C(9c8502050e9c9458), C(d6d2a1a69964beb9), C(1675766f480229b5), + C(216e1d6c86cb524c), C(d01cf6fd4f4065c0), C(fffa4ec5b482ea0f), + C(a0e20ee6a5404ac1), C(216e1d6c86cb524c), C(d01cf6fd4f4065c0), + C(fffa4ec5b482ea0f), C(a0e20ee6a5404ac1), C(c1b037e4eebaf85e), + C(634e3d7c3ebf89eb), C(bcda972358c67d1), C(fd1352181e5b8578), C(960a4d07)}, + {C(348176ca2fa2fdd2), C(3a89c514cc360c2d), C(9f90b8afb318d6d0), + C(bceee07c11a9ac30), C(2e2d47dff8e77eb7), C(11a394cd7b6d614a), + C(1d7c41d54e15cb4a), C(bceee07c11a9ac30), C(2e2d47dff8e77eb7), + C(11a394cd7b6d614a), C(1d7c41d54e15cb4a), C(15baa5ae7312b0fc), + C(f398f596cc984635), C(8ab8fdf87a6788e8), C(b2b5c1234ab47e2), C(9ae998c4)}, + {C(4a3d3dfbbaea130b), C(4e221c920f61ed01), C(553fd6cd1304531f), + C(bd2b31b5608143fe), C(ab717a10f2554853), C(293857f04d194d22), + C(d51be8fa86f254f0), C(bd2b31b5608143fe), C(ab717a10f2554853), + C(293857f04d194d22), C(d51be8fa86f254f0), C(1eee39e07686907e), + C(639039fe0e8d3052), C(d6ec1470cef97ff), C(370c82b860034f0f), C(74e2179d)}, + {C(b371f768cdf4edb9), C(bdef2ace6d2de0f0), C(e05b4100f7f1baec), + C(b9e0d415b4ebd534), C(c97c2a27efaa33d7), C(591cdb35f84ef9da), + C(a57d02d0e8e3756c), C(b9e0d415b4ebd534), C(c97c2a27efaa33d7), + C(591cdb35f84ef9da), C(a57d02d0e8e3756c), C(23f55f12d7c5c87b), + C(4c7ca0fe23221101), C(dbc3020480334564), C(d985992f32c236b1), + C(ee9bae25)}, + {C(7a1d2e96934f61f), C(eb1760ae6af7d961), C(887eb0da063005df), + C(2228d6725e31b8ab), C(9b98f7e4d0142e70), 
C(b6a8c2115b8e0fe7), + C(b591e2f5ab9b94b1), C(2228d6725e31b8ab), C(9b98f7e4d0142e70), + C(b6a8c2115b8e0fe7), C(b591e2f5ab9b94b1), C(6c1feaa8065318e0), + C(4e7e2ca21c2e81fb), C(e9fe5d8ce7993c45), C(ee411fa2f12cf8df), + C(b66edf10)}, + {C(8be53d466d4728f2), C(86a5ac8e0d416640), C(984aa464cdb5c8bb), + C(87049e68f5d38e59), C(7d8ce44ec6bd7751), C(cc28d08ab414839c), + C(6c8f0bd34fe843e3), C(87049e68f5d38e59), C(7d8ce44ec6bd7751), + C(cc28d08ab414839c), C(6c8f0bd34fe843e3), C(b8496dcdc01f3e47), + C(2f03125c282ac26), C(82a8797ba3f5ef07), C(7c977a4d10bf52b8), C(d6209737)}, + {C(829677eb03abf042), C(43cad004b6bc2c0), C(f2f224756803971a), + C(98d0dbf796480187), C(fbcb5f3e1bef5742), C(5af2a0463bf6e921), + C(ad9555bf0120b3a3), C(98d0dbf796480187), C(fbcb5f3e1bef5742), + C(5af2a0463bf6e921), C(ad9555bf0120b3a3), C(283e39b3dc99f447), + C(bedaa1a4a0250c28), C(9d50546624ff9a57), C(4abaf523d1c090f6), C(b994a88)}, + {C(754435bae3496fc), C(5707fc006f094dcf), C(8951c86ab19d8e40), + C(57c5208e8f021a77), C(f7653fbb69cd9276), C(a484410af21d75cb), + C(f19b6844b3d627e8), C(57c5208e8f021a77), C(f7653fbb69cd9276), + C(a484410af21d75cb), C(f19b6844b3d627e8), C(f37400fc3ffd9514), + C(36ae0d821734edfd), C(5f37820af1f1f306), C(be637d40e6a5ad0), C(a05d43c0)}, + {C(fda9877ea8e3805f), C(31e868b6ffd521b7), C(b08c90681fb6a0fd), + C(68110a7f83f5d3ff), C(6d77e045901b85a8), C(84ef681113036d8b), + C(3b9f8e3928f56160), C(68110a7f83f5d3ff), C(6d77e045901b85a8), + C(84ef681113036d8b), C(3b9f8e3928f56160), C(fc8b7f56c130835), + C(a11f3e800638e841), C(d9572267f5cf28c1), C(7897c8149803f2aa), + C(c79f73a8)}, + {C(2e36f523ca8f5eb5), C(8b22932f89b27513), C(331cd6ecbfadc1bb), + C(d1bfe4df12b04cbf), C(f58c17243fd63842), C(3a453cdba80a60af), + C(5737b2ca7470ea95), C(d1bfe4df12b04cbf), C(f58c17243fd63842), + C(3a453cdba80a60af), C(5737b2ca7470ea95), C(54d44a3f4477030c), + C(8168e02d4869aa7f), C(77f383a17778559d), C(95e1737d77a268fc), + C(a490aff5)}, + {C(21a378ef76828208), C(a5c13037fa841da2), C(506d22a53fbe9812), + C(61c9c95d91017da5), C(16f7c83ba68f5279), C(9c0619b0808d05f7), + C(83c117ce4e6b70a3), C(61c9c95d91017da5), C(16f7c83ba68f5279), + C(9c0619b0808d05f7), C(83c117ce4e6b70a3), C(cfb4c8af7fd01413), + C(fdef04e602e72296), C(ed6124d337889b1), C(4919c86707b830da), C(dfad65b4)}, + {C(ccdd5600054b16ca), C(f78846e84204cb7b), C(1f9faec82c24eac9), + C(58634004c7b2d19a), C(24bb5f51ed3b9073), C(46409de018033d00), + C(4a9805eed5ac802e), C(58634004c7b2d19a), C(24bb5f51ed3b9073), + C(46409de018033d00), C(4a9805eed5ac802e), C(e18de8db306baf82), + C(46bbf75f1fa025ff), C(5faf2fb09be09487), C(3fbc62bd4e558fb3), C(1d07dfb)}, + {C(7854468f4e0cabd0), C(3a3f6b4f098d0692), C(ae2423ec7799d30d), + C(29c3529eb165eeba), C(443de3703b657c35), C(66acbce31ae1bc8d), + C(1acc99effe1d547e), C(29c3529eb165eeba), C(443de3703b657c35), + C(66acbce31ae1bc8d), C(1acc99effe1d547e), C(cf07f8a57906573d), + C(31bafb0bbb9a86e7), C(40c69492702a9346), C(7df61fdaa0b858af), + C(416df9a0)}, + {C(7f88db5346d8f997), C(88eac9aacc653798), C(68a4d0295f8eefa1), + C(ae59ca86f4c3323d), C(25906c09906d5c4c), C(8dd2aa0c0a6584ae), + C(232a7d96b38f40e9), C(ae59ca86f4c3323d), C(25906c09906d5c4c), + C(8dd2aa0c0a6584ae), C(232a7d96b38f40e9), C(8986ee00a2ed0042), + C(c49ae7e428c8a7d1), C(b7dd8280713ac9c2), C(e018720aed1ebc28), + C(1f8fb9cc)}, + {C(bb3fb5fb01d60fcf), C(1b7cc0847a215eb6), C(1246c994437990a1), + C(d4edc954c07cd8f3), C(224f47e7c00a30ab), C(d5ad7ad7f41ef0c6), + C(59e089281d869fd7), C(d4edc954c07cd8f3), C(224f47e7c00a30ab), + C(d5ad7ad7f41ef0c6), C(59e089281d869fd7), C(f29340d07a14b6f1), + 
C(c87c5ef76d9c4ef3), C(463118794193a9a), C(2922dcb0540f0dbc), C(7abf48e3)}, + {C(2e783e1761acd84d), C(39158042bac975a0), C(1cd21c5a8071188d), + C(b1b7ec44f9302176), C(5cb476450dc0c297), C(dc5ef652521ef6a2), + C(3cc79a9e334e1f84), C(b1b7ec44f9302176), C(5cb476450dc0c297), + C(dc5ef652521ef6a2), C(3cc79a9e334e1f84), C(769e2a283dbcc651), + C(9f24b105c8511d3f), C(c31c15575de2f27e), C(ecfecf32c3ae2d66), + C(dea4e3dd)}, + {C(392058251cf22acc), C(944ec4475ead4620), C(b330a10b5cb94166), + C(54bc9bee7cbe1767), C(485820bdbe442431), C(54d6120ea2972e90), + C(f437a0341f29b72a), C(54bc9bee7cbe1767), C(485820bdbe442431), + C(54d6120ea2972e90), C(f437a0341f29b72a), C(8f30885c784d5704), + C(aa95376b16c7906a), C(e826928cfaf93dc3), C(20e8f54d1c16d7d8), + C(c6064f22)}, + {C(adf5c1e5d6419947), C(2a9747bc659d28aa), C(95c5b8cb1f5d62c), + C(80973ea532b0f310), C(a471829aa9c17dd9), C(c2ff3479394804ab), + C(6bf44f8606753636), C(80973ea532b0f310), C(a471829aa9c17dd9), + C(c2ff3479394804ab), C(6bf44f8606753636), C(5184d2973e6dd827), + C(121b96369a332d9a), C(5c25d3475ab69e50), C(26d2961d62884168), + C(743bed9c)}, + {C(6bc1db2c2bee5aba), C(e63b0ed635307398), C(7b2eca111f30dbbc), + C(230d2b3e47f09830), C(ec8624a821c1caf4), C(ea6ec411cdbf1cb1), + C(5f38ae82af364e27), C(230d2b3e47f09830), C(ec8624a821c1caf4), + C(ea6ec411cdbf1cb1), C(5f38ae82af364e27), C(a519ef515ea7187c), + C(6bad5efa7ebae05f), C(748abacb11a74a63), C(a28eef963d1396eb), + C(fce254d5)}, + {C(b00f898229efa508), C(83b7590ad7f6985c), C(2780e70a0592e41d), + C(7122413bdbc94035), C(e7f90fae33bf7763), C(4b6bd0fb30b12387), + C(557359c0c44f48ca), C(7122413bdbc94035), C(e7f90fae33bf7763), + C(4b6bd0fb30b12387), C(557359c0c44f48ca), C(d5656c3d6bc5f0d), + C(983ff8e5e784da99), C(628479671b445bf), C(e179a1e27ce68f5d), C(e47ec9d1)}, + {C(b56eb769ce0d9a8c), C(ce196117bfbcaf04), C(b26c3c3797d66165), + C(5ed12338f630ab76), C(fab19fcb319116d), C(167f5f42b521724b), + C(c4aa56c409568d74), C(5ed12338f630ab76), C(fab19fcb319116d), + C(167f5f42b521724b), C(c4aa56c409568d74), C(75fff4b42f8e9778), + C(94218f94710c1ea3), C(b7b05efb738b06a6), C(83fff2deabf9cd3), C(334a145c)}, + {C(70c0637675b94150), C(259e1669305b0a15), C(46e1dd9fd387a58d), + C(fca4e5bc9292788e), C(cd509dc1facce41c), C(bbba575a59d82fe), + C(4e2e71c15b45d4d3), C(fca4e5bc9292788e), C(cd509dc1facce41c), + C(bbba575a59d82fe), C(4e2e71c15b45d4d3), C(5dc54582ead999c), + C(72612d1571963c6f), C(30318a9d2d3d1829), C(785dd00f4cc9c9a0), + C(adec1e3c)}, + {C(74c0b8a6821faafe), C(abac39d7491370e7), C(faf0b2a48a4e6aed), + C(967e970df9673d2a), C(d465247cffa415c0), C(33a1df0ca1107722), + C(49fc2a10adce4a32), C(967e970df9673d2a), C(d465247cffa415c0), + C(33a1df0ca1107722), C(49fc2a10adce4a32), C(c5707e079a284308), + C(573028266635dda6), C(f786f5eee6127fa0), C(b30d79cebfb51266), + C(f6a9fbf8)}, + {C(5fb5e48ac7b7fa4f), C(a96170f08f5acbc7), C(bbf5c63d4f52a1e5), + C(6cc09e60700563e9), C(d18f23221e964791), C(ffc23eeef7af26eb), + C(693a954a3622a315), C(815308a32a9b0daf), C(efb2ab27bf6fd0bd), + C(9f1ffc0986111118), C(f9a3aa1778ea3985), C(698fe54b2b93933b), + C(dacc2b28404d0f10), C(815308a32a9b0daf), C(efb2ab27bf6fd0bd), + C(5398210c)}, +}; + +void TestUnchanging(const uint64_t* expected, int offset, int len) { + const uint128 u = CityHash128(data + offset, len); + const uint128 v = CityHash128WithSeed(data + offset, len, kSeed128); + EXPECT_EQ(expected[0], CityHash64(data + offset, len)); + EXPECT_EQ(expected[15], CityHash32(data + offset, len)); + EXPECT_EQ(expected[1], CityHash64WithSeed(data + offset, len, kSeed0)); + 
EXPECT_EQ(expected[2], + CityHash64WithSeeds(data + offset, len, kSeed0, kSeed1)); + EXPECT_EQ(expected[3], Uint128Low64(u)); + EXPECT_EQ(expected[4], Uint128High64(u)); + EXPECT_EQ(expected[5], Uint128Low64(v)); + EXPECT_EQ(expected[6], Uint128High64(v)); +#ifdef __SSE4_2__ + const uint128 y = CityHashCrc128(data + offset, len); + const uint128 z = CityHashCrc128WithSeed(data + offset, len, kSeed128); + uint64_t crc256_results[4]; + CityHashCrc256(data + offset, len, crc256_results); + EXPECT_EQ(expected[7], Uint128Low64(y)); + EXPECT_EQ(expected[8], Uint128High64(y)); + EXPECT_EQ(expected[9], Uint128Low64(z)); + EXPECT_EQ(expected[10], Uint128High64(z)); + for (int i = 0; i < 4; i++) { + EXPECT_EQ(expected[11 + i], crc256_results[i]); + } +#endif +} + +TEST(CityHashTest, Unchanging) { + setup(); + int i = 0; + for (; i < kTestSize - 1; i++) { + TestUnchanging(testdata[i], i * i, i); + } + TestUnchanging(testdata[i], 0, kDataSize); +} + +} // namespace hash_internal +} // namespace absl diff --git a/absl/hash/internal/hash.cc b/absl/hash/internal/hash.cc new file mode 100644 index 00000000..4bf64096 --- /dev/null +++ b/absl/hash/internal/hash.cc @@ -0,0 +1,23 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/hash/internal/hash.h" + +namespace absl { +namespace hash_internal { + +ABSL_CONST_INIT const void* const CityHashState::kSeed = &kSeed; + +} // namespace hash_internal +} // namespace absl diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h new file mode 100644 index 00000000..4543d679 --- /dev/null +++ b/absl/hash/internal/hash.h @@ -0,0 +1,885 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// -----------------------------------------------------------------------------
+// File: hash.h
+// -----------------------------------------------------------------------------
+//
+#ifndef ABSL_HASH_INTERNAL_HASH_H_
+#define ABSL_HASH_INTERNAL_HASH_H_
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstring>
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/base/internal/endian.h"
+#include "absl/base/port.h"
+#include "absl/container/fixed_array.h"
+#include "absl/meta/type_traits.h"
+#include "absl/numeric/int128.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "absl/utility/utility.h"
+#include "absl/hash/internal/city.h"
+
+namespace absl {
+namespace hash_internal {
+
+// HashStateBase
+//
+// A hash state object represents an intermediate state in the computation
+// of an unspecified hash algorithm. `HashStateBase` provides a CRTP-style
+// base class for hash state implementations. Developers adding type support
+// for `absl::Hash` should not rely on any parts of the state object other than
+// the following member functions:
+//
+//   * HashStateBase::combine()
+//   * HashStateBase::combine_contiguous()
+//
+// A derived hash state class of type `H` must provide a static member function
+// with a signature similar to the following:
+//
+//    `static H combine_contiguous(H state, const unsigned char*, size_t)`.
+//
+// `HashStateBase` will provide a complete implementation for a hash state
+// object in terms of this method.
+//
+// Example:
+//
+//   // Use CRTP to define your derived class.
+//   struct MyHashState : HashStateBase<MyHashState> {
+//     static H combine_contiguous(H state, const unsigned char*, size_t);
+//     using MyHashState::HashStateBase::combine;
+//     using MyHashState::HashStateBase::combine_contiguous;
+//   };
+template <typename H>
+class HashStateBase {
+ public:
+  // HashStateBase::combine()
+  //
+  // Combines an arbitrary number of values into a hash state, returning the
+  // updated state.
+  //
+  // Each of the value types `T` must be separately hashable by the Abseil
+  // hashing framework.
+  //
+  // NOTE:
+  //
+  //   state = H::combine(std::move(state), value1, value2, value3);
+  //
+  // is guaranteed to produce the same hash expansion as:
+  //
+  //   state = H::combine(std::move(state), value1);
+  //   state = H::combine(std::move(state), value2);
+  //   state = H::combine(std::move(state), value3);
+  template <typename T, typename... Ts>
+  static H combine(H state, const T& value, const Ts&... values);
+  static H combine(H state) { return state; }
+
+  // HashStateBase::combine_contiguous()
+  //
+  // Combines a contiguous array of `size` elements into a hash state,
+  // returning the updated state.
+  //
+  // NOTE:
+  //
+  //   state = H::combine_contiguous(std::move(state), data, size);
+  //
+  // is NOT guaranteed to produce the same hash expansion as a for-loop (it may
+  // perform internal optimizations). If you need this guarantee, use the
+  // for-loop instead.
+  template <typename T>
+  static H combine_contiguous(H state, const T* data, size_t size);
+};
+
+// is_uniquely_represented
+//
+// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
+// is uniquely represented.
+// +// A type is "uniquely represented" if two equal values of that type are +// guaranteed to have the same bytes in their underlying storage. In other +// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be +// zero. This property cannot be detected automatically, so this trait is false +// by default, but can be specialized by types that wish to assert that they are +// uniquely represented. This makes them eligible for certain optimizations. +// +// If you have any doubt whatsoever, do not specialize this template. +// The default is completely safe, and merely disables some optimizations +// that will not matter for most types. Specializing this template, +// on the other hand, can be very hazardous. +// +// To be uniquely represented, a type must not have multiple ways of +// representing the same value; for example, float and double are not +// uniquely represented, because they have distinct representations for +// +0 and -0. Furthermore, the type's byte representation must consist +// solely of user-controlled data, with no padding bits and no compiler- +// controlled data such as vptrs or sanitizer metadata. This is usually +// very difficult to guarantee, because in most cases the compiler can +// insert data and padding bits at its own discretion. +// +// If you specialize this template for a type `T`, you must do so in the file +// that defines that type (or in this file). If you define that specialization +// anywhere else, `is_uniquely_represented<T>` could have different meanings +// in different places. +// +// The Enable parameter is meaningless; it is provided as a convenience, +// to support certain SFINAE techniques when defining specializations. +template <typename T, typename Enable = void> +struct is_uniquely_represented : std::false_type {}; + +// is_uniquely_represented<unsigned char> +// +// unsigned char is a synonym for "byte", so it is guaranteed to be +// uniquely represented. +template <> +struct is_uniquely_represented<unsigned char> : std::true_type {}; + +// is_uniquely_represented for non-standard integral types +// +// Integral types other than bool should be uniquely represented on any +// platform that this will plausibly be ported to. +template <typename Integral> +struct is_uniquely_represented< + Integral, typename std::enable_if<std::is_integral<Integral>::value>::type> + : std::true_type {}; + +// is_uniquely_represented<bool> +// +// +template <> +struct is_uniquely_represented<bool> : std::false_type {}; + +// hash_bytes() +// +// Convenience function that combines `hash_state` with the byte representation +// of `value`. +template <typename H, typename T> +H hash_bytes(H hash_state, const T& value) { + const unsigned char* start = reinterpret_cast<const unsigned char*>(&value); + return H::combine_contiguous(std::move(hash_state), start, sizeof(value)); +} + +// ----------------------------------------------------------------------------- +// AbslHashValue for Basic Types +// ----------------------------------------------------------------------------- + +// Note: Default `AbslHashValue` implementations live in `hash_internal`. This +// allows us to block lexical scope lookup when doing an unqualified call to +// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can +// only be found via ADL. + +// AbslHashValue() for hashing bool values +// +// We use SFINAE to ensure that this overload only accepts bool, not types that +// are convertible to bool. 
+template <typename H, typename B>
+typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
+    H hash_state, B value) {
+  return H::combine(std::move(hash_state),
+                    static_cast<unsigned char>(value ? 1 : 0));
+}
+
+// AbslHashValue() for hashing enum values
+template <typename H, typename Enum>
+typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
+    H hash_state, Enum e) {
+  // In practice, we could almost certainly just invoke hash_bytes directly,
+  // but it's possible that a sanitizer might one day want to
+  // store data in the unused bits of an enum. To avoid that risk, we
+  // convert to the underlying type before hashing. Hopefully this will get
+  // optimized away; if not, we can reopen discussion with c-toolchain-team.
+  return H::combine(std::move(hash_state),
+                    static_cast<typename std::underlying_type<Enum>::type>(e));
+}
+
+// AbslHashValue() for hashing floating-point values
+template <typename H, typename Float>
+typename std::enable_if<std::is_floating_point<Float>::value, H>::type
+AbslHashValue(H hash_state, Float value) {
+  // Map both zeros to +0.0 first: -0.0 and +0.0 compare equal, so they must
+  // hash identically.
+  return hash_internal::hash_bytes(std::move(hash_state),
+                                   value == 0 ? 0 : value);
+}
+
+// Long double has the property that it might have extra unused bytes in it.
+// For example, on x86 sizeof(long double) == 16, but only 80 bits of it are
+// actually used. This means we can't use hash_bytes on a long double and have
+// to convert it to something else first.
+template <typename H>
+H AbslHashValue(H hash_state, long double value) {
+  const int category = std::fpclassify(value);
+  switch (category) {
+    case FP_INFINITE:
+      // Add the sign bit to differentiate between +Inf and -Inf
+      hash_state = H::combine(std::move(hash_state), std::signbit(value));
+      break;
+
+    case FP_NAN:
+    case FP_ZERO:
+    default:
+      // Category is enough for these.
+      break;
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL:
+      // We can't convert `value` directly to double because this would have
+      // undefined behavior if the value is out of range.
+      // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
+      // guaranteed to be in range for `double`. The truncation is
+      // implementation defined, but that works as long as it is deterministic.
+      int exp;
+      auto mantissa = static_cast<double>(std::frexp(value, &exp));
+      hash_state = H::combine(std::move(hash_state), mantissa, exp);
+  }
+
+  return H::combine(std::move(hash_state), category);
+}
+
+// AbslHashValue() for hashing pointers
+template <typename H, typename T>
+H AbslHashValue(H hash_state, T* ptr) {
+  return hash_internal::hash_bytes(std::move(hash_state), ptr);
+}
+
+// AbslHashValue() for hashing nullptr_t
+template <typename H>
+H AbslHashValue(H hash_state, std::nullptr_t) {
+  return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Composite Types
+// -----------------------------------------------------------------------------
+
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
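+//
+// For example (an illustrative sketch; `MyStruct` is a hypothetical user
+// type), a type becomes hashable by providing an ADL-visible `AbslHashValue`
+// overload:
+//
+//   struct MyStruct {
+//     int a;
+//     std::string b;
+//
+//     template <typename H>
+//     friend H AbslHashValue(H state, const MyStruct& s) {
+//       return H::combine(std::move(state), s.a, s.b);
+//     }
+//   };
+//
+// With that overload in scope, `is_hashable<MyStruct>::value` is true.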
+template <typename T>
+struct is_hashable;
+
+// AbslHashValue() for hashing pairs
+template <typename H, typename T1, typename T2>
+typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
+                        H>::type
+AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
+  return H::combine(std::move(hash_state), p.first, p.second);
+}
+
+// hash_tuple()
+//
+// Helper function for hashing a tuple. The third argument should
+// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
+template <typename H, typename Tuple, size_t... Is>
+H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) {
+  return H::combine(std::move(hash_state), std::get<Is>(t)...);
+}
+
+// AbslHashValue for hashing tuples
+template <typename H, typename... Ts>
+#if defined(_MSC_VER)
+// This SFINAE gets MSVC confused under some conditions. Let's just disable it
+// for now.
+H
+#else  // defined(_MSC_VER)
+typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
+#endif  // defined(_MSC_VER)
+AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
+  return hash_internal::hash_tuple(std::move(hash_state), t,
+                                   absl::make_index_sequence<sizeof...(Ts)>());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for Pointers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing unique_ptr
+template <typename H, typename T, typename D>
+H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
+  return H::combine(std::move(hash_state), ptr.get());
+}
+
+// AbslHashValue for hashing shared_ptr
+template <typename H, typename T>
+H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
+  return H::combine(std::move(hash_state), ptr.get());
+}
+
+// -----------------------------------------------------------------------------
+// AbslHashValue for String-Like Types
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing strings
+//
+// All the string-like types supported here provide the same hash expansion for
+// the same character sequence. These types are:
+//
+//  - `std::string` (and std::basic_string<char, std::char_traits<char>, A> for
+//    any allocator A)
+//  - `absl::string_view` and `std::string_view`
+//
+// For simplicity, we currently support only `char` strings. This support may
+// be broadened, if necessary, but with some caution - this overload would
+// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
+// on the underlying character type.
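+//
+// For example (illustrative; `absl::Hash` is the public entry point declared
+// in absl/hash/hash.h), the following evaluate to the same value:
+//
+//   absl::Hash<std::string>{}(std::string("abc"))
+//   absl::Hash<absl::string_view>{}(absl::string_view("abc"))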
+template <typename H> +H AbslHashValue(H hash_state, absl::string_view str) { + return H::combine( + H::combine_contiguous(std::move(hash_state), str.data(), str.size()), + str.size()); +} + +// ----------------------------------------------------------------------------- +// AbslHashValue for Sequence Containers +// ----------------------------------------------------------------------------- + +// AbslHashValue for hashing std::array +template <typename H, typename T, size_t N> +typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue( + H hash_state, const std::array<T, N>& array) { + return H::combine_contiguous(std::move(hash_state), array.data(), + array.size()); +} + +// AbslHashValue for hashing std::deque +template <typename H, typename T, typename Allocator> +typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue( + H hash_state, const std::deque<T, Allocator>& deque) { + // TODO(gromer): investigate a more efficient implementation taking + // advantage of the chunk structure. + for (const auto& t : deque) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), deque.size()); +} + +// AbslHashValue for hashing std::forward_list +template <typename H, typename T, typename Allocator> +typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue( + H hash_state, const std::forward_list<T, Allocator>& list) { + size_t size = 0; + for (const T& t : list) { + hash_state = H::combine(std::move(hash_state), t); + ++size; + } + return H::combine(std::move(hash_state), size); +} + +// AbslHashValue for hashing std::list +template <typename H, typename T, typename Allocator> +typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue( + H hash_state, const std::list<T, Allocator>& list) { + for (const auto& t : list) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), list.size()); +} + +// AbslHashValue for hashing std::vector +// +// Do not use this for vector<bool>. It does not have a .data(), and a fallback +// for std::hash<> is most likely faster. 
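+//
+// (Illustrative) `std::vector<int>{1, 2, 3}` is hashed with one
+// combine_contiguous call over its contiguous storage (typically 12 bytes)
+// followed by its size; `std::vector<bool>` is rejected by the
+// `!std::is_same<T, bool>` condition below and is picked up by the
+// `std::hash` fallback instead.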
+template <typename H, typename T, typename Allocator> +typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value, + H>::type +AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) { + return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(), + vector.size()), + vector.size()); +} + +// ----------------------------------------------------------------------------- +// AbslHashValue for Ordered Associative Containers +// ----------------------------------------------------------------------------- + +// AbslHashValue for hashing std::map +template <typename H, typename Key, typename T, typename Compare, + typename Allocator> +typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value, + H>::type +AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) { + for (const auto& t : map) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), map.size()); +} + +// AbslHashValue for hashing std::multimap +template <typename H, typename Key, typename T, typename Compare, + typename Allocator> +typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value, + H>::type +AbslHashValue(H hash_state, + const std::multimap<Key, T, Compare, Allocator>& map) { + for (const auto& t : map) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), map.size()); +} + +// AbslHashValue for hashing std::set +template <typename H, typename Key, typename Compare, typename Allocator> +typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue( + H hash_state, const std::set<Key, Compare, Allocator>& set) { + for (const auto& t : set) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), set.size()); +} + +// AbslHashValue for hashing std::multiset +template <typename H, typename Key, typename Compare, typename Allocator> +typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue( + H hash_state, const std::multiset<Key, Compare, Allocator>& set) { + for (const auto& t : set) { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), set.size()); +} + +// ----------------------------------------------------------------------------- +// AbslHashValue for Wrapper Types +// ----------------------------------------------------------------------------- + +// AbslHashValue for hashing absl::optional +template <typename H, typename T> +typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue( + H hash_state, const absl::optional<T>& opt) { + if (opt) hash_state = H::combine(std::move(hash_state), *opt); + return H::combine(std::move(hash_state), opt.has_value()); +} + +// VariantVisitor +template <typename H> +struct VariantVisitor { + H&& hash_state; + template <typename T> + H operator()(const T& t) const { + return H::combine(std::move(hash_state), t); + } +}; + +// AbslHashValue for hashing absl::variant +template <typename H, typename... 
T>
+typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
+AbslHashValue(H hash_state, const absl::variant<T...>& v) {
+  if (!v.valueless_by_exception()) {
+    hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
+  }
+  return H::combine(std::move(hash_state), v.index());
+}
+
+// -----------------------------------------------------------------------------
+
+// hash_range_or_bytes()
+//
+// Mixes all values in the range [data, data+size) into the hash state.
+// This overload accepts only uniquely-represented types, and hashes them by
+// hashing the entire range of bytes.
+template <typename H, typename T>
+typename std::enable_if<is_uniquely_represented<T>::value, H>::type
+hash_range_or_bytes(H hash_state, const T* data, size_t size) {
+  const auto* bytes = reinterpret_cast<const unsigned char*>(data);
+  return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
+}
+
+// hash_range_or_bytes()
+template <typename H, typename T>
+typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
+hash_range_or_bytes(H hash_state, const T* data, size_t size) {
+  for (const auto end = data + size; data < end; ++data) {
+    hash_state = H::combine(std::move(hash_state), *data);
+  }
+  return hash_state;
+}
+
+// InvokeHashTag
+//
+// InvokeHash(H, const T&) invokes the appropriate hash implementation for a
+// hasher of type `H` and a value of type `T`. If `T` is not hashable, there
+// will be no matching overload of InvokeHash().
+// Note: Some platforms (e.g., MSVC) do not support the detection idiom on
+// std::hash. On those platforms the last fallback will be std::hash and
+// InvokeHash() will always have a valid overload even if std::hash<T> is not
+// valid.
+//
+// We try the following options in order:
+//   * If is_uniquely_represented, hash bytes directly.
+//   * ADL AbslHashValue(H, const T&) call.
+//   * Legacy hash (only when ABSL_INTERNAL_LEGACY_HASH_NAMESPACE is defined).
+//   * std::hash<T>
+
+// In MSVC we can't probe std::hash or stdext::hash because it triggers a
+// static_assert instead of failing substitution.
+#if defined(_MSC_VER)
+#undef ABSL_HASH_INTERNAL_CAN_POISON_
+#else  // _MSC_VER
+#define ABSL_HASH_INTERNAL_CAN_POISON_ 1
+#endif  // _MSC_VER
+
+#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
+    ABSL_HASH_INTERNAL_CAN_POISON_
+#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
+#endif
+
+enum class InvokeHashTag {
+  kUniquelyRepresented,
+  kHashValue,
+#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+  kLegacyHash,
+#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+  kStdHash,
+  kNone
+};
+
+// HashSelect
+//
+// Type trait to select the appropriate hash implementation to use.
+// HashSelect<T>::value is an instance of InvokeHashTag that indicates the best
+// available hashing mechanism.
+// See `Note` above about MSVC.
+template <typename T>
+struct HashSelect {
+ private:
+  struct State : HashStateBase<State> {
+    static State combine_contiguous(State hash_state, const unsigned char*,
+                                    size_t);
+    using State::HashStateBase::combine_contiguous;
+  };
+
+  // `Probe<V, Tag>::value` evaluates to `V<T>::value` if it is a valid
+  // expression, and `false` otherwise.
+  // `Probe<V, Tag>::kTag` always evaluates to `Tag`.
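+  //
+  // For example (illustrative), `Probe<ProbeStdHash,
+  // InvokeHashTag::kStdHash>::value` reports whether `std::hash<T>` is usable
+  // for `T`, without producing a hard error on SFINAE-friendly standard
+  // library implementations.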
+  template <template <typename> class V, InvokeHashTag Tag>
+  struct Probe {
+   private:
+    template <typename U, typename std::enable_if<V<U>::value, int>::type = 0>
+    static std::true_type Test(int);
+    template <typename U>
+    static std::false_type Test(char);
+
+   public:
+    static constexpr InvokeHashTag kTag = Tag;
+    static constexpr bool value = decltype(
+        Test<absl::remove_const_t<absl::remove_reference_t<T>>>(0))::value;
+  };
+
+  template <typename U>
+  using ProbeUniquelyRepresented = is_uniquely_represented<U>;
+
+  template <typename U>
+  using ProbeHashValue =
+      std::is_same<State, decltype(AbslHashValue(std::declval<State>(),
+                                                 std::declval<const U&>()))>;
+
+#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+  template <typename U>
+  using ProbeLegacyHash =
+      std::is_convertible<decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<
+                                   U>()(std::declval<const U&>())),
+                          size_t>;
+#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+
+  template <typename U>
+  using ProbeStdHash =
+#if ABSL_HASH_INTERNAL_CAN_POISON_
+      std::is_convertible<decltype(std::hash<U>()(std::declval<const U&>())),
+                          size_t>;
+#else   // ABSL_HASH_INTERNAL_CAN_POISON_
+      std::true_type;
+#endif  // ABSL_HASH_INTERNAL_CAN_POISON_
+
+  template <typename U>
+  using ProbeNone = std::true_type;
+
+ public:
+  // Probe each implementation in order.
+  // `disjunction` provides short-circuiting with respect to instantiation.
+  static constexpr InvokeHashTag value = absl::disjunction<
+      Probe<ProbeUniquelyRepresented, InvokeHashTag::kUniquelyRepresented>,
+      Probe<ProbeHashValue, InvokeHashTag::kHashValue>,
+#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+      Probe<ProbeLegacyHash, InvokeHashTag::kLegacyHash>,
+#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+      Probe<ProbeStdHash, InvokeHashTag::kStdHash>,
+      Probe<ProbeNone, InvokeHashTag::kNone>>::kTag;
+};
+
+template <typename T>
+struct is_hashable : std::integral_constant<bool, HashSelect<T>::value !=
+                                                      InvokeHashTag::kNone> {};
+
+template <typename H, typename T>
+absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kUniquelyRepresented,
+                  H>
+InvokeHash(H state, const T& value) {
+  return hash_internal::hash_bytes(std::move(state), value);
+}
+
+template <typename H, typename T>
+absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kHashValue, H>
+InvokeHash(H state, const T& value) {
+  return AbslHashValue(std::move(state), value);
+}
+
+#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+template <typename H, typename T>
+absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kLegacyHash, H>
+InvokeHash(H state, const T& value) {
+  return hash_internal::hash_bytes(
+      std::move(state), ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
+}
+#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
+
+template <typename H, typename T>
+absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kStdHash, H>
+InvokeHash(H state, const T& value) {
+  return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
+}
+
+// CityHashState
+class CityHashState : public HashStateBase<CityHashState> {
+  // absl::uint128 is not an alias or a thin wrapper around the intrinsic.
+  // We use the intrinsic when available to improve performance.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+  using uint128 = __uint128_t;
+#else   // ABSL_HAVE_INTRINSIC_INT128
+  using uint128 = absl::uint128;
+#endif  // ABSL_HAVE_INTRINSIC_INT128
+
+  // Multiplicative mixing constant, chosen to match the word size.
+  static constexpr uint64_t kMul =
+      sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
+                          : uint64_t{0x9ddfea08eb382d69};
+
+  template <typename T>
+  using IntegralFastPath =
+      conjunction<std::is_integral<T>, is_uniquely_represented<T>>;
+
+ public:
+  // Move-only.
+  CityHashState(CityHashState&&) = default;
+  CityHashState& operator=(CityHashState&&) = default;
+
+  // CityHashState::combine_contiguous()
+  //
+  // Fundamental base case for hash recursion: mixes the given range of bytes
+  // into the hash state.
+  static CityHashState combine_contiguous(CityHashState hash_state,
+                                          const unsigned char* first,
+                                          size_t size) {
+    return CityHashState(
+        CombineContiguousImpl(hash_state.state_, first, size,
+                              std::integral_constant<int, sizeof(size_t)>{}));
+  }
+  using CityHashState::HashStateBase::combine_contiguous;
+
+  // CityHashState::hash()
+  //
+  // For performance reasons in non-opt mode, we specialize this for
+  // integral types.
+  // Otherwise we would be instantiating and calling dozens of functions for
+  // something that is just one multiplication and a couple of XORs.
+  // The result should be the same as running the whole algorithm, but faster.
+  template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
+  static size_t hash(T value) {
+    return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
+  }
+
+  // Overload of CityHashState::hash()
+  template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
+  static size_t hash(const T& value) {
+    return static_cast<size_t>(combine(CityHashState{}, value).state_);
+  }
+
+ private:
+  // Invoked only once for a given argument; that plus the fact that this is
+  // move-only ensures that there is only one non-moved-from object.
+  CityHashState() : state_(Seed()) {}
+
+  // Workaround for MSVC bug.
+  // We make the type copyable to fix the calling convention, even though we
+  // never actually copy it. Keep it private so as not to affect the public
+  // API of the type.
+  CityHashState(const CityHashState&) = default;
+
+  explicit CityHashState(uint64_t state) : state_(state) {}
+
+  // Implementation of the base case for combine_contiguous where we actually
+  // mix the bytes into the state.
+  // Dispatches to different implementations of combine_contiguous depending
+  // on the value of `sizeof(size_t)`.
+  static uint64_t CombineContiguousImpl(uint64_t state,
+                                        const unsigned char* first, size_t len,
+                                        std::integral_constant<int, 4>
+                                        /* sizeof_size_t */);
+  static uint64_t CombineContiguousImpl(uint64_t state,
+                                        const unsigned char* first, size_t len,
+                                        std::integral_constant<int, 8>
+                                        /* sizeof_size_t */);
+
+  // Reads 9 to 16 bytes from p.
+  // The first 8 bytes are returned in .first; the remaining bytes,
+  // zero-padded, in .second.
+  static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
+                                                 size_t len) {
+    uint64_t high = little_endian::Load64(p + len - 8);
+    return {little_endian::Load64(p), high >> (128 - len * 8)};
+  }
+
+  // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
+  static uint64_t Read4To8(const unsigned char* p, size_t len) {
+    return (static_cast<uint64_t>(little_endian::Load32(p + len - 4))
+            << (len - 4) * 8) |
+           little_endian::Load32(p);
+  }
+
+  // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
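+  //
+  // For example (illustrative), len == 3 yields
+  //   p[0] | (p[1] << 8) | (p[2] << 16),
+  // while smaller lengths re-read bytes: len == 1 reads p[0] three times.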
+  static uint32_t Read1To3(const unsigned char* p, size_t len) {
+    return static_cast<uint32_t>((p[0]) |  //
+                                 (p[len / 2] << (len / 2 * 8)) |  //
+                                 (p[len - 1] << ((len - 1) * 8)));
+  }
+
+  ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
+    using MultType =
+        absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
+    // We do the addition in 64-bit space to make sure the 128-bit
+    // multiplication is fast. If we were to do it as MultType the compiler has
+    // to assume that the high word is non-zero and needs to perform 2
+    // multiplications instead of one.
+    MultType m = state + v;
+    m *= kMul;
+    return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
+  }
+
+  // Seed()
+  //
+  // A non-deterministic seed.
+  //
+  // The current purpose of this seed is to generate non-deterministic results
+  // and prevent having users depend on the particular hash values.
+  // It is not meant as a security feature right now, but it leaves the door
+  // open to upgrade it to a true per-process random seed. A true random seed
+  // costs more and we don't need to pay for that right now.
+  //
+  // On platforms with ASLR, we take advantage of it to make a per-process
+  // random value.
+  // See https://en.wikipedia.org/wiki/Address_space_layout_randomization
+  //
+  // On other platforms this is still going to be non-deterministic but most
+  // probably per-build and not per-process.
+  ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
+    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
+  }
+  static const void* const kSeed;
+
+  uint64_t state_;
+};
+
+// CityHashState::CombineContiguousImpl()
+inline uint64_t CityHashState::CombineContiguousImpl(
+    uint64_t state, const unsigned char* first, size_t len,
+    std::integral_constant<int, 4> /* sizeof_size_t */) {
+  // For large values we use CityHash, for small ones we just use a
+  // multiplicative hash.
+  uint64_t v;
+  if (len > 8) {
+    v = absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first),
+                                        len);
+  } else if (len >= 4) {
+    v = Read4To8(first, len);
+  } else if (len > 0) {
+    v = Read1To3(first, len);
+  } else {
+    // Empty ranges have no effect.
+    return state;
+  }
+  return Mix(state, v);
+}
+
+// Overload of CityHashState::CombineContiguousImpl()
+inline uint64_t CityHashState::CombineContiguousImpl(
+    uint64_t state, const unsigned char* first, size_t len,
+    std::integral_constant<int, 8> /* sizeof_size_t */) {
+  // For large values we use CityHash, for small ones we just use a
+  // multiplicative hash.
+  uint64_t v;
+  if (len > 16) {
+    v = absl::hash_internal::CityHash64(reinterpret_cast<const char*>(first),
+                                        len);
+  } else if (len > 8) {
+    auto p = Read9To16(first, len);
+    state = Mix(state, p.first);
+    v = p.second;
+  } else if (len >= 4) {
+    v = Read4To8(first, len);
+  } else if (len > 0) {
+    v = Read1To3(first, len);
+  } else {
+    // Empty ranges have no effect.
+    return state;
+  }
+  return Mix(state, v);
+}
+
+struct AggregateBarrier {};
+
+// HashImpl
+
+// Add a private base class to make sure this type is not an aggregate.
+// Aggregates can be aggregate initialized even if the default constructor is
+// deleted.
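+//
+// For example (illustrative), without the `AggregateBarrier` base,
+//
+//   PoisonedHash h{};  // would compile via aggregate initialization
+//
+// even though every constructor is deleted; the private base makes the type
+// a non-aggregate, so the declaration above is ill-formed, as intended.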
+struct PoisonedHash : private AggregateBarrier {
+  PoisonedHash() = delete;
+  PoisonedHash(const PoisonedHash&) = delete;
+  PoisonedHash& operator=(const PoisonedHash&) = delete;
+};
+
+template <typename T>
+struct HashImpl {
+  size_t operator()(const T& value) const {
+    return CityHashState::hash(value);
+  }
+};
+
+template <typename T>
+struct Hash
+    : absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {};
+
+// HashStateBase::combine()
+template <typename H>
+template <typename T, typename... Ts>
+H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
+  return H::combine(hash_internal::InvokeHash(std::move(state), value),
+                    values...);
+}
+
+// HashStateBase::combine_contiguous()
+template <typename H>
+template <typename T>
+H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
+  return hash_internal::hash_range_or_bytes(std::move(state), data, size);
+}
+}  // namespace hash_internal
+}  // namespace absl
+
+#endif  // ABSL_HASH_INTERNAL_HASH_H_ diff --git a/absl/hash/internal/print_hash_of.cc b/absl/hash/internal/print_hash_of.cc new file mode 100644 index 00000000..b6df31cc --- /dev/null +++ b/absl/hash/internal/print_hash_of.cc @@ -0,0 +1,23 @@ +// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdio>
+#include <cstdlib>
+
+#include "absl/hash/hash.h"
+
+// Prints the hash of argv[1].
+int main(int argc, char** argv) {
+  if (argc < 2) return 1;
+  printf("%zu\n", absl::Hash<int>{}(std::atoi(argv[1])));  // NOLINT
+} diff --git a/absl/hash/internal/spy_hash_state.h b/absl/hash/internal/spy_hash_state.h new file mode 100644 index 00000000..03d795b0 --- /dev/null +++ b/absl/hash/internal/spy_hash_state.h @@ -0,0 +1,218 @@ +// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
+#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "absl/hash/hash.h"
+#include "absl/strings/match.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/str_join.h"
+
+namespace absl {
+namespace hash_internal {
+
+// SpyHashState is an implementation of the HashState API that simply
+// accumulates all input bytes in an internal buffer. This makes it useful
+// for testing AbslHashValue overloads (so long as they are templated on the
+// HashState parameter), since it can report the exact hash representation
+// that the AbslHashValue overload produces.
+// +// Sample usage: +// EXPECT_EQ(SpyHashState::combine(SpyHashState(), foo), +// SpyHashState::combine(SpyHashState(), bar)); +template <typename T> +class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> { + public: + SpyHashStateImpl() + : error_(std::make_shared<absl::optional<std::string>>()) { + static_assert(std::is_void<T>::value, ""); + } + + // Move-only + SpyHashStateImpl(const SpyHashStateImpl&) = delete; + SpyHashStateImpl& operator=(const SpyHashStateImpl&) = delete; + + SpyHashStateImpl(SpyHashStateImpl&& other) noexcept { + *this = std::move(other); + } + + SpyHashStateImpl& operator=(SpyHashStateImpl&& other) noexcept { + hash_representation_ = std::move(other.hash_representation_); + error_ = other.error_; + moved_from_ = other.moved_from_; + other.moved_from_ = true; + return *this; + } + + template <typename U> + SpyHashStateImpl(SpyHashStateImpl<U>&& other) { // NOLINT + hash_representation_ = std::move(other.hash_representation_); + error_ = other.error_; + moved_from_ = other.moved_from_; + other.moved_from_ = true; + } + + template <typename A, typename... Args> + static SpyHashStateImpl combine(SpyHashStateImpl s, const A& a, + const Args&... args) { + // Pass an instance of SpyHashStateImpl<A> when trying to combine `A`. This + // allows us to test that the user only uses this instance for combine calls + // and does not call AbslHashValue directly. + // See AbslHashValue implementation at the bottom. + s = SpyHashStateImpl<A>::HashStateBase::combine(std::move(s), a); + return SpyHashStateImpl::combine(std::move(s), args...); + } + static SpyHashStateImpl combine(SpyHashStateImpl s) { + if (direct_absl_hash_value_error_) { + *s.error_ = "AbslHashValue should not be invoked directly."; + } else if (s.moved_from_) { + *s.error_ = "Used moved-from instance of the hash state object."; + } + return s; + } + + static void SetDirectAbslHashValueError() { + direct_absl_hash_value_error_ = true; + } + + // Two SpyHashStateImpl objects are equal if they hold equal hash + // representations. + friend bool operator==(const SpyHashStateImpl& lhs, + const SpyHashStateImpl& rhs) { + return lhs.hash_representation_ == rhs.hash_representation_; + } + + friend bool operator!=(const SpyHashStateImpl& lhs, + const SpyHashStateImpl& rhs) { + return !(lhs == rhs); + } + + enum class CompareResult { + kEqual, + kASuffixB, + kBSuffixA, + kUnequal, + }; + + static CompareResult Compare(const SpyHashStateImpl& a, + const SpyHashStateImpl& b) { + const std::string a_flat = absl::StrJoin(a.hash_representation_, ""); + const std::string b_flat = absl::StrJoin(b.hash_representation_, ""); + if (a_flat == b_flat) return CompareResult::kEqual; + if (absl::EndsWith(a_flat, b_flat)) return CompareResult::kBSuffixA; + if (absl::EndsWith(b_flat, a_flat)) return CompareResult::kASuffixB; + return CompareResult::kUnequal; + } + + // operator<< prints the hash representation as a hex and ASCII dump, to + // facilitate debugging. + friend std::ostream& operator<<(std::ostream& out, + const SpyHashStateImpl& hash_state) { + out << "[\n"; + for (auto& s : hash_state.hash_representation_) { + size_t offset = 0; + for (char c : s) { + if (offset % 16 == 0) { + out << absl::StreamFormat("\n0x%04x: ", offset); + } + if (offset % 2 == 0) { + out << " "; + } + out << absl::StreamFormat("%02x", c); + ++offset; + } + out << "\n"; + } + return out << "]"; + } + + // The base case of the combine recursion, which writes raw bytes into the + // internal buffer. 
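+  //
+  // For example (illustrative), combining a uint64_t appends its eight raw
+  // bytes as a single entry in hash_representation_.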
+  static SpyHashStateImpl combine_contiguous(SpyHashStateImpl hash_state,
+                                             const unsigned char* begin,
+                                             size_t size) {
+    hash_state.hash_representation_.emplace_back(
+        reinterpret_cast<const char*>(begin), size);
+    return hash_state;
+  }
+
+  using SpyHashStateImpl::HashStateBase::combine_contiguous;
+
+  absl::optional<std::string> error() const {
+    if (moved_from_) {
+      return "Returned a moved-from instance of the hash state object.";
+    }
+    return *error_;
+  }
+
+ private:
+  template <typename U>
+  friend class SpyHashStateImpl;
+
+  // This is true if SpyHashStateImpl<T> has been passed to a call of
+  // AbslHashValue with the wrong type. This detects that the user called
+  // AbslHashValue directly (because the hash state type does not match).
+  static bool direct_absl_hash_value_error_;
+
+  std::vector<std::string> hash_representation_;
+  // This is a shared_ptr because we want all instances from a particular
+  // SpyHashState run to share the field. This way we can set the error for
+  // use-after-move and all the copies will see it.
+  std::shared_ptr<absl::optional<std::string>> error_;
+  bool moved_from_ = false;
+};
+
+template <typename T>
+bool SpyHashStateImpl<T>::direct_absl_hash_value_error_;
+
+template <bool& B>
+struct OdrUse {
+  constexpr OdrUse() {}
+  bool& b = B;
+};
+
+template <void (*)()>
+struct RunOnStartup {
+  static bool run;
+  static constexpr OdrUse<run> kOdrUse{};
+};
+
+template <void (*f)()>
+bool RunOnStartup<f>::run = (f(), true);
+
+template <
+    typename T, typename U,
+    // Only trigger for when (T != U),
+    absl::enable_if_t<!std::is_same<T, U>::value, int> = 0,
+    // This statement works in two ways:
+    //  - First, it instantiates RunOnStartup and forces the initialization of
+    //    `run`, which sets the global variable.
+    //  - Second, it triggers a SFINAE error disabling the overload to prevent
+    //    compile-time errors. If we didn't disable the overload we would get
+    //    ambiguous overload errors, which we don't want.
+    int = RunOnStartup<SpyHashStateImpl<T>::SetDirectAbslHashValueError>::run>
+void AbslHashValue(SpyHashStateImpl<T>, const U&);
+
+using SpyHashState = SpyHashStateImpl<void>;
+
+}  // namespace hash_internal
+}  // namespace absl
+
+#endif  // ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_