path: root/absl/container/internal
author    Abseil Team <absl-team@google.com>  2018-12-04 11:01:12 -0800
committer Ashley Hedberg <ahedberg@google.com>  2018-12-04 16:54:40 -0500
commitfcb104594b0bb4b8ac306cb2f55ecdad40974683 (patch)
treed2d79d246c6a894ca6716f47c15ebb7b8796b36a /absl/container/internal
parent6c7de165d1c82684359ccb630bb5f83263fa5ebc (diff)
Creation of LTS branch "lts_2018_12_18"
- 44b0fafc62d9b8f192e8180cbe9c4b806b339d57 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 926bfeb9fff223429c12224b7514243886323e8d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 13327debebc5c2d1d4991b69fe50450e340e50e4 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 3088e76c597e068479e82508b1770a7ad0c806b6 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f6ae816808cd913e0e2b3e2af14f328fa1071af0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a06c4a1d9093137b7217a5aaba8920d62e835dc0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 7b46e1d31a6b08b1c6da2a13e7b151a20446fa07 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 070f6e47b33a2909d039e620c873204f78809492 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 7990fd459e9339467814ddb95000c87cb1e4d945 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f95179062eb65ce40895cc76f1398cce25394369 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- cc8dcd307b76a575d2e3e0958a4fe4c7193c2f68 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a705aa78dc76fc5c79d501e61dcc077eca68a8a4 Merge pull request #194 from Mizux/windows by Xiaoyi Zhang <zhangxy988@gmail.com>
- a4c3ffff11eec0ee45742f915c255e9f870b7e0f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 01174578651b73021d9b8c3820f6fea707dacdf0 Merge pull request #201 from ccawley2011/fix-byteswap by Matt Calabrese <38107210+mattcalabrese-google@users.noreply.github.com>
- f86f9413856b65afdd61fea938d684b8ab73115a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 94c298e2a0ae409e283cab96c954a685bd865a70 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0884a6a04e4497d11b1b398cc0e422b118bf977a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- c16d5557cd05119b5b7b1318ef778ebe3195b4a1 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 45221ccc4ed643e4209b0cc5798e97203f108fa8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 2019e17a520575ab365b2b5134d71068182c70b8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5b70a8910b2e6fb0ce5193a41873139a126d2f7f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a00bdd176d66ef0b417d9576052a19091fbdf891 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f340f773edab951656b19b6f1a77c964a78ec4c2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 445998d7ac4e5d3c50411d377e3b50e960d2d6c2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- e821380d69a549dc64900693942789d21aa4df5e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f21d187b80e3b7f08fb279775ea9c8b48c636030 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5441bbe1db5d0f2ca24b5b60166367b0966790af Fix code snippet in comment (#174) by Loo Rong Jie <loorongjie@gmail.com>
- 5aae0cffae8ffaacab965756169b34e511b353df Fix CMake build (#173) by Stephan Dollberg <stephan.dollberg@gmail.com>
- 48cd2c3f351ff188bc85684b84a91b6e6d17d896 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- e291c279e458761e77a69b09b129d3d1e81f1e80 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- e01d95528ea2137a4a27a88d1f57c6cb260aafed Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 8ff1374008259719b54a8cb128ef951c02da164c Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 02451914b9ad5320f81f56a89f3eef1f8683227c Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 921fd5cf02ec0d665439a790148d59faa7d4a72c Merge pull request #166 from rongjiecomputer/cmake-test by Gennadiy Civil <gennadiycivil@users.noreply.github.com>
- fb462224c058487763f263b7995d70efd0242c17 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- c075ad321696fa5072e097f0a51e4fe76a6fe13e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0f4bc966754ec6cd28d5f03467d56f1efdc598e3 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 6c7e5ffc43decd92f7bdfc510ad8a245a20b6dea Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- d6df769173bf0263489f98874b93034db0e479a2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 28080f5f050c9530aa9f2b39c60d8217038d64ff Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 9c987f429bba32fb4446280fd3b91e2472d71d4d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5e7d459eeca7bc53deab0ee9634601386b53d7c0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- bed5bd6e185c7e0311f3a1f2dab4c96083dac636 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- fefc83638fb69395d259ed245699310610429064 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- d8cfe9f2a77fbee02c09642491e62a3f3677e0f6 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- ad5c960b2eb914881d1ceba0e996a0a8f3f6ca59 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 86f0fe93ad9d6d033a319476736a3256369c1f75 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f0f15c2778b0e4959244dd25e63f445a455870f5 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 29ff6d4860070bf8fcbd39c8805d0c32d56628a3 Removed "warning treated as error" flag from MSVC (#153) by vocaviking <vocaviking@users.noreply.github.com>
- 083d04dd4a62ebbf037079b06e49b323c5e1192a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- bea85b52733022294eef108a2e42d77b616ddca2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 8f96be6ca60d967bd4b37f93d0a03bcff4145200 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 92e07e5590752d6b8e67f7f2f86c6286561e8cea Merge pull request #152 from clnperez/fix-multi-defines-p... by Derek Mauro <761129+derekmauro@users.noreply.github.com>
- 2125e6444a9de9e41f21ecdc674dd7d8759c149d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 9acad869d21731f5bc50430a33fe61cc0ffcbb0b Merge pull request #150 from OlafvdSpek/patch-2 by Jonathan Cohen <cohenjon@google.com>
- c2e00d341913bf03b4597ade5b056042e23e8c58 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 9e060686d1c325f34f9806b45fe77bafeed00aee Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 7aa411ceafc1272a28579cca739a97a2fb79055a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 2c5af55ed34850d8b7dd46177c8ca53fdfda920e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 44aa275286baf97fc13529aca547a88b180beb08 Merge pull request #143 from rongjiecomputer/kernel by Xiaoyi Zhang <zhangxy988@gmail.com>
- 42f22a28401c952f1fc5942231c7fdac80811bf5 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- b973bc53ef366f0253b85eeed9a79b241884a843 Merge pull request #139 from siepkes/smartos-support by ahedberg <ahedberg@google.com>
- e0def7473e52336f58759e11db4cd9467e5e0356 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f826f1d489b61b64df1d94afbe5981841a82e5fa Merge pull request #138 from edbaunton/remove-deprecated-... by ahedberg <ahedberg@google.com>
- 7b50a4a94b0c7df68b3a854c850b551aaef0a8b4 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a5030ca5125b9d557ecfeea8acc8b1a8e49f6d27 Merge pull request #144 from rongjiecomputer/winsock2 by Xiaoyi Zhang <zhangxy988@gmail.com>
- 02687955b7ca8fc02ada9b14bc247deeb108d341 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 8f612ebb152fb7e05643a2bcf78cb89a8c0641ad Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 134496a31d8b324f762de3bee9a002658c984456 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- ba8d6cf07766263723e86736f20a51c1c9c67b19 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- be1e84b988fceabcea4fc9e93f899539f0c81901 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 16ac2ec2e38cdf47f9330a312e319d57da659c10 Merge pull request #134 from rongjiecomputer/cmake by Alex Strelnikov <strel@google.com>
- 7efd8dc0f1075356e9c7caa950afd1ecf854e8b9 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 87a4c07856e7dc69958019d47b2f02ae47746ec0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 4491d606df34c44efda47b6d17b605262f17e182 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>

GitOrigin-RevId: 44b0fafc62d9b8f192e8180cbe9c4b806b339d57
Change-Id: I2c427b5b41b2d34101922048b00f3d9dafcb498d
Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/compressed_tuple.h  177
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc  168
-rw-r--r--  absl/container/internal/container_memory.h  407
-rw-r--r--  absl/container/internal/container_memory_test.cc  190
-rw-r--r--  absl/container/internal/hash_function_defaults.h  145
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc  303
-rw-r--r--  absl/container/internal/hash_generator_testing.cc  74
-rw-r--r--  absl/container/internal/hash_generator_testing.h  152
-rw-r--r--  absl/container/internal/hash_policy_testing.h  184
-rw-r--r--  absl/container/internal/hash_policy_testing_test.cc  45
-rw-r--r--  absl/container/internal/hash_policy_traits.h  191
-rw-r--r--  absl/container/internal/hash_policy_traits_test.cc  144
-rw-r--r--  absl/container/internal/hashtable_debug.h  110
-rw-r--r--  absl/container/internal/hashtable_debug_hooks.h  83
-rw-r--r--  absl/container/internal/layout.h  740
-rw-r--r--  absl/container/internal/layout_test.cc  1557
-rw-r--r--  absl/container/internal/node_hash_policy.h  90
-rw-r--r--  absl/container/internal/node_hash_policy_test.cc  69
-rw-r--r--  absl/container/internal/raw_hash_map.h  187
-rw-r--r--  absl/container/internal/raw_hash_set.cc  48
-rw-r--r--  absl/container/internal/raw_hash_set.h  1950
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc  430
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  1830
-rw-r--r--  absl/container/internal/test_instance_tracker.cc  5
-rw-r--r--  absl/container/internal/test_instance_tracker.h  56
-rw-r--r--  absl/container/internal/test_instance_tracker_test.cc  22
-rw-r--r--  absl/container/internal/tracked.h  80
-rw-r--r--  absl/container/internal/unordered_map_constructor_test.h  407
-rw-r--r--  absl/container/internal/unordered_map_lookup_test.h  117
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h  275
-rw-r--r--  absl/container/internal/unordered_map_test.cc  40
-rw-r--r--  absl/container/internal/unordered_set_constructor_test.h  411
-rw-r--r--  absl/container/internal/unordered_set_lookup_test.h  91
-rw-r--r--  absl/container/internal/unordered_set_modifiers_test.h  190
-rw-r--r--  absl/container/internal/unordered_set_test.cc  39
35 files changed, 10998 insertions, 9 deletions
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
new file mode 100644
index 00000000..29fe7c12
--- /dev/null
+++ b/absl/container/internal/compressed_tuple.h
@@ -0,0 +1,177 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// http://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/utility/utility.h"
+
+#ifdef _MSC_VER
+// We need to mark these classes with this declspec to ensure that the
+// empty base class optimization, which CompressedTuple relies on, is
+// applied.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else // _MSC_VER
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif // _MSC_VER
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+ : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+ return __is_final(T);
+#else
+ return false;
+#endif
+}
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+}
+
+// The storage class provides two specializations:
+// - For empty classes, it stores T as a base class.
+// - For everything else, it stores T as a member.
+template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+struct Storage {
+ using T = ElemT<D, I>;
+ T value;
+ constexpr Storage() = default;
+ explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
+ constexpr const T& get() const { return value; }
+ T& get() { return value; }
+};
+
+template <typename D, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
+ : ElemT<D, I> {
+ using T = internal_compressed_tuple::ElemT<D, I>;
+ constexpr Storage() = default;
+ explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
+ constexpr const T& get() const { return *this; }
+ T& get() { return *this; }
+};
+
+template <typename D, typename I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+ CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+ // We use the dummy identity function through std::integral_constant to
+ // convince MSVC of accepting and expanding I in that context. Without it
+ // you would get:
+ // error C3548: 'I': parameter pack cannot be used in this context
+ : Storage<CompressedTuple<Ts...>,
+ std::integral_constant<size_t, I>::value>... {
+ constexpr CompressedTupleImpl() = default;
+ explicit constexpr CompressedTupleImpl(Ts&&... args)
+ : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+};
+
+} // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are
+// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// http://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+ : private internal_compressed_tuple::CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+ private:
+ template <int I>
+ using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+ public:
+ constexpr CompressedTuple() = default;
+ explicit constexpr CompressedTuple(Ts... base)
+ : CompressedTuple::CompressedTupleImpl(absl::forward<Ts>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() {
+ return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const {
+ return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ }
+};
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
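A minimal usage sketch of the header above (not part of the patch; EmptyDeleter is a hypothetical empty policy class): because empty elements are stored as base classes, they contribute no storage.

// Sketch only. An empty element goes through the Storage<D, I, true>
// base-class specialization, so the tuple is no larger than its
// non-empty members.
#include <cassert>
#include "absl/container/internal/compressed_tuple.h"

struct EmptyDeleter {};  // hypothetical empty policy class

int main() {
  absl::container_internal::CompressedTuple<int*, EmptyDeleter> t(
      nullptr, EmptyDeleter{});
  static_assert(sizeof(t) == sizeof(int*),
                "the empty element adds no size");
  assert(t.get<0>() == nullptr);
}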
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
new file mode 100644
index 00000000..2b5ed4a4
--- /dev/null
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -0,0 +1,168 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/compressed_tuple.h"
+
+#include <set>
+#include <string>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+template <int>
+struct Empty {};
+
+template <typename T>
+struct NotEmpty {
+ T value;
+};
+
+template <typename T, typename U>
+struct TwoValues {
+ T value1;
+ U value2;
+};
+
+TEST(CompressedTupleTest, Sizeof) {
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
+ EXPECT_EQ(sizeof(int),
+ sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
+
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, NotEmpty<double>>));
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
+}
+
+TEST(CompressedTupleTest, Access) {
+ struct S {
+ std::string x;
+ };
+ CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
+ EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_EQ("ABC", x.get<2>().x);
+}
+
+TEST(CompressedTupleTest, NonClasses) {
+ CompressedTuple<int, const char*> x(7, "ABC");
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_STREQ("ABC", x.get<1>());
+}
+
+TEST(CompressedTupleTest, MixClassAndNonClass) {
+ CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
+ {1.25});
+ struct Mock {
+ int v;
+ const char* p;
+ double d;
+ };
+ EXPECT_EQ(sizeof(x), sizeof(Mock));
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_STREQ("ABC", x.get<1>());
+ EXPECT_EQ(1.25, x.get<3>().value);
+}
+
+TEST(CompressedTupleTest, Nested) {
+ CompressedTuple<int, CompressedTuple<int>,
+ CompressedTuple<int, CompressedTuple<int>>>
+ x(1, CompressedTuple<int>(2),
+ CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
+ EXPECT_EQ(1, x.get<0>());
+ EXPECT_EQ(2, x.get<1>().get<0>());
+ EXPECT_EQ(3, x.get<2>().get<0>());
+ EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
+
+ CompressedTuple<Empty<0>, Empty<0>,
+ CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
+ y;
+ std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
+ &y.get<2>().get<1>().get<0>()};
+#ifdef _MSC_VER
+  // MSVC has a bug where many instances of the same base class are laid out
+  // at the same address when using __declspec(empty_bases).
+  // This will be fixed in a future version of MSVC.
+ int expected = 1;
+#else
+ int expected = 4;
+#endif
+ EXPECT_EQ(expected, sizeof(y));
+ EXPECT_EQ(expected, empties.size());
+ EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
+
+ EXPECT_EQ(4 * sizeof(char),
+ sizeof(CompressedTuple<CompressedTuple<char, char>,
+ CompressedTuple<char, char>>));
+ EXPECT_TRUE(
+ (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
+ CompressedTuple<Empty<1>>>>::value));
+}
+
+TEST(CompressedTupleTest, Reference) {
+ int i = 7;
+ std::string s = "Very long std::string that goes in the heap";
+ CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
+
+  // Sanity check. We should not have moved from `s`.
+ EXPECT_EQ(s, "Very long std::string that goes in the heap");
+
+ EXPECT_EQ(x.get<0>(), x.get<1>());
+ EXPECT_NE(&x.get<0>(), &x.get<1>());
+ EXPECT_EQ(&x.get<1>(), &i);
+
+ EXPECT_EQ(x.get<2>(), x.get<3>());
+ EXPECT_NE(&x.get<2>(), &x.get<3>());
+ EXPECT_EQ(&x.get<3>(), &s);
+}
+
+TEST(CompressedTupleTest, NoElements) {
+ CompressedTuple<> x;
+ static_cast<void>(x); // Silence -Wunused-variable.
+ EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
+}
+
+TEST(CompressedTupleTest, Constexpr) {
+ constexpr CompressedTuple<int, double, CompressedTuple<int>> x(
+ 7, 1.25, CompressedTuple<int>(5));
+ constexpr int x0 = x.get<0>();
+ constexpr double x1 = x.get<1>();
+ constexpr int x2 = x.get<2>().get<0>();
+ EXPECT_EQ(x0, 7);
+ EXPECT_EQ(x1, 1.25);
+ EXPECT_EQ(x2, 5);
+}
+
+#if defined(__clang__) || defined(__GNUC__)
+TEST(CompressedTupleTest, EmptyFinalClass) {
+ struct S final {
+ int f() const { return 5; }
+ };
+ CompressedTuple<S> x;
+ EXPECT_EQ(x.get<0>().f(), 5);
+}
+#endif
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
new file mode 100644
index 00000000..ddccbe05
--- /dev/null
+++ b/absl/container/internal/container_memory.h
@@ -0,0 +1,407 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above a
+// certain threshold (usually either alignof(std::max_align_t) or
+// alignof(void*)). Allocate() doesn't apply alignment corrections. If the
+// underlying allocator returns an insufficiently aligned pointer, that's
+// what you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ struct alignas(Alignment) M {};
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ A mem_alloc(*alloc);
+ void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+ "allocator does not respect alignment");
+ return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ struct alignas(Alignment) M {};
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ A mem_alloc(*alloc);
+ AT::deallocate(mem_alloc, static_cast<M*>(p),
+ (n + sizeof(M) - 1) / sizeof(M));
+}
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the
+// args specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+ absl::index_sequence<I...>) {
+ absl::allocator_traits<Alloc>::construct(
+ *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+ template <class... Args>
+ decltype(std::declval<F>()(std::declval<T>())) operator()(
+ Args&&... args) const {
+ return std::forward<F>(f)(T(std::forward<Args>(args)...));
+ }
+ F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+ Tuple&& t, absl::index_sequence<Is...>, F&& f) {
+ return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+ std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
+ -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+ return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be a
+// tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+ TupleRefImpl(std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>())) {
+ return TupleRefImpl(
+ std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+ const auto& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+}
+
+} // namespace memory_internal
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the
+// args specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+ memory_internal::ConstructFromTupleImpl(
+ alloc, ptr, std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+ Tuple&& t, F&& f) {
+ return memory_internal::WithConstructedImpl<T>(
+ std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+ std::forward<F>(f));
+}
+
+// Given arguments of a std::pair's constructor, PairArgs() returns a pair of
+// tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+//    std::pair<F, S> p(std::piecewise_construct,
+//                      std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+ return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+ std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+ const std::pair<F, S>& p) {
+ return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+ return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+ -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)))) {
+ return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+ -> decltype(memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+ return memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+ const auto& key = arg;
+ return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
+
+// Helper functions for asan and msan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+ ASAN_POISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+ __msan_poison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+ ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+ __msan_unpoison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+ SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+ SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout
+// type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
+ static constexpr size_t kFirst = -1;
+ static constexpr size_t kSecond = -1;
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+ static constexpr size_t kFirst = offsetof(Pair, first);
+ static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+ struct Pair {
+ K first;
+ V second;
+ };
+
+ // Is P layout-compatible with Pair?
+ template <class P>
+ static constexpr bool LayoutCompatible() {
+ return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
+ alignof(P) == alignof(Pair) &&
+ memory_internal::OffsetOf<P>::kFirst ==
+ memory_internal::OffsetOf<Pair>::kFirst &&
+ memory_internal::OffsetOf<P>::kSecond ==
+ memory_internal::OffsetOf<Pair>::kSecond;
+ }
+
+ public:
+ // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
+ // then it is safe to store them in a union and read from either.
+ static constexpr bool value = std::is_standard_layout<K>() &&
+ std::is_standard_layout<Pair>() &&
+ memory_internal::OffsetOf<Pair>::kFirst == 0 &&
+ LayoutCompatible<std::pair<K, V>>() &&
+ LayoutCompatible<std::pair<const K, V>>();
+};
+
+} // namespace memory_internal
+
+// If kMutableKeys is false, only the value member is accessed.
+//
+// If kMutableKeys is true, key is accessed through all slots while value and
+// mutable_value are accessed only via INITIALIZED slots. Slots are created and
+// destroyed via mutable_value so that the key can be moved later.
+template <class K, class V>
+union slot_type {
+ private:
+ static void emplace(slot_type* slot) {
+    // The construction of the union itself does nothing at runtime, but it
+    // allows us to access its members without violating aliasing rules.
+ new (slot) slot_type;
+ }
+ // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
+ // or the other via slot_type. We are also free to access the key via
+ // slot_type::key in this case.
+ using kMutableKeys =
+ std::integral_constant<bool,
+ memory_internal::IsLayoutCompatible<K, V>::value>;
+
+ public:
+ slot_type() {}
+ ~slot_type() = delete;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ value_type value;
+ mutable_value_type mutable_value;
+ K key;
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
+ std::forward<Args>(args)...);
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::forward<Args>(args)...);
+ }
+ }
+
+ // Construct this slot by moving from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &slot->mutable_value, std::move(other->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::move(other->value));
+ }
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
+ }
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ emplace(new_slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
+ std::move(old_slot->value));
+ }
+ destroy(alloc, old_slot);
+ }
+
+ template <class Allocator>
+ static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
+ if (kMutableKeys::value) {
+ using std::swap;
+ swap(a->mutable_value, b->mutable_value);
+ } else {
+ value_type tmp = std::move(a->value);
+ absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
+ std::move(b->value));
+ absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
+ std::move(tmp));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
+ if (kMutableKeys::value) {
+ dest->mutable_value = std::move(src->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
+ std::move(src->value));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* first, slot_type* last,
+ slot_type* result) {
+ for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
+ move(alloc, src, dest);
+ }
+};
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
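A brief sketch of the aligned-allocation helpers above (not part of the patch; the size and alignment are arbitrary illustration values):

// Sketch only. Allocate<Alignment>() rebinds the allocator to an aligned
// dummy type and rounds the byte count up to whole objects of that type;
// the matching Deallocate<Alignment>() must use the same alignment and size.
#include <memory>
#include "absl/container/internal/container_memory.h"

void AlignedScratchDemo() {
  std::allocator<char> alloc;
  void* p = absl::container_internal::Allocate<16>(&alloc, 40);
  // p points to at least 40 bytes, 16-byte aligned (assuming the allocator
  // honors the request, per the caveat in the header comment).
  absl::container_internal::Deallocate<16>(&alloc, p, 40);
}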
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
new file mode 100644
index 00000000..da87ca20
--- /dev/null
+++ b/absl/container/internal/container_memory_test.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/container_memory.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::testing::Pair;
+
+TEST(Memory, AlignmentLargerThanBase) {
+ std::allocator<int8_t> alloc;
+ void* mem = Allocate<2>(&alloc, 3);
+ EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+ memcpy(mem, "abc", 3);
+ Deallocate<2>(&alloc, mem, 3);
+}
+
+TEST(Memory, AlignmentSmallerThanBase) {
+ std::allocator<int64_t> alloc;
+ void* mem = Allocate<2>(&alloc, 3);
+ EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+ memcpy(mem, "abc", 3);
+ Deallocate<2>(&alloc, mem, 3);
+}
+
+class Fixture : public ::testing::Test {
+ using Alloc = std::allocator<std::string>;
+
+ public:
+ Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); }
+ ~Fixture() override {
+ std::allocator_traits<Alloc>::destroy(*alloc(), ptr_);
+ std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1);
+ }
+ std::string* ptr() { return ptr_; }
+ Alloc* alloc() { return &alloc_; }
+
+ private:
+ Alloc alloc_;
+ std::string* ptr_;
+};
+
+TEST_F(Fixture, ConstructNoArgs) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple());
+ EXPECT_EQ(*ptr(), "");
+}
+
+TEST_F(Fixture, ConstructOneArg) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde"));
+ EXPECT_EQ(*ptr(), "abcde");
+}
+
+TEST_F(Fixture, ConstructTwoArg) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a'));
+ EXPECT_EQ(*ptr(), "aaaaa");
+}
+
+TEST(PairArgs, NoArgs) {
+ EXPECT_THAT(PairArgs(),
+ Pair(std::forward_as_tuple(), std::forward_as_tuple()));
+}
+
+TEST(PairArgs, TwoArgs) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(1, 'A'));
+}
+
+TEST(PairArgs, Pair) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(std::make_pair(1, 'A')));
+}
+
+TEST(PairArgs, Piecewise) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(std::piecewise_construct, std::forward_as_tuple(1),
+ std::forward_as_tuple('A')));
+}
+
+TEST(WithConstructed, Simple) {
+ EXPECT_EQ(1, WithConstructed<absl::string_view>(
+ std::make_tuple(std::string("a")),
+ [](absl::string_view str) { return str.size(); }));
+}
+
+template <class F, class Arg>
+decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>()))
+DecomposeValueImpl(int, F&& f, Arg&& arg) {
+ return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+template <class F, class Arg>
+const char* DecomposeValueImpl(char, F&& f, Arg&& arg) {
+ return "not decomposable";
+}
+
+template <class F, class Arg>
+decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>()))
+TryDecomposeValue(F&& f, Arg&& arg) {
+ return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+TEST(DecomposeValue, Decomposable) {
+ auto f = [](const int& x, int&& y) {
+ EXPECT_EQ(&x, &y);
+ EXPECT_EQ(42, x);
+ return 'A';
+ };
+ EXPECT_EQ('A', TryDecomposeValue(f, 42));
+}
+
+TEST(DecomposeValue, NotDecomposable) {
+ auto f = [](void*) {
+ ADD_FAILURE() << "Must not be called";
+ return 'A';
+ };
+ EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42));
+}
+
+template <class F, class... Args>
+decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...))
+DecomposePairImpl(int, F&& f, Args&&... args) {
+ return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+template <class F, class... Args>
+const char* DecomposePairImpl(char, F&& f, Args&&... args) {
+ return "not decomposable";
+}
+
+template <class F, class... Args>
+decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...))
+TryDecomposePair(F&& f, Args&&... args) {
+ return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+TEST(DecomposePair, Decomposable) {
+ auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+ std::tuple<double>&& v) {
+ EXPECT_EQ(&x, &std::get<0>(k));
+ EXPECT_EQ(42, x);
+ EXPECT_EQ(0.5, std::get<0>(v));
+ return 'A';
+ };
+ EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5));
+ EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5)));
+ EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct,
+ std::make_tuple(42), std::make_tuple(0.5)));
+}
+
+TEST(DecomposePair, NotDecomposable) {
+ auto f = [](...) {
+ ADD_FAILURE() << "Must not be called";
+ return 'A';
+ };
+ EXPECT_STREQ("not decomposable",
+ TryDecomposePair(f));
+ EXPECT_STREQ("not decomposable",
+ TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
+ std::make_tuple(0.5)));
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
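For orientation, a standalone sketch of the DecomposeValue contract these tests probe (not part of the patch; the callback is illustrative): the key arrives as a const reference to the same object that is then forwarded as the value.

// Sketch only.
#include "absl/container/internal/container_memory.h"

bool KeyEqualsValue() {
  auto cb = [](const int& key, int&& value) { return key == value; };
  return absl::container_internal::DecomposeValue(cb, 42);  // true
}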
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
new file mode 100644
index 00000000..72c75fa0
--- /dev/null
+++ b/absl/container/internal/hash_function_defaults.h
@@ -0,0 +1,145 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power-of-2-sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity.
+// This is a very weak hash function for SwissTable, or for any other
+// power-of-2-sized hashtable implementation, and will lead to excessive
+// collisions. For SwissTable we use murmur3-style mixing to reduce
+// collisions to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any and possibly different types. Note that, as in the standard, the hash
+// and equal functions are still bound to T. This is important because some
+// type U can be hashed by/tested for equality differently depending on T. A
+// notable example is `const char*`. `const char*` is treated as a C-style
+// string when the hash function is hash<std::string> but as a pointer when
+// the hash function is hash<void*>.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+ using Hash = absl::Hash<T>;
+ using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+ using is_transparent = void;
+
+ size_t operator()(absl::string_view v) const {
+ return absl::Hash<absl::string_view>{}(v);
+ }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+ using Hash = StringHash;
+ struct Eq {
+ using is_transparent = void;
+ bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ };
+};
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+ struct Hash {
+ using is_transparent = void;
+ template <class U>
+ size_t operator()(const U& ptr) const {
+ return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+ }
+ };
+ struct Eq {
+ using is_transparent = void;
+ template <class A, class B>
+ bool operator()(const A& a, const B& b) const {
+ return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+ }
+ };
+
+ private:
+ static const T* ToPtr(const T* ptr) { return ptr; }
+ template <class U, class D>
+ static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+ return ptr.get();
+ }
+ template <class U>
+ static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+ return ptr.get();
+ }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+
+// This header's visibility is restricted. If you need to access the default
+// hasher please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
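A small sketch of the heterogeneous-lookup property described above (not part of the patch): the transparent string functors hash and compare std::string, absl::string_view, and const char* keys interchangeably.

// Sketch only. All three string-like spellings of the same key agree.
#include <string>
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/strings/string_view.h"

bool StringKeysAgree() {
  absl::container_internal::hash_default_hash<std::string> hash;
  absl::container_internal::hash_default_eq<std::string> eq;
  return hash("key") == hash(absl::string_view("key")) &&
         eq(std::string("key"), "key");
}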
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
new file mode 100644
index 00000000..4610843a
--- /dev/null
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -0,0 +1,303 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_function_defaults.h"
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::testing::Types;
+
+TEST(Eq, Int32) {
+ hash_default_eq<int32_t> eq;
+ EXPECT_TRUE(eq(1, 1u));
+ EXPECT_TRUE(eq(1, char{1}));
+ EXPECT_TRUE(eq(1, true));
+ EXPECT_TRUE(eq(1, double{1.1}));
+ EXPECT_FALSE(eq(1, char{2}));
+ EXPECT_FALSE(eq(1, 2u));
+ EXPECT_FALSE(eq(1, false));
+ EXPECT_FALSE(eq(1, 2.));
+}
+
+TEST(Hash, Int32) {
+ hash_default_hash<int32_t> hash;
+ auto h = hash(1);
+ EXPECT_EQ(h, hash(1u));
+ EXPECT_EQ(h, hash(char{1}));
+ EXPECT_EQ(h, hash(true));
+ EXPECT_EQ(h, hash(double{1.1}));
+ EXPECT_NE(h, hash(2u));
+ EXPECT_NE(h, hash(char{2}));
+ EXPECT_NE(h, hash(false));
+ EXPECT_NE(h, hash(2.));
+}
+
+enum class MyEnum { A, B, C, D };
+
+TEST(Eq, Enum) {
+ hash_default_eq<MyEnum> eq;
+ EXPECT_TRUE(eq(MyEnum::A, MyEnum::A));
+ EXPECT_FALSE(eq(MyEnum::A, MyEnum::B));
+}
+
+TEST(Hash, Enum) {
+ hash_default_hash<MyEnum> hash;
+
+ for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) {
+ auto h = hash(e);
+ EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e)));
+ EXPECT_NE(h, hash(MyEnum::D));
+ }
+}
+
+using StringTypes = ::testing::Types<std::string, absl::string_view>;
+
+template <class T>
+struct EqString : ::testing::Test {
+ hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_CASE(EqString, StringTypes);
+
+template <class T>
+struct HashString : ::testing::Test {
+ hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_CASE(HashString, StringTypes);
+
+TYPED_TEST(EqString, Works) {
+ auto eq = this->key_eq;
+ EXPECT_TRUE(eq("a", "a"));
+ EXPECT_TRUE(eq("a", absl::string_view("a")));
+ EXPECT_TRUE(eq("a", std::string("a")));
+ EXPECT_FALSE(eq("a", "b"));
+ EXPECT_FALSE(eq("a", absl::string_view("b")));
+ EXPECT_FALSE(eq("a", std::string("b")));
+}
+
+TYPED_TEST(HashString, Works) {
+ auto hash = this->hasher;
+ auto h = hash("a");
+ EXPECT_EQ(h, hash(absl::string_view("a")));
+ EXPECT_EQ(h, hash(std::string("a")));
+ EXPECT_NE(h, hash(absl::string_view("b")));
+ EXPECT_NE(h, hash(std::string("b")));
+}
+
+struct NoDeleter {
+ template <class T>
+ void operator()(const T* ptr) const {}
+};
+
+using PointerTypes =
+ ::testing::Types<const int*, int*, std::unique_ptr<const int>,
+ std::unique_ptr<const int, NoDeleter>,
+ std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>,
+ std::shared_ptr<const int>, std::shared_ptr<int>>;
+
+template <class T>
+struct EqPointer : ::testing::Test {
+ hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_CASE(EqPointer, PointerTypes);
+
+template <class T>
+struct HashPointer : ::testing::Test {
+ hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_CASE(HashPointer, PointerTypes);
+
+TYPED_TEST(EqPointer, Works) {
+ int dummy;
+ auto eq = this->key_eq;
+ auto sptr = std::make_shared<int>();
+ std::shared_ptr<const int> csptr = sptr;
+ int* ptr = sptr.get();
+ const int* cptr = ptr;
+ std::unique_ptr<int, NoDeleter> uptr(ptr);
+ std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+ EXPECT_TRUE(eq(ptr, cptr));
+ EXPECT_TRUE(eq(ptr, sptr));
+ EXPECT_TRUE(eq(ptr, uptr));
+ EXPECT_TRUE(eq(ptr, csptr));
+ EXPECT_TRUE(eq(ptr, cuptr));
+ EXPECT_FALSE(eq(&dummy, cptr));
+ EXPECT_FALSE(eq(&dummy, sptr));
+ EXPECT_FALSE(eq(&dummy, uptr));
+ EXPECT_FALSE(eq(&dummy, csptr));
+ EXPECT_FALSE(eq(&dummy, cuptr));
+}
+
+TEST(Hash, DerivedAndBase) {
+ struct Base {};
+ struct Derived : Base {};
+
+ hash_default_hash<Base*> hasher;
+
+ Base base;
+ Derived derived;
+ EXPECT_NE(hasher(&base), hasher(&derived));
+ EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived));
+
+ auto dp = std::make_shared<Derived>();
+ EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp));
+}
+
+TEST(Hash, FunctionPointer) {
+ using Func = int (*)();
+ hash_default_hash<Func> hasher;
+ hash_default_eq<Func> eq;
+
+ Func p1 = [] { return 1; }, p2 = [] { return 2; };
+ EXPECT_EQ(hasher(p1), hasher(p1));
+ EXPECT_TRUE(eq(p1, p1));
+
+ EXPECT_NE(hasher(p1), hasher(p2));
+ EXPECT_FALSE(eq(p1, p2));
+}
+
+TYPED_TEST(HashPointer, Works) {
+ int dummy;
+ auto hash = this->hasher;
+ auto sptr = std::make_shared<int>();
+ std::shared_ptr<const int> csptr = sptr;
+ int* ptr = sptr.get();
+ const int* cptr = ptr;
+ std::unique_ptr<int, NoDeleter> uptr(ptr);
+ std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+ EXPECT_EQ(hash(ptr), hash(cptr));
+ EXPECT_EQ(hash(ptr), hash(sptr));
+ EXPECT_EQ(hash(ptr), hash(uptr));
+ EXPECT_EQ(hash(ptr), hash(csptr));
+ EXPECT_EQ(hash(ptr), hash(cuptr));
+ EXPECT_NE(hash(&dummy), hash(cptr));
+ EXPECT_NE(hash(&dummy), hash(sptr));
+ EXPECT_NE(hash(&dummy), hash(uptr));
+ EXPECT_NE(hash(&dummy), hash(csptr));
+ EXPECT_NE(hash(&dummy), hash(cuptr));
+}
+
+// Cartesian product of (std::string, absl::string_view)
+// with (std::string, absl::string_view, const char*).
+using StringTypesCartesianProduct = Types<
+ // clang-format off
+
+ std::pair<std::string, std::string>,
+ std::pair<std::string, absl::string_view>,
+ std::pair<std::string, const char*>,
+
+ std::pair<absl::string_view, std::string>,
+ std::pair<absl::string_view, absl::string_view>,
+ std::pair<absl::string_view, const char*>>;
+// clang-format on
+
+constexpr char kFirstString[] = "abc123";
+constexpr char kSecondString[] = "ijk456";
+
+template <typename T>
+struct StringLikeTest : public ::testing::Test {
+ typename T::first_type a1{kFirstString};
+ typename T::second_type b1{kFirstString};
+ typename T::first_type a2{kSecondString};
+ typename T::second_type b2{kSecondString};
+ hash_default_eq<typename T::first_type> eq;
+ hash_default_hash<typename T::first_type> hash;
+};
+
+TYPED_TEST_CASE_P(StringLikeTest);
+
+TYPED_TEST_P(StringLikeTest, Eq) {
+ EXPECT_TRUE(this->eq(this->a1, this->b1));
+ EXPECT_TRUE(this->eq(this->b1, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, NotEq) {
+ EXPECT_FALSE(this->eq(this->a1, this->b2));
+ EXPECT_FALSE(this->eq(this->b2, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, HashEq) {
+ EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
+ EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
+ // It would be a poor hash function which collides on these strings.
+ EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
+}
+
+REGISTER_TYPED_TEST_CASE_P(StringLikeTest, Eq, NotEq, HashEq);
+INSTANTIATE_TYPED_TEST_CASE_P(All, StringLikeTest,
+                              StringTypesCartesianProduct);
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+enum Hash : size_t {
+ kStd = 0x2, // std::hash
+#ifdef _MSC_VER
+ kExtension = kStd, // In MSVC, std::hash == ::hash
+#else // _MSC_VER
+ kExtension = 0x4, // ::hash (GCC extension)
+#endif // _MSC_VER
+};
+
+// H is a bitmask of Hash enumerations.
+// Hashable<H> is hashable via all means specified in H.
+template <int H>
+struct Hashable {
+ static constexpr bool HashableBy(Hash h) { return h & H; }
+};
+
+namespace std {
+template <int H>
+struct hash<Hashable<H>> {
+ template <class E = Hashable<H>,
+ class = typename std::enable_if<E::HashableBy(kStd)>::type>
+ size_t operator()(E) const {
+ return kStd;
+ }
+};
+} // namespace std
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+template <class T>
+size_t Hash(const T& v) {
+ return hash_default_hash<T>()(v);
+}
+
+TEST(Delegate, HashDispatch) {
+ EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>()));
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
new file mode 100644
index 00000000..aef41d72
--- /dev/null
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -0,0 +1,74 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_generator_testing.h"
+
+#include <deque>
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace hash_internal {
+namespace {
+
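+// Minimal seed-sequence-style adapter over std::random_device: the mt19937_64
+// constructor below only needs generate() to fill its initial state with
+// entropy. (A descriptive note; this is not a full SeedSequence type.)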
+class RandomDeviceSeedSeq {
+ public:
+ using result_type = typename std::random_device::result_type;
+
+ template <class Iterator>
+ void generate(Iterator start, Iterator end) {
+ while (start != end) {
+ *start = gen_();
+ ++start;
+ }
+ }
+
+ private:
+ std::random_device gen_;
+};
+
+} // namespace
+
+std::mt19937_64* GetSharedRng() {
+ RandomDeviceSeedSeq seed_seq;
+ static auto* rng = new std::mt19937_64(seed_seq);
+ return rng;
+}
+
+std::string Generator<std::string>::operator()() const {
+ // NOLINTNEXTLINE(runtime/int)
+ std::uniform_int_distribution<short> chars(0x20, 0x7E);
+ std::string res;
+ res.resize(32);
+ std::generate(res.begin(), res.end(),
+ [&]() { return chars(*GetSharedRng()); });
+ return res;
+}
+
+absl::string_view Generator<absl::string_view>::operator()() const {
+ static auto* arena = new std::deque<std::string>();
+ // NOLINTNEXTLINE(runtime/int)
+ std::uniform_int_distribution<short> chars(0x20, 0x7E);
+ arena->emplace_back();
+ auto& res = arena->back();
+ res.resize(32);
+ std::generate(res.begin(), res.end(),
+ [&]() { return chars(*GetSharedRng()); });
+ return res;
+}
+
+} // namespace hash_internal
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h
new file mode 100644
index 00000000..65e88964
--- /dev/null
+++ b/absl/container/internal/hash_generator_testing.h
@@ -0,0 +1,152 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+
+#include <stdint.h>
+#include <algorithm>
+#include <iosfwd>
+#include <random>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace hash_internal {
+namespace generator_internal {
+
+template <class Container, class = void>
+struct IsMap : std::false_type {};
+
+template <class Map>
+struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
+
+} // namespace generator_internal
+
+std::mt19937_64* GetSharedRng();
+
+enum Enum {
+ kEnumEmpty,
+ kEnumDeleted,
+};
+
+enum class EnumClass : uint64_t {
+ kEmpty,
+ kDeleted,
+};
+
+inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
+ return o << static_cast<uint64_t>(ec);
+}
+
+template <class T, class E = void>
+struct Generator;
+
+template <class T>
+struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
+ T operator()() const {
+ std::uniform_int_distribution<T> dist;
+ return dist(*GetSharedRng());
+ }
+};
+
+template <>
+struct Generator<Enum> {
+ Enum operator()() const {
+ std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
+ dist;
+ while (true) {
+ auto variate = dist(*GetSharedRng());
+ if (variate != kEnumEmpty && variate != kEnumDeleted)
+ return static_cast<Enum>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<EnumClass> {
+ EnumClass operator()() const {
+ std::uniform_int_distribution<
+ typename std::underlying_type<EnumClass>::type>
+ dist;
+ while (true) {
+ EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
+ if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
+ return variate;
+ }
+ }
+};
+
+template <>
+struct Generator<std::string> {
+ std::string operator()() const;
+};
+
+template <>
+struct Generator<absl::string_view> {
+ absl::string_view operator()() const;
+};
+
+template <>
+struct Generator<NonStandardLayout> {
+ NonStandardLayout operator()() const {
+ return NonStandardLayout(Generator<std::string>()());
+ }
+};
+
+template <class K, class V>
+struct Generator<std::pair<K, V>> {
+ std::pair<K, V> operator()() const {
+ return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
+ Generator<typename std::decay<V>::type>()());
+ }
+};
+
+template <class... Ts>
+struct Generator<std::tuple<Ts...>> {
+ std::tuple<Ts...> operator()() const {
+ return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
+ }
+};
+
+template <class U>
+struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
+ decltype(std::declval<U&>().value())>>
+ : Generator<std::pair<
+ typename std::decay<decltype(std::declval<U&>().key())>::type,
+ typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
+
+template <class Container>
+using GeneratedType = decltype(
+ std::declval<const Generator<
+ typename std::conditional<generator_internal::IsMap<Container>::value,
+ typename Container::value_type,
+ typename Container::key_type>::type>&>()());
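+
+// For example (illustrative): for a set-like container the generated type is
+// the key type, e.g. int for a set of ints; for a map-like container it is
+// the map's value_type, i.e. a key/value pair.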
+
+} // namespace hash_internal
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h
new file mode 100644
index 00000000..9c310ad4
--- /dev/null
+++ b/absl/container/internal/hash_policy_testing.h
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities to help tests verify that hash tables properly handle stateful
+// allocators and hash functions.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace hash_testing_internal {
+
+template <class Derived>
+struct WithId {
+ WithId() : id_(next_id<Derived>()) {}
+ WithId(const WithId& that) : id_(that.id_) {}
+ WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
+ WithId& operator=(const WithId& that) {
+ id_ = that.id_;
+ return *this;
+ }
+ WithId& operator=(WithId&& that) {
+ id_ = that.id_;
+ that.id_ = 0;
+ return *this;
+ }
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const WithId& a, const WithId& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
+
+ protected:
+ explicit WithId(size_t id) : id_(id) {}
+
+ private:
+ size_t id_;
+
+ template <class T>
+ static size_t next_id() {
+ // 0 is reserved for the moved-from state.
+ static size_t gId = 1;
+ return gId++;
+ }
+};
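+
+// For example (illustrative): two default-constructed WithId-derived objects
+// compare unequal (they get distinct ids), a copy compares equal to its
+// source, and a moved-from object resets to the reserved id 0.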
+
+} // namespace hash_testing_internal
+
+struct NonStandardLayout {
+ NonStandardLayout() {}
+ explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
+ virtual ~NonStandardLayout() {}
+
+ friend bool operator==(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value == b.value;
+ }
+ friend bool operator!=(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value != b.value;
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const NonStandardLayout& v) {
+ return H::combine(std::move(h), v.value);
+ }
+
+ std::string value;
+};
+
+struct StatefulTestingHash
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingHash> {
+ template <class T>
+ size_t operator()(const T& t) const {
+ return absl::Hash<T>{}(t);
+ }
+};
+
+struct StatefulTestingEqual
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingEqual> {
+ template <class T, class U>
+ bool operator()(const T& t, const U& u) const {
+ return t == u;
+ }
+};
+
+// It is expected that Alloc() == Alloc() for all allocators, so we cannot use
+// the WithId base; ids must be assigned explicitly.
+template <class T = int>
+struct Alloc : std::allocator<T> {
+ using propagate_on_container_swap = std::true_type;
+
+ // Using the old paradigm for this to ensure compatibility.
+ explicit Alloc(size_t id = 0) : id_(id) {}
+
+ Alloc(const Alloc&) = default;
+ Alloc& operator=(const Alloc&) = default;
+
+ template <class U>
+ Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
+
+ template <class U>
+ struct rebind {
+ using other = Alloc<U>;
+ };
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const Alloc& a, const Alloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = (std::numeric_limits<size_t>::max)();
+};
+
+template <class Map>
+auto items(const Map& m) -> std::vector<
+ std::pair<typename Map::key_type, typename Map::mapped_type>> {
+ using std::get;
+ std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
+ res.reserve(m.size());
+ for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
+ return res;
+}
+
+template <class Set>
+auto keys(const Set& s)
+ -> std::vector<typename std::decay<typename Set::key_type>::type> {
+ std::vector<typename std::decay<typename Set::key_type>::type> res;
+ res.reserve(s.size());
+ for (const auto& v : s) res.emplace_back(v);
+ return res;
+}
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
+// where the unordered containers are missing certain constructors that
+// take allocator arguments. This test is defined ad hoc for the platforms
+// we care about (notably Crosstool 17) because libstdcxx's useless
+// versioning scheme precludes a more principled solution.
+// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
+// "the unordered associative containers in <unordered_map> and <unordered_set>
+// meet the allocator-aware container requirements;"
+#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \
+ (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9))
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
+#else
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
+#endif
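+
+// A minimal usage sketch (hypothetical test, not part of this header): tests
+// that exercise the allocator-taking constructors can guard on the macro.
+//
+// #if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+// TEST(MySet, AllocCtor) {
+// absl::container_internal::Alloc<int> alloc(/*id=*/7);
+// std::unordered_set<int, std::hash<int>, std::equal_to<int>,
+// absl::container_internal::Alloc<int>>
+// s(/*bucket_count=*/10, alloc);
+// EXPECT_EQ(7, s.get_allocator().id());
+// }
+// #endif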
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc
new file mode 100644
index 00000000..00c436b3
--- /dev/null
+++ b/absl/container/internal/hash_policy_testing_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_testing.h"
+
+#include "gtest/gtest.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+TEST(_, Hash) {
+ StatefulTestingHash h1;
+ EXPECT_EQ(1, h1.id());
+ StatefulTestingHash h2;
+ EXPECT_EQ(2, h2.id());
+ StatefulTestingHash h1c(h1);
+ EXPECT_EQ(1, h1c.id());
+ StatefulTestingHash h2m(std::move(h2));
+ EXPECT_EQ(2, h2m.id());
+ EXPECT_EQ(0, h2.id());
+ StatefulTestingHash h3;
+ EXPECT_EQ(3, h3.id());
+ h3 = StatefulTestingHash();
+ EXPECT_EQ(4, h3.id());
+ h3 = std::move(h1);
+ EXPECT_EQ(1, h3.id());
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
new file mode 100644
index 00000000..41e26212
--- /dev/null
+++ b/absl/container/internal/hash_policy_traits.h
@@ -0,0 +1,191 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct hash_policy_traits {
+ private:
+ struct ReturnKey {
+ // We return `Key` here.
+ // When Key=T&, we forward the lvalue reference.
+ // When Key=T, we return by value to avoid a dangling reference,
+ // e.g. for string_hash_map.
+ template <class Key, class... Args>
+ Key operator()(Key&& k, const Args&...) const {
+ return std::forward<Key>(k);
+ }
+ };
+
+ template <class P = Policy, class = void>
+ struct ConstantIteratorsImpl : std::false_type {};
+
+ template <class P>
+ struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
+ : P::constant_iterators {};
+
+ public:
+ // The actual object stored in the hash table.
+ using slot_type = typename Policy::slot_type;
+
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
+ // The argument type for insertions into the hashtable. This may differ from
+ // value_type for performance reasons; see the initializer_list constructor
+ // and insert() member functions for details.
+ using init_type = typename Policy::init_type;
+
+ using reference = decltype(Policy::element(std::declval<slot_type*>()));
+ using pointer = typename std::remove_reference<reference>::type*;
+ using value_type = typename std::remove_reference<reference>::type;
+
+ // Policies can set this variable to tell raw_hash_set that all iterators
+ // should be constant, even `iterator`. This is useful for set-like
+ // containers.
+ // Defaults to false if not provided by the policy.
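+ //
+ // For example (illustrative), a set-like policy could opt in with:
+ //
+ // struct MySetPolicy {
+ // using constant_iterators = std::true_type;
+ // // ...
+ // };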
+ using constant_iterators = ConstantIteratorsImpl<>;
+
+ // PRECONDITION: `slot` is UNINITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ Policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is UNINITIALIZED
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::destroy(alloc, slot);
+ }
+
+ // Transfers the contents of `old_slot` to `new_slot`. Any memory allocated
+ // by the allocator inside `old_slot` may be transferred to `new_slot`.
+ //
+ // OPTIONAL: defaults to:
+ //
+ // construct(alloc, new_slot, std::move(element(old_slot)));
+ // destroy(alloc, old_slot);
+ //
+ // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+ // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+ // UNINITIALIZED
+ template <class Alloc>
+ static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+ transfer_impl(alloc, new_slot, old_slot, 0);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class P = Policy>
+ static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+ return P::element(slot);
+ }
+
+ // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+ //
+ // If `slot` is nullptr, returns the constant amount of memory owned by any
+ // full slot or -1 if slots own variable amounts of memory.
+ //
+ // PRECONDITION: `slot` is INITIALIZED or nullptr
+ template <class P = Policy>
+ static size_t space_used(const slot_type* slot) {
+ return P::space_used(slot);
+ }
+
+ // Provides generalized access to the key for elements, both for elements in
+ // the table and for elements that have not yet been inserted (or even
+ // constructed). We would like an API that allows us to say: `key(args...)`
+ // but we cannot do that for all cases, so we use this more general API that
+ // can be used for many things, including the following:
+ //
+ // - Given an element in a table, get its key.
+ // - Given an element initializer, get its key.
+ // - Given `emplace()` arguments, get the element key.
+ //
+ // Implementations of this must adhere to a very strict technical
+ // specification around aliasing and consuming arguments:
+ //
+ // Let `value_type` be the result type of `element()` without ref- and
+ // cv-qualifiers. The first argument is a functor, the rest are constructor
+ // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+ // `k` is the element key, and `xs...` are the new constructor arguments for
+ // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+ // `ts...`. The key won't be touched once `xs...` are used to construct an
+ // element; `ts...` won't be touched at all, which allows `apply()` to consume
+ // any rvalues among them.
+ //
+ // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
+ // trigger a hard compile error unless it originates from `f`. In other words,
+ // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
+ // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
+ //
+ // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
+ // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
+ template <class F, class... Ts, class P = Policy>
+ static auto apply(F&& f, Ts&&... ts)
+ -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
+ }
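+
+ // A minimal sketch of a conforming `Policy::apply()` for a map-like policy
+ // whose `init_type` is `std::pair<K, V>` (illustrative only; real policies
+ // accept more argument forms):
+ //
+ // template <class F>
+ // static auto apply(F&& f, std::pair<K, V> p)
+ // -> decltype(std::forward<F>(f)(p.first, std::move(p))) {
+ // const K& key = p.first; // `key` aliases the consumed argument `p`.
+ // return std::forward<F>(f)(key, std::move(p));
+ // }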
+
+ // Returns the "key" portion of the slot.
+ // Used for node handle manipulation.
+ template <class P = Policy>
+ static auto key(slot_type* slot)
+ -> decltype(P::apply(ReturnKey(), element(slot))) {
+ return P::apply(ReturnKey(), element(slot));
+ }
+
+ // Returns the "value" (as opposed to the "key") portion of the element. Used
+ // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ private:
+ // Use auto -> decltype as an enabler: overload resolution prefers the `int`
+ // overload when `P::transfer(...)` is well-formed and falls back to the
+ // `char` overload otherwise.
+ template <class Alloc, class P = Policy>
+ static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, int)
+ -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+ P::transfer(alloc, new_slot, old_slot);
+ }
+ template <class Alloc>
+ static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, char) {
+ construct(alloc, new_slot, std::move(element(old_slot)));
+ destroy(alloc, old_slot);
+ }
+};
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc
new file mode 100644
index 00000000..07cecdfa
--- /dev/null
+++ b/absl/container/internal/hash_policy_traits_test.cc
@@ -0,0 +1,144 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_traits.h"
+
+#include <functional>
+#include <memory>
+#include <new>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::testing::MockFunction;
+using ::testing::Return;
+using ::testing::ReturnRef;
+
+using Alloc = std::allocator<int>;
+using Slot = int;
+
+struct PolicyWithoutOptionalOps {
+ using slot_type = Slot;
+ using key_type = Slot;
+ using init_type = Slot;
+
+ static std::function<void(void*, Slot*, Slot)> construct;
+ static std::function<void(void*, Slot*)> destroy;
+
+ static std::function<Slot&(Slot*)> element;
+ static int apply(int v) { return apply_impl(v); }
+ static std::function<int(int)> apply_impl;
+ static std::function<Slot&(Slot*)> value;
+};
+
+std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
+std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
+
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
+std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
+
+struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
+ static std::function<void(void*, Slot*, Slot*)> transfer;
+};
+
+std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+
+struct Test : ::testing::Test {
+ Test() {
+ PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
+ construct.Call(a1, a2, std::move(a3));
+ };
+ PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
+ destroy.Call(a1, a2);
+ };
+
+ PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
+ return element.Call(a1);
+ };
+ PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
+ return apply.Call(a1);
+ };
+ PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
+ return value.Call(a1);
+ };
+
+ PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
+ return transfer.Call(a1, a2, a3);
+ };
+ }
+
+ std::allocator<int> alloc;
+ int a = 53;
+
+ MockFunction<void(void*, Slot*, Slot)> construct;
+ MockFunction<void(void*, Slot*)> destroy;
+
+ MockFunction<Slot&(Slot*)> element;
+ MockFunction<int(int)> apply;
+ MockFunction<Slot&(Slot*)> value;
+
+ MockFunction<void(void*, Slot*, Slot*)> transfer;
+};
+
+TEST_F(Test, construct) {
+ EXPECT_CALL(construct, Call(&alloc, &a, 53));
+ hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
+}
+
+TEST_F(Test, destroy) {
+ EXPECT_CALL(destroy, Call(&alloc, &a));
+ hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
+}
+
+TEST_F(Test, element) {
+ int b = 0;
+ EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
+ EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a));
+}
+
+TEST_F(Test, apply) {
+ EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
+ EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
+}
+
+TEST_F(Test, value) {
+ int b = 0;
+ EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b));
+ EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
+}
+
+TEST_F(Test, without_transfer) {
+ int b = 42;
+ EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b));
+ EXPECT_CALL(construct, Call(&alloc, &a, b));
+ EXPECT_CALL(destroy, Call(&alloc, &b));
+ hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+TEST_F(Test, with_transfer) {
+ int b = 42;
+ EXPECT_CALL(transfer, Call(&alloc, &a, &b));
+ hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h
new file mode 100644
index 00000000..b6a43512
--- /dev/null
+++ b/absl/container/internal/hashtable_debug.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse on the actual behavior of the probing algorithms in
+// these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
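+//
+// A minimal usage sketch (assuming a populated absl::flat_hash_set<int> `s`
+// and this header included):
+//
+// std::vector<size_t> hist = GetHashtableDebugNumProbesHistogram(s);
+// HashtableDebugProbeSummary summary = GetHashtableDebugProbeSummary(s);
+// // summary.mean is the average number of probes per element.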
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// Returns the number of probes required to lookup `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+ const C& c, const typename C::key_type& key) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+ std::vector<size_t> v;
+ for (auto it = container.begin(); it != container.end(); ++it) {
+ size_t num_probes = GetHashtableDebugNumProbes(
+ container,
+ absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+ v.resize(std::max(v.size(), num_probes + 1));
+ v[num_probes]++;
+ }
+ return v;
+}
+
+struct HashtableDebugProbeSummary {
+ size_t total_elements;
+ size_t total_num_probes;
+ double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+ auto probes = GetHashtableDebugNumProbesHistogram(container);
+ HashtableDebugProbeSummary summary = {};
+ for (size_t i = 0; i < probes.size(); ++i) {
+ summary.total_elements += probes[i];
+ summary.total_num_probes += probes[i] * i;
+ }
+ summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+ return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h
new file mode 100644
index 00000000..50ba6ba5
--- /dev/null
+++ b/absl/container/internal/hashtable_debug_hooks.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If the container is a map, obtain the key by calling get<0>() on the value.
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+ return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+ return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+ // Returns the number of probes required to find `key` in `c`. The "number of
+ // probes" is a concept that can vary by container. Implementations should
+ // return 0 when `key` was found in the minimum number of operations and
+ // should increment the result for each non-trivial operation required to find
+ // `key`.
+ //
+ // The default implementation uses the bucket API from the standard library
+ // and thus works for `std::unordered_*` containers.
+ static size_t GetNumProbes(const Container& c,
+ const typename Container::key_type& key) {
+ if (!c.bucket_count()) return {};
+ size_t num_probes = 0;
+ size_t bucket = c.bucket(key);
+ for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+ if (it == e) return num_probes;
+ if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+ }
+ }
+
+ // Returns the number of bytes requested from the allocator by the container
+ // and not freed.
+ //
+ // static size_t AllocatedByteSize(const Container& c);
+
+ // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+ // `Container` and `c.size()` is equal to `num_elements`.
+ //
+ // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
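+
+// A minimal sketch of a container-specific specialization, for a hypothetical
+// container `MyTable` exposing an assumed `probe_length()` accessor
+// (illustrative only):
+//
+// template <>
+// struct HashtableDebugAccess<MyTable> {
+// static size_t GetNumProbes(const MyTable& c,
+// const MyTable::key_type& key) {
+// return c.probe_length(key);
+// }
+// };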
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
new file mode 100644
index 00000000..f11a6ad2
--- /dev/null
+++ b/absl/container/internal/layout.h
@@ -0,0 +1,740 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+// struct S {
+// double a[N];
+// int b[M];
+// };
+//
+// S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to
+// the rescue! It's a portable generalization of the technique known as the
+// struct hack.
+//
+// // This object will tell us everything we need to know about the memory
+// // layout of double[N] followed by int[M]. It's structurally identical to
+// // size_t[2] that stores N and M. It's very cheap to create.
+// const Layout<double, int> layout(N, M);
+//
+// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+// // memory is needed. We are free to use any allocation function we want as
+// // long as it returns aligned memory.
+// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+// // Obtain the pointer to the array of doubles.
+// // Equivalent to `reinterpret_cast<double*>(p.get())`.
+// //
+// // We could have written layout.Pointer<0>(p) instead. If all the types are
+// // unique you can use either form, but if some types are repeated you must
+// // use the index form.
+// double* a = layout.Pointer<double>(p.get());
+//
+// // Obtain the pointer to the array of ints.
+// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+// int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+// using L = Layout<size_t, size_t, double, int>;
+//
+// unsigned char* Allocate(size_t n, size_t m) {
+// const L layout(1, 1, n, m);
+// unsigned char* p = new unsigned char[layout.AllocSize()];
+// *layout.Pointer<0>(p) = n;
+// *layout.Pointer<1>(p) = m;
+// return p;
+// }
+//
+// void Use(unsigned char* p) {
+// // First, extract N and M.
+// // Specify that the first array has only one element. Using `prefix` we
+// // can access the first two arrays but not more.
+// constexpr auto prefix = L::Partial(1);
+// size_t n = *prefix.Pointer<0>(p);
+// size_t m = *prefix.Pointer<1>(p);
+//
+// // Now we can get pointers to the payload.
+// const L layout(1, 1, n, m);
+// double* a = layout.Pointer<double>(p);
+// int* b = layout.Pointer<int>(p);
+// }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
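+//
+// For example (a sketch; 64 is assumed to be the cache line size here):
+//
+// // Cache-align the array of doubles.
+// Layout<int, Aligned<double, 64>> layout(n, m);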
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+// EXAMPLE
+//
+// // Immutable move-only string with sizeof equal to sizeof(void*). The
+// // string size and the characters are kept in the same heap allocation.
+// class CompactString {
+// public:
+// CompactString(const char* s = "") {
+// const size_t size = strlen(s);
+// // size_t[1] followed by char[size + 1].
+// const L layout(1, size + 1);
+// p_.reset(new unsigned char[layout.AllocSize()]);
+// // If running under ASAN, mark the padding bytes, if any, to catch
+// // memory errors.
+// layout.PoisonPadding(p_.get());
+// // Store the size in the allocation.
+// *layout.Pointer<size_t>(p_.get()) = size;
+// // Store the characters in the allocation.
+// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+// }
+//
+// size_t size() const {
+// // Equivalent to reinterpret_cast<size_t&>(*p).
+// return *L::Partial().Pointer<size_t>(p_.get());
+// }
+//
+// const char* c_str() const {
+// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+// // The argument in Partial(1) specifies that we have size_t[1] in front
+// // of the characters.
+// return L::Partial(1).Pointer<char>(p_.get());
+// }
+//
+// private:
+// // Our heap allocation contains a size_t followed by an array of chars.
+// using L = Layout<size_t, char>;
+// std::unique_ptr<unsigned char[]> p_;
+// };
+//
+// int main() {
+// CompactString s = "hello";
+// assert(s.size() == 5);
+// assert(strcmp(s.c_str(), "hello") == 0);
+// }
+//
+// DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+// isn't intended to be used directly, and its name and template parameter
+// list are internal implementation details, but the class itself provides
+// most of the functionality in this file. See comments on its members for
+// detailed documentation.
+//
+// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
+// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>  // for free(), used by adl_barrier::TypeName()
+#include <ostream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
+#if defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#endif
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+ static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+ using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+ using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+ static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+ static_assert(N % alignof(T) == 0,
+ "Custom alignment can't be lower than the type's alignment");
+ static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+ typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+ static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+ return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+ return adl_barrier::Find(Needle(), Ts()...) + 1;
+}
+
+constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
+
+// Returns `q * m` for the smallest `q` such that `q * m >= n`.
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
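+// e.g. Align(13, 8) == 16 and Align(16, 8) == 16.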
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+ return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+std::string TypeName() {
+ std::string out;
+ int status = 0;
+ char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+ demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+ if (status == 0 && demangled != nullptr) { // Demangling succeeded.
+ absl::StrAppend(&out, "<", demangled, ">");
+ free(demangled);
+ } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+ absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+ }
+ return out;
+}
+
+} // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
+template <class T>
+using IsLegalElementType = std::integral_constant<
+ bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+ !std::is_reference<typename Type<T>::type>::value &&
+ !std::is_volatile<typename Type<T>::type>::value &&
+ adl_barrier::IsPow2(AlignOf<T>::value)>;
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
+ absl::index_sequence<OffsetSeq...>> {
+ private:
+ static_assert(sizeof...(Elements) > 0, "At least one field is required");
+ static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ enum {
+ NumTypes = sizeof...(Elements),
+ NumSizes = sizeof...(SizeSeq),
+ NumOffsets = sizeof...(OffsetSeq),
+ };
+
+ // These are guaranteed by `Layout`.
+ static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+ "Internal error");
+ static_assert(NumTypes > 0, "Internal error");
+
+ // Returns the index of `T` in `Elements...`. Results in a compilation error
+ // if `Elements...` doesn't contain exactly one instance of `T`.
+ template <class T>
+ static constexpr size_t ElementIndex() {
+ static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+ "Type not found");
+ return adl_barrier::Find(Type<T>(),
+ Type<typename Type<Elements>::type>()...);
+ }
+
+ template <size_t N>
+ using ElementAlignment =
+ AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+ // Element types of all arrays packed in a tuple.
+ using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+ // Element type of the Nth array.
+ template <size_t N>
+ using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+ constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+ : size_{sizes...} {}
+
+ // Alignment of the layout, equal to the strictest alignment of all elements.
+ // All pointers passed to the methods of layout must be aligned to this value.
+ static constexpr size_t Alignment() {
+ return adl_barrier::Max(AlignOf<Elements>::value...);
+ }
+
+ // Offset in bytes of the Nth array.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Offset<0>() == 0); // The ints start from 0.
+ // assert(x.Offset<1>() == 16); // The doubles start from 16.
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ template <size_t N, EnableIf<N == 0> = 0>
+ constexpr size_t Offset() const {
+ return 0;
+ }
+
+ template <size_t N, EnableIf<N != 0> = 0>
+ constexpr size_t Offset() const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ return adl_barrier::Align(
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+ ElementAlignment<N>::value);
+ }
+
+ // Offset in bytes of the array with the specified element type. There must
+ // be exactly one such array and its zero-based index must be at most
+ // `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Offset<int>() == 0); // The ints start from 0.
+ // assert(x.Offset<double>() == 16); // The doubles start from 16.
+ template <class T>
+ constexpr size_t Offset() const {
+ return Offset<ElementIndex<T>()>();
+ }
+
+ // Offsets in bytes of all arrays for which the offsets are known.
+ constexpr std::array<size_t, NumOffsets> Offsets() const {
+ return {{Offset<OffsetSeq>()...}};
+ }
+
+ // The number of elements in the Nth array. This is the Nth argument of
+ // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<0>() == 3);
+ // assert(x.Size<1>() == 4);
+ //
+ // Requires: `N < NumSizes`.
+ template <size_t N>
+ constexpr size_t Size() const {
+ static_assert(N < NumSizes, "Index out of bounds");
+ return size_[N];
+ }
+
+ // The number of elements in the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be
+ // less than `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<int>() == 3);
+ // assert(x.Size<double>() == 4);
+ template <class T>
+ constexpr size_t Size() const {
+ return Size<ElementIndex<T>()>();
+ }
+
+ // The numbers of elements in all arrays for which the sizes are known.
+ constexpr std::array<size_t, NumSizes> Sizes() const {
+ return {{Size<SizeSeq>()...}};
+ }
+
+ // Pointer to the beginning of the Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<0>(p);
+ // double* doubles = x.Pointer<1>(p);
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+ using C = typename std::remove_const<Char>::type;
+ static_assert(
+ std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+ std::is_same<C, signed char>(),
+ "The argument must be a pointer to [const] [signed|unsigned] char");
+ constexpr size_t alignment = Alignment();
+ (void)alignment;
+ assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+ return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+ }
+
+ // Pointer to the beginning of the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be at
+ // most `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<int>(p);
+ // double* doubles = x.Pointer<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ CopyConst<Char, T>* Pointer(Char* p) const {
+ return Pointer<ElementIndex<T>()>(p);
+ }
+
+ // Pointers to all arrays for which pointers are known.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // int* ints;
+ // double* doubles;
+ // std::tie(ints, doubles) = x.Pointers(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<CopyConst<
+ Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+ Pointers(Char* p) const {
+ return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+ Pointer<OffsetSeq>(p)...);
+ }
+
+ // The Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<0>(p);
+ // Span<double> doubles = x.Slice<1>(p);
+ //
+ // Requires: `N < NumSizes`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+ return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
+ }
+
+ // The array with the specified element type. There must be exactly one
+ // such array and its zero-based index must be less than `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<int>(p);
+ // Span<double> doubles = x.Slice<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+ return Slice<ElementIndex<T>()>(p);
+ }
+
+ // All arrays with known sizes.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // Span<int> ints;
+ // Span<double> doubles;
+ // std::tie(ints, doubles) = x.Slices(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<SliceType<CopyConst<
+ Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+ Slices(Char* p) const {
+ // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
+ // in 6.1).
+ (void)p;
+ return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+ Slice<SizeSeq>(p)...);
+ }
+
+ // The size of the allocation that fits all arrays.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
+ //
+ // Requires: `NumSizes == sizeof...(Ts)`.
+ constexpr size_t AllocSize() const {
+ static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+ return Offset<NumTypes - 1>() +
+ SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+ }
+
+ // If built with --config=asan, poisons padding bytes (if any) in the
+ // allocation. The pointer must point to a memory block at least
+ // `AllocSize()` bytes in length.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ Pointer<0>(p); // verify the requirements on `Char` and `p`
+ }
+
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ (void)p;
+#ifdef ADDRESS_SANITIZER
+ PoisonPadding<Char, N - 1>(p);
+ // The `if` is an optimization. It doesn't affect the observable behaviour.
+ if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+ size_t start =
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+ ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+ }
+#endif
+ }
+
+ // Human-readable description of the memory layout. Useful for debugging.
+ // Slow.
+ //
+ // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+ // // by an unknown number of doubles.
+ // auto x = Layout<char, int, double>::Partial(5, 3);
+ // assert(x.DebugString() ==
+ // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+ //
+ // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+ // may be missing depending on the target platform). For example,
+ // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+ // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+ // be missing (as in the example above). Only fields with known offsets are
+ // described. Type names may differ across platforms: one compiler might
+ // produce "unsigned*" where another produces "unsigned int *".
+ std::string DebugString() const {
+ const auto offsets = Offsets();
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+    const std::string types[] = {
+        adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+ for (size_t i = 0; i != NumOffsets - 1; ++i) {
+ absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+ "(", sizes[i + 1], ")");
+ }
+ // NumSizes is a constant that may be zero. Some compilers cannot see that
+ // inside the if statement "size_[NumSizes - 1]" must be valid.
+ int last = static_cast<int>(NumSizes) - 1;
+ if (NumTypes == NumSizes && last >= 0) {
+ absl::StrAppend(&res, "[", size_[last], "]");
+ }
+ return res;
+ }
+
+ private:
+ // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+ size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+ std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+ absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+} // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+ static_assert(sizeof...(Ts) > 0, "At least one field is required");
+ static_assert(
+ absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ // The result type of `Partial()` with `NumSizes` arguments.
+ template <size_t NumSizes>
+ using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+ // `Layout` knows the element types of the arrays we want to lay out in
+ // memory but not the number of elements in each array.
+ // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+ // resulting immutable object can be used to obtain pointers to the
+ // individual arrays.
+ //
+  // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+  // pass one argument -- the number of elements in the first array.
+ //
+ // // int[3] followed by 4 bytes of padding and an unknown number of
+ // // doubles.
+ // auto x = Layout<int, double>::Partial(3);
+ // // doubles start at byte 16.
+ // assert(x.Offset<1>() == 16);
+ //
+ // If you know the number of elements in all arrays, you can still call
+ // `Partial()` but it's more convenient to use the constructor of `Layout`.
+ //
+ // Layout<int, double> x(3, 5);
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ //
+ // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+ // Requires: all arguments are convertible to `size_t`.
+ template <class... Sizes>
+ static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+ static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+ return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+ }
+
+ // Creates a layout with the sizes of all arrays specified. If you know
+ // only the sizes of the first N arrays (where N can be zero), you can use
+ // `Partial()` defined above. The constructor is essentially equivalent to
+ // calling `Partial()` and passing in all array sizes; the constructor is
+ // provided as a convenient abbreviation.
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+ : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
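+
+// A minimal end-to-end sketch of typical `Layout` use (illustrative; `n`
+// stands for a runtime value):
+//
+//   // One allocation holding size_t[1] followed by char[n].
+//   Layout<size_t, char> layout(1, n);
+//   unsigned char* p = new unsigned char[layout.AllocSize()];
+//   *layout.Pointer<size_t>(p) = n;         // the size_t prefix
+//   char* chars = layout.Pointer<char>(p);  // the char[n] payload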
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
new file mode 100644
index 00000000..b9f98471
--- /dev/null
+++ b/absl/container/internal/layout_test.cc
@@ -0,0 +1,1557 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/layout.h"
+
+// We need ::max_align_t because some libstdc++ versions don't provide
+// std::max_align_t
+#include <stddef.h>
+#include <cstdint>
+#include <cstring>  // strlen, memcpy (used by the CompactString example below)
+#include <memory>
+#include <sstream>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/types/span.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::absl::Span;
+using ::testing::ElementsAre;
+
+size_t Distance(const void* from, const void* to) {
+ ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+ return static_cast<const char*>(to) - static_cast<const char*>(from);
+}
+
+template <class Expected, class Actual>
+Expected Type(Actual val) {
+ static_assert(std::is_same<Expected, Actual>(), "");
+ return val;
+}
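+
+// Type<T>(v) asserts that the static type of `v` is exactly `T` and returns
+// `v`; the tests below use it to pin down the return types of Layout's
+// accessors.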
+
+// Helper class to test different sizes and alignments.
+struct alignas(8) Int128 {
+ uint64_t a, b;
+ friend bool operator==(Int128 lhs, Int128 rhs) {
+ return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b);
+ }
+
+ static std::string Name() {
+ return internal_layout::adl_barrier::TypeName<Int128>();
+ }
+};
+
+// Properties of types that this test relies on.
+static_assert(sizeof(int8_t) == 1, "");
+static_assert(alignof(int8_t) == 1, "");
+static_assert(sizeof(int16_t) == 2, "");
+static_assert(alignof(int16_t) == 2, "");
+static_assert(sizeof(int32_t) == 4, "");
+static_assert(alignof(int32_t) == 4, "");
+static_assert(sizeof(Int128) == 16, "");
+static_assert(alignof(Int128) == 8, "");
+
+template <class Expected, class Actual>
+void SameType() {
+ static_assert(std::is_same<Expected, Actual>(), "");
+}
+
+TEST(Layout, ElementType) {
+ {
+ using L = Layout<int32_t>;
+ SameType<int32_t, L::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ SameType<int32_t, L::ElementType<0>>();
+ SameType<int32_t, L::ElementType<1>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<1>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ SameType<int8_t, L::ElementType<0>>();
+ SameType<int32_t, L::ElementType<1>>();
+ SameType<Int128, L::ElementType<2>>();
+ SameType<int8_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+ SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>();
+ SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>();
+ SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>();
+ SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>();
+ }
+}
+
+TEST(Layout, ElementTypes) {
+ {
+ using L = Layout<int32_t>;
+ SameType<std::tuple<int32_t>, L::ElementTypes>();
+ SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>();
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial(0))::ElementTypes>();
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0))::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0, 0))::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0, 0, 0))::ElementTypes>();
+ }
+}
+
+TEST(Layout, OffsetByIndex) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(3).Offset<0>());
+ EXPECT_EQ(0, L(3).Offset<0>());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(3).Offset<0>());
+ EXPECT_EQ(12, L::Partial(3).Offset<1>());
+ EXPECT_EQ(0, L::Partial(3, 5).Offset<0>());
+ EXPECT_EQ(12, L::Partial(3, 5).Offset<1>());
+ EXPECT_EQ(0, L(3, 5).Offset<0>());
+ EXPECT_EQ(12, L(3, 5).Offset<1>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(1).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1).Offset<1>());
+ EXPECT_EQ(0, L::Partial(5).Offset<0>());
+ EXPECT_EQ(8, L::Partial(5).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(1, 0).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1, 0).Offset<1>());
+ EXPECT_EQ(8, L::Partial(1, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(5, 3).Offset<0>());
+ EXPECT_EQ(8, L::Partial(5, 3).Offset<1>());
+ EXPECT_EQ(24, L::Partial(5, 3).Offset<2>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>());
+ EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>());
+ EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>());
+ EXPECT_EQ(0, L(5, 3, 1).Offset<0>());
+ EXPECT_EQ(24, L(5, 3, 1).Offset<2>());
+ EXPECT_EQ(8, L(5, 3, 1).Offset<1>());
+ }
+}
+
+TEST(Layout, OffsetByType) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(3).Offset<int32_t>());
+ EXPECT_EQ(0, L(3).Offset<int32_t>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial().Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(1).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(5).Offset<int8_t>());
+ EXPECT_EQ(8, L::Partial(5).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>());
+ EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>());
+ EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>());
+ EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>());
+ EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>());
+ EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>());
+ EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>());
+ EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>());
+ EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>());
+ }
+}
+
+TEST(Layout, Offsets) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));
+ EXPECT_THAT(L(3).Offsets(), ElementsAre(0));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));
+ EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));
+ EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4));
+ EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8));
+ EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0));
+ EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8));
+ EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0));
+ EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8));
+ EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ }
+}
+
+TEST(Layout, AllocSize) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).AllocSize());
+ EXPECT_EQ(12, L::Partial(3).AllocSize());
+ EXPECT_EQ(12, L(3).AllocSize());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(32, L::Partial(3, 5).AllocSize());
+ EXPECT_EQ(32, L(3, 5).AllocSize());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize());
+ EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize());
+ EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize());
+ EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize());
+ EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize());
+ EXPECT_EQ(136, L(3, 5, 7).AllocSize());
+ }
+}
+
+TEST(Layout, SizeByIndex) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<0>());
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L(3).Size<0>());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<0>());
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+ EXPECT_EQ(3, L(3, 5).Size<0>());
+ EXPECT_EQ(5, L(3, 5).Size<1>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>());
+ EXPECT_EQ(3, L(3, 5, 7).Size<0>());
+ EXPECT_EQ(5, L(3, 5, 7).Size<1>());
+ EXPECT_EQ(7, L(3, 5, 7).Size<2>());
+ }
+}
+
+TEST(Layout, SizeByType) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<int32_t>());
+ EXPECT_EQ(3, L::Partial(3).Size<int32_t>());
+ EXPECT_EQ(3, L(3).Size<int32_t>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Size<int8_t>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>());
+ EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>());
+ EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>());
+ EXPECT_EQ(7, L(3, 5, 7).Size<Int128>());
+ }
+}
+
+TEST(Layout, Sizes) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L(3).Sizes(), ElementsAre(3));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+ EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+ EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ }
+}
+
+TEST(Layout, PointerByIndex) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12,
+ Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+ }
+}
+
+TEST(Layout, PointerByType) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+ L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const Int128*>(
+ L::Partial(1, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(24, Distance(p, Type<const Int128*>(
+ L::Partial(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+ }
+}
+
+TEST(Layout, MutablePointerByIndex) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p))));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+ EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+ }
+}
+
+TEST(Layout, MutablePointerByType) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+ }
+}
+
+TEST(Layout, Pointers) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+ Type<std::tuple<const int8_t*>>(x.Pointers(p)));
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+}
+
+TEST(Layout, MutablePointers) {
+ alignas(max_align_t) unsigned char p[100];
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+ Type<std::tuple<int8_t*>>(x.Pointers(p)));
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+}
+
+TEST(Layout, SliceByIndexSize) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L(3).Slice<0>(p).size());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+ }
+}
+
+TEST(Layout, SliceByTypeSize) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+ }
+}
+
+TEST(Layout, MutableSliceByIndexSize) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L(3).Slice<0>(p).size());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+ }
+}
+
+TEST(Layout, MutableSliceByTypeSize) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+ }
+}
+
+TEST(Layout, SliceByIndexData) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 12,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(12,
+ Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ }
+}
+
+TEST(Layout, SliceByTypeData) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+ L::Partial(0, 0, 0).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
+ L::Partial(1, 0, 0).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
+ L::Partial(5, 3, 1).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p,
+ Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ }
+}
+
+TEST(Layout, MutableSliceByIndexData) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 12,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24, Distance(
+ p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(24,
+ Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ }
+}
+
+TEST(Layout, MutableSliceByTypeData) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4, Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ }
+}
+
+MATCHER_P(IsSameSlice, slice, "") {
+ return arg.size() == slice.size() && arg.data() == slice.data();
+}
+
+template <typename... M>
+class TupleMatcher {
+ public:
+ explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {}
+
+ template <typename Tuple>
+ bool MatchAndExplain(const Tuple& p,
+ testing::MatchResultListener* /* listener */) const {
+ static_assert(std::tuple_size<Tuple>::value == sizeof...(M), "");
+ return MatchAndExplainImpl(
+ p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{});
+ }
+
+ // For the matcher concept. Left empty as we don't really need the diagnostics
+ // right now.
+ void DescribeTo(::std::ostream* os) const {}
+ void DescribeNegationTo(::std::ostream* os) const {}
+
+ private:
+ template <typename Tuple, size_t... Is>
+ bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const {
+ // Using std::min as a simple variadic "and".
+ return std::min(
+ {true, testing::SafeMatcherCast<
+ const typename std::tuple_element<Is, Tuple>::type&>(
+ std::get<Is>(matchers_))
+ .Matches(std::get<Is>(p))...});
+ }
+
+ std::tuple<M...> matchers_;
+};
+
+template <typename... M>
+testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
+ return testing::MakePolymorphicMatcher(
+ TupleMatcher<M...>(std::move(matchers)...));
+}
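+
+// The tests below use this as, e.g., Tuple(IsSameSlice(s0), IsSameSlice(s1))
+// to match a std::tuple of spans element-wise.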
+
+TEST(Layout, Slices) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)),
+ Tuple(IsSameSlice(x.Slice<0>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+}
+
+TEST(Layout, MutableSlices) {
+ alignas(max_align_t) unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)),
+ Tuple(IsSameSlice(x.Slice<0>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+}
+
+TEST(Layout, UnalignedTypes) {
+ constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
+ alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
+ EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4));
+}
+
+TEST(Layout, CustomAlignment) {
+ constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2);
+ alignas(max_align_t) unsigned char p[x.AllocSize()];
+ EXPECT_EQ(10, x.AllocSize());
+ EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8));
+}
+
+TEST(Layout, OverAligned) {
+ constexpr size_t M = alignof(max_align_t);
+ constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+ alignas(2 * M) unsigned char p[x.AllocSize()];
+ EXPECT_EQ(2 * M + 3, x.AllocSize());
+ EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
+}
+
+TEST(Layout, Alignment) {
+ static_assert(Layout<int8_t>::Alignment() == 1, "");
+ static_assert(Layout<int32_t>::Alignment() == 4, "");
+ static_assert(Layout<int64_t>::Alignment() == 8, "");
+ static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
+ static_assert(Layout<int8_t, int32_t, int64_t>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, int64_t, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, int8_t, int64_t>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, int64_t, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<int64_t, int8_t, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<int64_t, int32_t, int8_t>::Alignment() == 8, "");
+}
+
+TEST(Layout, ConstexprPartial) {
+ constexpr size_t M = alignof(max_align_t);
+ constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+ static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
+}
+
+// A half-open byte range [from, to).
+struct Region {
+ size_t from;
+ size_t to;
+};
+
+void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
+#ifdef ADDRESS_SANITIZER
+ for (size_t i = 0; i != n; ++i) {
+ EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
+ }
+#endif
+}
+
+template <size_t N>
+void ExpectPoisoned(const unsigned char (&buf)[N],
+ std::initializer_list<Region> reg) {
+ size_t prev = 0;
+ for (const Region& r : reg) {
+ ExpectRegionPoisoned(buf + prev, r.from - prev, false);
+ ExpectRegionPoisoned(buf + r.from, r.to - r.from, true);
+ prev = r.to;
+ }
+ ExpectRegionPoisoned(buf + prev, N - prev, false);
+}
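+
+// For example, ExpectPoisoned(buf, {{1, 8}}) verifies that bytes [0, 1) and
+// [8, N) are unpoisoned while bytes [1, 8) are poisoned.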
+
+TEST(Layout, PoisonPadding) {
+ using L = Layout<int8_t, int64_t, int32_t, Int128>;
+
+ constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
+ {
+ constexpr auto x = L::Partial();
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {});
+ }
+ {
+ constexpr auto x = L::Partial(1);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2, 3);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2, 3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr L x(1, 2, 3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+}
+
+TEST(Layout, DebugString) {
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
+ EXPECT_EQ("@0<signed char>(1)", x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+ {
+ constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+}
+
+TEST(Layout, CharTypes) {
+ constexpr Layout<int32_t> x(1);
+ alignas(max_align_t) char c[x.AllocSize()] = {};
+ alignas(max_align_t) unsigned char uc[x.AllocSize()] = {};
+ alignas(max_align_t) signed char sc[x.AllocSize()] = {};
+ alignas(max_align_t) const char cc[x.AllocSize()] = {};
+ alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {};
+ alignas(max_align_t) const signed char csc[x.AllocSize()] = {};
+
+ Type<int32_t*>(x.Pointer<0>(c));
+ Type<int32_t*>(x.Pointer<0>(uc));
+ Type<int32_t*>(x.Pointer<0>(sc));
+ Type<const int32_t*>(x.Pointer<0>(cc));
+ Type<const int32_t*>(x.Pointer<0>(cuc));
+ Type<const int32_t*>(x.Pointer<0>(csc));
+
+ Type<int32_t*>(x.Pointer<int32_t>(c));
+ Type<int32_t*>(x.Pointer<int32_t>(uc));
+ Type<int32_t*>(x.Pointer<int32_t>(sc));
+ Type<const int32_t*>(x.Pointer<int32_t>(cc));
+ Type<const int32_t*>(x.Pointer<int32_t>(cuc));
+ Type<const int32_t*>(x.Pointer<int32_t>(csc));
+
+ Type<std::tuple<int32_t*>>(x.Pointers(c));
+ Type<std::tuple<int32_t*>>(x.Pointers(uc));
+ Type<std::tuple<int32_t*>>(x.Pointers(sc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(cc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(cuc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(csc));
+
+ Type<Span<int32_t>>(x.Slice<0>(c));
+ Type<Span<int32_t>>(x.Slice<0>(uc));
+ Type<Span<int32_t>>(x.Slice<0>(sc));
+ Type<Span<const int32_t>>(x.Slice<0>(cc));
+ Type<Span<const int32_t>>(x.Slice<0>(cuc));
+ Type<Span<const int32_t>>(x.Slice<0>(csc));
+
+ Type<std::tuple<Span<int32_t>>>(x.Slices(c));
+ Type<std::tuple<Span<int32_t>>>(x.Slices(uc));
+ Type<std::tuple<Span<int32_t>>>(x.Slices(sc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(cc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(csc));
+}
+
+TEST(Layout, ConstElementType) {
+ constexpr Layout<const int32_t> x(1);
+ alignas(int32_t) char c[x.AllocSize()] = {};
+ const char* cc = c;
+ const int32_t* p = reinterpret_cast<const int32_t*>(cc);
+
+ EXPECT_EQ(alignof(int32_t), x.Alignment());
+
+ EXPECT_EQ(0, x.Offset<0>());
+ EXPECT_EQ(0, x.Offset<const int32_t>());
+
+ EXPECT_THAT(x.Offsets(), ElementsAre(0));
+
+ EXPECT_EQ(1, x.Size<0>());
+ EXPECT_EQ(1, x.Size<const int32_t>());
+
+ EXPECT_THAT(x.Sizes(), ElementsAre(1));
+
+ EXPECT_EQ(sizeof(int32_t), x.AllocSize());
+
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
+
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
+
+ EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
+ EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
+
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+
+ EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
+ Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+ EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
+ Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+}
+
+namespace example {
+
+// Immutable move-only string with sizeof equal to sizeof(void*). The string
+// size and the characters are kept in the same heap allocation.
+class CompactString {
+ public:
+ CompactString(const char* s = "") { // NOLINT
+ const size_t size = strlen(s);
+ // size_t[1], followed by char[size + 1].
+ // This statement doesn't allocate memory.
+ const L layout(1, size + 1);
+ // AllocSize() tells us how much memory we need to allocate for all our
+ // data.
+ p_.reset(new unsigned char[layout.AllocSize()]);
+ // If running under ASAN, mark the padding bytes, if any, to catch memory
+ // errors.
+ layout.PoisonPadding(p_.get());
+ // Store the size in the allocation.
+ // Pointer<size_t>() is a synonym for Pointer<0>().
+ *layout.Pointer<size_t>(p_.get()) = size;
+ // Store the characters in the allocation.
+ memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+ }
+
+ size_t size() const {
+    // Equivalent to reinterpret_cast<size_t&>(*p_).
+ return *L::Partial().Pointer<size_t>(p_.get());
+ }
+
+ const char* c_str() const {
+    // Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
+ // The argument in Partial(1) specifies that we have size_t[1] in front of
+ // the characters.
+ return L::Partial(1).Pointer<char>(p_.get());
+ }
+
+ private:
+ // Our heap allocation contains a size_t followed by an array of chars.
+ using L = Layout<size_t, char>;
+ std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(CompactString, Works) {
+ CompactString s = "hello";
+ EXPECT_EQ(5, s.size());
+ EXPECT_STREQ("hello", s.c_str());
+}
+
+} // namespace example
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h
new file mode 100644
index 00000000..e8d89f63
--- /dev/null
+++ b/absl/container/internal/node_hash_policy.h
@@ -0,0 +1,90 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+// // Returns a new node allocated and constructed using the allocator, using
+// // the specified arguments.
+// template <class Alloc, class... Args>
+// value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+// // Destroys and deallocates node using the allocator.
+// template <class Alloc>
+// void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+ static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+ using slot_type = typename std::remove_cv<
+ typename std::remove_reference<Reference>::type>::type*;
+
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+ }
+
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::delete_element(alloc, *slot);
+ }
+
+ template <class Alloc>
+ static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static size_t space_used(const slot_type* slot) {
+ if (slot == nullptr) return Policy::element_space_used(nullptr);
+ return Policy::element_space_used(*slot);
+ }
+
+ static Reference element(slot_type* slot) { return **slot; }
+
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ template <class... Ts, class P = Policy>
+ static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<Ts>(ts)...);
+ }
+};
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc
new file mode 100644
index 00000000..a73c7bba
--- /dev/null
+++ b/absl/container/internal/node_hash_policy_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/node_hash_policy.h"
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_policy_traits.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::testing::Pointee;
+
+struct Policy : node_hash_policy<int&, Policy> {
+ using key_type = int;
+ using init_type = int;
+
+ template <class Alloc>
+ static int* new_element(Alloc* alloc, int value) {
+ return new int(value);
+ }
+
+ template <class Alloc>
+ static void delete_element(Alloc* alloc, int* elem) {
+ delete elem;
+ }
+};
+
+using NodePolicy = hash_policy_traits<Policy>;
+
+struct NodeTest : ::testing::Test {
+ std::allocator<int> alloc;
+ int n = 53;
+ int* a = &n;
+};
+
+TEST_F(NodeTest, ConstructDestroy) {
+ NodePolicy::construct(&alloc, &a, 42);
+ EXPECT_THAT(a, Pointee(42));
+ NodePolicy::destroy(&alloc, &a);
+}
+
+TEST_F(NodeTest, Transfer) {
+ int s = 42;
+ int* b = &s;
+ NodePolicy::transfer(&alloc, &a, &b);
+ EXPECT_EQ(&s, a);
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
new file mode 100644
index 00000000..53d4619a
--- /dev/null
+++ b/absl/container/internal/raw_hash_map.h
@@ -0,0 +1,187 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+ // P is Policy. It's passed as a template argument to support maps that have
+ // incomplete types as values, as in unordered_map<K, IncompleteType>.
+ // MappedReference<> may be a non-reference type.
+ template <class P>
+ using MappedReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+ // MappedConstReference<> may be a non-reference type.
+ template <class P>
+ using MappedConstReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+ using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
+ IsTransparent<Hash>::value>;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ static_assert(!std::is_reference<key_type>::value, "");
+ // TODO(alkis): remove this assertion and verify that reference mapped_type is
+ // supported.
+ static_assert(!std::is_reference<mapped_type>::value, "");
+
+ using iterator = typename raw_hash_map::raw_hash_set::iterator;
+ using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+ raw_hash_map() {}
+ using raw_hash_map::raw_hash_set::raw_hash_set;
+
+ // The last two template parameters ensure that both arguments are rvalues
+ // (lvalue arguments are handled by the overloads below). This is necessary
+ // for supporting bitfield arguments.
+ //
+ // union { int n : 1; };
+ // flat_hash_map<int, int> m;
+ // m.insert_or_assign(n, n);
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ return insert_or_assign_impl(std::forward<K>(k), v);
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ return insert_or_assign_impl(k, std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ return insert_or_assign_impl(k, v);
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ return insert_or_assign(std::forward<K>(k), v).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ return insert_or_assign(k, std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ return insert_or_assign(k, v).first;
+ }
+
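+  // try_emplace constructs mapped_type only when `k` is absent and never
+  // overwrites an existing value (unlike insert_or_assign). For example, a
+  // sketch of the expected behavior:
+  //
+  //   flat_hash_map<std::string, std::string> m;
+  //   m.try_emplace("abc", 5, 'x');  // "abc" -> "xxxxx"
+  //   m.try_emplace("abc", 5, 'y');  // no effect; "abc" still maps to "xxxxx"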
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+ K* = nullptr>
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args, K* = nullptr>
+ iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+ }
+
+ template <class K = key_type, class... Args>
+ iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ return try_emplace(k, std::forward<Args>(args)...).first;
+ }
+
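+  // Like std::unordered_map::at(), but calls std::abort() instead of throwing
+  // std::out_of_range when the key is not present.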
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> at(const key_arg<K>& key) {
+ auto it = this->find(key);
+ if (it == this->end()) std::abort();
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedConstReference<P> at(const key_arg<K>& key) const {
+ auto it = this->find(key);
+ if (it == this->end()) std::abort();
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy, K* = nullptr>
+ MappedReference<P> operator[](key_arg<K>&& key) {
+ return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> operator[](const key_arg<K>& key) {
+ return Policy::value(&*try_emplace(key).first);
+ }
+
+ private:
+ template <class K, class V>
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+ else
+ Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+ return {this->iterator_at(res.first), res.second};
+ }
+
+ template <class K = key_type, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ return {this->iterator_at(res.first), res.second};
+ }
+};
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
new file mode 100644
index 00000000..4e690dac
--- /dev/null
+++ b/absl/container/internal/raw_hash_set.cc
@@ -0,0 +1,48 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+constexpr size_t Group::kWidth;
+
+// Returns "random" seed.
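+// The result mixes a monotonically increasing per-thread counter (or a
+// process-wide atomic counter when thread_local is unavailable) with the
+// counter's address, which varies across runs under ASLR.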
+inline size_t RandomSeed() {
+#if ABSL_HAVE_THREAD_LOCAL
+ static thread_local size_t counter = 0;
+ size_t value = ++counter;
+#else // ABSL_HAVE_THREAD_LOCAL
+ static std::atomic<size_t> counter(0);
+ size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif // ABSL_HAVE_THREAD_LOCAL
+ return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+ // To avoid problems with weak hashes and single bit tests, we use % 13.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
new file mode 100644
index 00000000..0c42e4ae
--- /dev/null
+++ b/absl/container/internal/raw_hash_set.h
@@ -0,0 +1,1950 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing hashtable with quadratic probing.
+//
+// This is a low level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+// iterator find(const key_type& key);
+// template <class K>
+// iterator find(const K& key);
+//
+// size_type erase(const key_type& key);
+// template <class K>
+// size_type erase(const K& key);
+//
+// std::pair<iterator, iterator> equal_range(const key_type& key);
+// template <class K>
+// std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
+// exist.
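+//
+// For example (an illustrative sketch, not part of this interface), a
+// transparent hash/eq pair that lets a table keyed by std::string be queried
+// with absl::string_view:
+//
+//   struct StringHash {
+//     using is_transparent = void;
+//     size_t operator()(absl::string_view v) const {
+//       return absl::Hash<absl::string_view>()(v);
+//     }
+//   };
+//   struct StringEq {
+//     using is_transparent = void;
+//     bool operator()(absl::string_view a, absl::string_view b) const {
+//       return a == b;
+//     }
+//   };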
+//
+// find() also supports passing the hash explicitly:
+//
+// iterator find(const key_type& key, size_t hash);
+// template <class U>
+// iterator find(const U& key, size_t hash);
+//
+// In addition, the pointer-to-element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is one
+// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
+// the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+// Group 1 Group 2 Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup, the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group, the slots are matched to H2
+// in parallel. Because H2 is 7 bits (128 states) and the number of slots per
+// group is low (8 or 16), in almost all cases a match in H2 is also a lookup
+// hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase, a slot is cleared. If the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted instead, so that
+// probe sequences that pass through the group keep working.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N the number of groups,
+// the probing function for the i'th probe is:
+//
+// P(0) = H1 % N
+//
+// P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.
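+//
+// For example, with N = 4 groups and H1 % N = 1, the groups are probed in the
+// order P(0) = 1, P(1) = (1 + 1) % 4 = 2, P(2) = (2 + 2) % 4 = 0, and
+// P(3) = (0 + 3) % 4 = 3, visiting each of the 4 groups exactly once.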
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISSTABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/bits.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <size_t Width>
+class probe_seq {
+ public:
+ probe_seq(size_t hash, size_t mask) {
+ assert(((mask + 1) & mask) == 0 && "not a mask");
+ mask_ = mask;
+ offset_ = hash & mask_;
+ }
+ size_t offset() const { return offset_; }
+ size_t offset(size_t i) const { return (offset_ + i) & mask_; }
+
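+  // Advances the sequence. The stride grows by Width on each call, so the
+  // i-th probe lands at offset hash + Width * i * (i + 1) / 2 (mod mask + 1),
+  // implementing the quadratic probing described at the top of this file.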
+ void next() {
+ index_ += Width;
+ offset_ += index_;
+ offset_ &= mask_;
+ }
+ // 0-based probe index. The i-th probe in the probe sequence.
+ size_t index() const { return index_; }
+
+ private:
+ size_t mask_;
+ size_t offset_;
+ size_t index_ = 0;
+};
+
+template <class ContainerKey, class Hash, class Eq>
+struct RequireUsableKey {
+ template <class PassedKey, class... Args>
+ std::pair<
+ decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
+ decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
+ std::declval<const PassedKey&>()))>*
+ operator()(const PassedKey&, const Args&...) const;
+};
+
+template <class E, class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable : std::false_type {};
+
+template <class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable<
+ absl::void_t<decltype(
+ Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
+ Policy, Hash, Eq, Ts...> : std::true_type {};
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+template <class T>
+constexpr bool IsNoThrowSwappable() {
+ using std::swap;
+ return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
+
+template <typename T>
+int TrailingZeros(T x) {
+ return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
+ static_cast<uint64_t>(x))
+ : base_internal::CountTrailingZerosNonZero32(
+ static_cast<uint32_t>(x));
+}
+
+template <typename T>
+int LeadingZeros(T x) {
+ return sizeof(T) == 8
+ ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
+ : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ // These are useful for unit tests (gunit).
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ explicit BitMask(T mask) : mask_(mask) {}
+ BitMask& operator++() {
+ mask_ &= (mask_ - 1);
+ return *this;
+ }
+ explicit operator bool() const { return mask_ != 0; }
+ int operator*() const { return LowestBitSet(); }
+ int LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+ int HighestBitSet() const {
+ return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
+ 1) >>
+ Shift;
+ }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
+ int TrailingZeros() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ int LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+ }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+
+ T mask_;
+};
+
+using ctrl_t = signed char;
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
+enum Ctrl : ctrl_t {
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
+ kEmpty & kDeleted & kSentinel & 0x80,
+    "Special markers need to have the MSB set to make checking for them "
+    "efficient");
+static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
+ "kEmpty and kDeleted must be smaller than kSentinel to make the "
+ "SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(kSentinel == -1,
+ "kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(kEmpty == -128,
+ "kEmpty must be -128 to make the SIMD check for its "
+ "existence efficient (psignb xmm, xmm)");
+static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
+ "kEmpty and kDeleted must share an unset bit that is not shared "
+ "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
+ "efficient");
+static_assert(kDeleted == -2,
+ "kDeleted must be -2 to make the implementation of "
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
+inline ctrl_t* EmptyGroup() {
+ alignas(16) static constexpr ctrl_t empty_group[] = {
+ kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+ kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
+ return const_cast<ctrl_t*>(empty_group);
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+ // The low bits of the pointer have little or no entropy because of
+ // alignment. We shift the pointer to try to use higher entropy bits. A
+  // good number seems to be 12 bits, because that matches the typical 4 KiB
+  // page size.
+ return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+ return (hash >> 7) ^ HashSeed(ctrl);
+}
+inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
+
+inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= 0; }
+inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+
+#if SWISSTABLE_HAVE_SSE2
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match hash.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(hash);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if SWISSTABLE_HAVE_SSSE3
+ // This only works because kEmpty is -128.
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+ return Match(kEmpty);
+#endif
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ }
+
+  // Returns the number of leading (lowest-index) empty or deleted elements in
+  // the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return TrailingZeros(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
+#if SWISSTABLE_HAVE_SSSE3
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
+#endif // SWISSTABLE_HAVE_SSE2
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(little_endian::Load64(pos)) {}
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ // For the technique, see:
+    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if there is a real match
+ // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ }
+
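+  // kEmpty (0b10000000) is the only control byte with the MSB set and bit 1
+  // clear; kDeleted (0b11111110) and kSentinel (0b11111111) both have bit 1
+  // set. Shifting the complement of bit 1 up to the MSB position therefore
+  // isolates the empty slots.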
+ BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+ return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint64_t ctrl;
+};
+
+#if SWISSTABLE_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
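+// For illustration, a lookup proceeds roughly as follows (a simplified
+// sketch; `slot_at`, `found` and `not_found` are placeholders rather than
+// names used by the implementation below):
+//
+//   probe_seq<Group::kWidth> seq(H1(hash, ctrl), capacity);
+//   while (true) {
+//     Group g{ctrl + seq.offset()};
+//     for (int i : g.Match(H2(hash))) {
+//       if (eq(key, slot_at(seq.offset(i)))) return found;
+//     }
+//     if (g.MatchEmpty()) return not_found;
+//     seq.next();
+//   }
+//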
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
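+// Returns true if `n` is a valid capacity: a power of 2 minus 1 that is at
+// least Group::kWidth - 1. With Group::kWidth == 16, the valid capacities are
+// 15, 31, 63, 127, and so on.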
+inline bool IsValidCapacity(size_t n) {
+ return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
+}
+
+// PRECONDITION:
+// IsValidCapacity(capacity)
+// ctrl[capacity] == kSentinel
+// ctrl[i] != kSentinel for all i < capacity
+// Applies mapping for every byte in ctrl:
+// DELETED -> EMPTY
+// EMPTY -> EMPTY
+// FULL -> DELETED
+inline void ConvertDeletedToEmptyAndFullToDeleted(
+ ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == kSentinel);
+ assert(IsValidCapacity(capacity));
+ for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
+ std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+ ctrl[capacity] = kSentinel;
+}
+
+// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
+// greater than or equal to Group::kWidth - 1.
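+// For example, with Group::kWidth == 16, NormalizeCapacity(10) == 15 and
+// NormalizeCapacity(20) == 31.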
+inline size_t NormalizeCapacity(size_t n) {
+ constexpr size_t kMinCapacity = Group::kWidth - 1;
+ return n <= kMinCapacity
+ ? kMinCapacity
+ : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
+}
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
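+//
+// Typical usage (sketch): move an element between two containers without
+// copying it:
+//
+//   auto node = src.extract(src.begin());
+//   dst.insert(std::move(node));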
+template <typename Policy, typename Alloc>
+class node_handle_base {
+ protected:
+ using PolicyTraits = hash_policy_traits<Policy>;
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
+ constexpr node_handle_base() {}
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ template <typename, typename, typename, typename>
+ friend class raw_hash_set;
+
+ node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+ absl::optional<allocator_type> alloc_;
+ mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
+ slot_space_;
+};
+
+// For sets.
+template <typename Policy, typename Alloc, typename = void>
+class node_handle : public node_handle_base<Policy, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using value_type = typename Base::PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const {
+ return Base::PolicyTraits::element(this->slot());
+ }
+
+ private:
+ template <typename, typename, typename, typename>
+ friend class raw_hash_set;
+
+ node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
+
+// For maps.
+template <typename Policy, typename Alloc>
+class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<Policy, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
+ auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
+ return Base::PolicyTraits::key(this->slot());
+ }
+
+ mapped_type& mapped() const {
+ return Base::PolicyTraits::value(
+ &Base::PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ template <typename, typename, typename, typename>
+ friend class raw_hash_set;
+
+ node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct insert_return_type {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
+
+// Helper trait to allow or disallow arbitrary keys when the hash and
+// eq functions are transparent.
+// It is very important that the inner template is an alias and that the type it
+// produces is not a dependent type. Otherwise, type deduction would fail.
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two (of possibly different type) keys and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
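+//
+// For example, a minimal policy for a set of int64_t might look like the
+// following sketch (names are illustrative; see hash_policy_traits.h for the
+// full interface):
+//
+//   struct Int64Policy {
+//     using slot_type = int64_t;
+//     using key_type = int64_t;
+//     using init_type = int64_t;
+//
+//     template <class Alloc>
+//     static void construct(Alloc*, slot_type* slot, int64_t v) { *slot = v; }
+//     template <class Alloc>
+//     static void destroy(Alloc*, slot_type*) {}
+//     static int64_t& element(slot_type* slot) { return *slot; }
+//     template <class F>
+//     static auto apply(F&& f, int64_t x) -> decltype(f(x, x)) {
+//       return std::forward<F>(f)(x, x);
+//     }
+//   };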
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+ using PolicyTraits = hash_policy_traits<Policy>;
+ using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
+ IsTransparent<Hash>::value>;
+
+ public:
+ using init_type = typename PolicyTraits::init_type;
+ using key_type = typename PolicyTraits::key_type;
+ // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+ // code fixes!
+ using slot_type = typename PolicyTraits::slot_type;
+ using allocator_type = Alloc;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = Eq;
+ using policy_type = Policy;
+ using value_type = typename PolicyTraits::value_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::pointer;
+ using const_pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+  // Give an early error when key_type is not hashable or equality-comparable.
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+ auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+ using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
+
+ static Layout MakeLayout(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ return Layout(capacity + Group::kWidth + 1, capacity);
+ }
+
+ using AllocTraits = absl::allocator_traits<allocator_type>;
+ using SlotAlloc = typename absl::allocator_traits<
+ allocator_type>::template rebind_alloc<slot_type>;
+ using SlotAllocTraits = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<slot_type>;
+
+ static_assert(std::is_lvalue_reference<reference>::value,
+ "Policy::element() must return a reference");
+
+ template <typename T>
+ struct SameAsElementReference
+ : std::is_same<typename std::remove_cv<
+ typename std::remove_reference<reference>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type> {};
+
+ // An enabler for insert(T&&): T must be convertible to init_type or be the
+ // same as [cv] value_type [ref].
+ // Note: we separate SameAsElementReference into its own type to avoid using
+ // reference unless we need to. MSVC doesn't seem to like it in some
+ // cases.
+ template <class T>
+ using RequiresInsertable = typename std::enable_if<
+ absl::disjunction<std::is_convertible<T, init_type>,
+ SameAsElementReference<T>>::value,
+ int>::type;
+
+ // RequiresNotInit is a workaround for gcc prior to 7.1.
+ // See https://godbolt.org/g/Y4xsUh.
+ template <class T>
+ using RequiresNotInit =
+ typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+ template <class... Ts>
+ using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
+
+ public:
+ static_assert(std::is_same<pointer, value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+ static_assert(std::is_same<const_pointer, const value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+
+ class iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename raw_hash_set::value_type;
+ using reference =
+ absl::conditional_t<PolicyTraits::constant_iterators::value,
+ const value_type&, value_type&>;
+ using pointer = absl::remove_reference_t<reference>*;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ iterator() {}
+
+ // PRECONDITION: not an end() iterator.
+ reference operator*() const { return PolicyTraits::element(slot_); }
+
+ // PRECONDITION: not an end() iterator.
+ pointer operator->() const { return &operator*(); }
+
+ // PRECONDITION: not an end() iterator.
+ iterator& operator++() {
+ ++ctrl_;
+ ++slot_;
+ skip_empty_or_deleted();
+ return *this;
+ }
+ // PRECONDITION: not an end() iterator.
+ iterator operator++(int) {
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ friend bool operator==(const iterator& a, const iterator& b) {
+ return a.ctrl_ == b.ctrl_;
+ }
+ friend bool operator!=(const iterator& a, const iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+
+ void skip_empty_or_deleted() {
+ while (IsEmptyOrDeleted(*ctrl_)) {
+ // ctrl is not necessarily aligned to Group::kWidth. It is also likely
+ // to read past the space for ctrl bytes and into slots. This is ok
+ // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
+ // is no way to read outside the combined slot array.
+ uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+ ctrl_ += shift;
+ slot_ += shift;
+ }
+ }
+
+ ctrl_t* ctrl_ = nullptr;
+ slot_type* slot_;
+ };
+
+ class const_iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename raw_hash_set::value_type;
+ using reference = typename raw_hash_set::const_reference;
+ using pointer = typename raw_hash_set::const_pointer;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ const_iterator() {}
+ // Implicit construction from iterator.
+ const_iterator(iterator i) : inner_(std::move(i)) {}
+
+ reference operator*() const { return *inner_; }
+ pointer operator->() const { return inner_.operator->(); }
+
+ const_iterator& operator++() {
+ ++inner_;
+ return *this;
+ }
+ const_iterator operator++(int) { return inner_++; }
+
+ friend bool operator==(const const_iterator& a, const const_iterator& b) {
+ return a.inner_ == b.inner_;
+ }
+ friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+ : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+ iterator inner_;
+ };
+
+ using node_type = container_internal::node_handle<Policy, Alloc>;
+
+ raw_hash_set() noexcept(
+ std::is_nothrow_default_constructible<hasher>::value&&
+ std::is_nothrow_default_constructible<key_equal>::value&&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+ explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+ if (bucket_count) {
+ capacity_ = NormalizeCapacity(bucket_count);
+ growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ initialize_slots();
+ }
+ }
+
+ raw_hash_set(size_t bucket_count, const hasher& hash,
+ const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+ explicit raw_hash_set(const allocator_type& alloc)
+ : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(bucket_count, hash, eq, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+ : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+ // Instead of accepting std::initializer_list<value_type> as the first
+ // argument like std::unordered_set<value_type> does, we have two overloads
+ // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+ // This is advantageous for performance.
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies
+ // // the strings into the set.
+ // std::unordered_set<std::string> s = {"abc", "def"};
+ //
+  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+  //   // constructs the std::strings directly inside the set, avoiding
+  //   // temporary std::string copies.
+ // absl::flat_hash_set<std::string> s = {"abc", "def"};
+ //
+ // The same trick is used in insert().
+ //
+ // The enabler is necessary to prevent this constructor from triggering where
+ // the copy constructor is meant to be called.
+ //
+ // absl::flat_hash_set<int> a, b{a};
+ //
+ // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init,
+ const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(const raw_hash_set& that)
+ : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+ that.alloc_ref())) {}
+
+ raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+ : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+ reserve(that.size());
+ // Because the table is guaranteed to be empty, we can do something faster
+ // than a full `insert`.
+ for (const auto& v : that) {
+ const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+ const size_t i = find_first_non_full(hash);
+ set_ctrl(i, H2(hash));
+ emplace_at(i, v);
+ }
+ size_ = that.size();
+ growth_left() -= that.size();
+ }
+
+ raw_hash_set(raw_hash_set&& that) noexcept(
+ std::is_nothrow_copy_constructible<hasher>::value&&
+ std::is_nothrow_copy_constructible<key_equal>::value&&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
+ : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+ slots_(absl::exchange(that.slots_, nullptr)),
+ size_(absl::exchange(that.size_, 0)),
+ capacity_(absl::exchange(that.capacity_, 0)),
+        // Hash, equality and allocator are copied instead of moved because
+        // `that` must be left valid. If Hash were a std::function, moving it
+        // would leave behind a null functor that cannot be called.
+ settings_(that.settings_) {
+ // growth_left was copied above, reset the one from `that`.
+ that.growth_left() = 0;
+ }
+
+ raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+ : ctrl_(EmptyGroup()),
+ slots_(nullptr),
+ size_(0),
+ capacity_(0),
+ settings_(0, that.hash_ref(), that.eq_ref(), a) {
+ if (a == that.alloc_ref()) {
+ std::swap(ctrl_, that.ctrl_);
+ std::swap(slots_, that.slots_);
+ std::swap(size_, that.size_);
+ std::swap(capacity_, that.capacity_);
+ std::swap(growth_left(), that.growth_left());
+ } else {
+ reserve(that.size());
+ // Note: this will copy elements of dense_set and unordered_set instead of
+ // moving them. This can be fixed if it ever becomes an issue.
+ for (auto& elem : that) insert(std::move(elem));
+ }
+ }
+
+ raw_hash_set& operator=(const raw_hash_set& that) {
+ raw_hash_set tmp(that,
+ AllocTraits::propagate_on_container_copy_assignment::value
+ ? that.alloc_ref()
+ : alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+ absl::allocator_traits<allocator_type>::is_always_equal::value&&
+ std::is_nothrow_move_assignable<hasher>::value&&
+ std::is_nothrow_move_assignable<key_equal>::value) {
+ // TODO(sbenza): We should only use the operations from the noexcept clause
+ // to make sure we actually adhere to that contract.
+ return move_assign(
+ std::move(that),
+ typename AllocTraits::propagate_on_container_move_assignment());
+ }
+
+ ~raw_hash_set() { destroy_slots(); }
+
+ iterator begin() {
+ auto it = iterator_at(0);
+ it.skip_empty_or_deleted();
+ return it;
+ }
+ iterator end() { return {ctrl_ + capacity_}; }
+
+ const_iterator begin() const {
+ return const_cast<raw_hash_set*>(this)->begin();
+ }
+ const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator cbegin() const { return begin(); }
+ const_iterator cend() const { return end(); }
+
+ bool empty() const { return !size(); }
+ size_t size() const { return size_; }
+ size_t capacity() const { return capacity_; }
+ size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+ void clear() {
+ // Iterating over this container is O(bucket_count()). When bucket_count()
+ // is much greater than size(), iteration becomes prohibitively expensive.
+    // For clear() it is more important to reuse the allocated array when the
+    // container is small, because allocation takes a comparatively long time
+    // compared to destroying the container's elements. So we pick the
+ // largest bucket_count() threshold for which iteration is still fast and
+ // past that we simply deallocate the array.
+ if (capacity_ > 127) {
+ destroy_slots();
+ } else if (capacity_) {
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ size_ = 0;
+ reset_ctrl();
+ growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ }
+ assert(empty());
+ }
+
+ // This overload kicks in when the argument is an rvalue of insertable and
+ // decomposable type other than init_type.
+ //
+ // flat_hash_map<std::string, int> m;
+ // m.insert(std::make_pair("abc", 42));
+ template <class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+ T* = nullptr>
+ std::pair<iterator, bool> insert(T&& value) {
+ return emplace(std::forward<T>(value));
+ }
+
+ // This overload kicks in when the argument is a bitfield or an lvalue of
+ // insertable and decomposable type.
+ //
+ // union { int n : 1; };
+ // flat_hash_set<int> s;
+ // s.insert(n);
+ //
+ // flat_hash_set<std::string> s;
+ // const char* p = "hello";
+ // s.insert(p);
+ //
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ std::pair<iterator, bool> insert(const T& value) {
+ return emplace(value);
+ }
+
+ // This overload kicks in when the argument is an rvalue of init_type. Its
+ // purpose is to handle brace-init-list arguments.
+ //
+  //   flat_hash_map<std::string, int> m;
+  //   m.insert({"abc", 42});
+ std::pair<iterator, bool> insert(init_type&& value) {
+ return emplace(std::move(value));
+ }
+
+ template <class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+ T* = nullptr>
+ iterator insert(const_iterator, T&& value) {
+ return insert(std::forward<T>(value)).first;
+ }
+
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ iterator insert(const_iterator, const T& value) {
+ return insert(value).first;
+ }
+
+ iterator insert(const_iterator, init_type&& value) {
+ return insert(std::move(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ for (; first != last; ++first) insert(*first);
+ }
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+ void insert(std::initializer_list<T> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ void insert(std::initializer_list<init_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ insert_return_type<iterator, node_type> insert(node_type&& node) {
+ if (!node) return {end(), false, node_type()};
+ const auto& elem = PolicyTraits::element(node.slot());
+ auto res = PolicyTraits::apply(
+ InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+ if (res.second) {
+ node.reset();
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+
+ iterator insert(const_iterator, node_type&& node) {
+ return insert(std::move(node)).first;
+ }
+
+ // This overload kicks in if we can deduce the key from args. This enables us
+ // to avoid constructing value_type if an entry with the same key already
+ // exists.
+ //
+ // For example:
+ //
+ // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+ // // Creates no std::string copies and makes no heap allocations.
+ // m.emplace("abc", "xyz");
+ template <class... Args, typename std::enable_if<
+ IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return PolicyTraits::apply(EmplaceDecomposable{*this},
+ std::forward<Args>(args)...);
+ }
+
+ // This overload kicks in if we cannot deduce the key from args. It constructs
+ // value_type unconditionally and then either moves it into the table or
+ // destroys it.
+ template <class... Args, typename std::enable_if<
+ !IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+ raw;
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+ PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ const auto& elem = PolicyTraits::element(slot);
+ return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+ }
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ // Extension API: support for lazy emplace.
+ //
+ // Looks up key in the table. If found, returns the iterator to the element.
+ // Otherwise calls f with one argument of type raw_hash_set::constructor. f
+ // MUST call raw_hash_set::constructor with arguments as if a
+ // raw_hash_set::value_type is constructed, otherwise the behavior is
+ // undefined.
+ //
+ // For example:
+ //
+ //   std::unordered_set<ArenaString> s;
+ //   // Constructs ArenaString even if "abc" is already in the set.
+ //   s.insert(ArenaString(&arena, "abc"));
+ //
+ //   flat_hash_set<ArenaString> s;
+ //   // Constructs ArenaString only if "abc" is not yet in the set.
+ // s.lazy_emplace("abc", [&](const constructor& ctor) {
+ // ctor(&arena, "abc");
+ // });
+ //
+ // WARNING: This API is currently experimental. If there is a way to implement
+ // the same thing with the rest of the API, prefer that.
+ class constructor {
+ friend class raw_hash_set;
+
+ public:
+ template <class... Args>
+ void operator()(Args&&... args) const {
+ assert(*slot_);
+ PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+ *slot_ = nullptr;
+ }
+
+ private:
+ constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+ allocator_type* alloc_;
+ slot_type** slot_;
+ };
+
+ template <class K = key_type, class F>
+ iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ auto res = find_or_prepare_insert(key);
+ if (res.second) {
+ slot_type* slot = slots_ + res.first;
+ std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+ assert(!slot);
+ }
+ return iterator_at(res.first);
+ }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.erase("abc");
+ //
+ // flat_hash_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.erase("abc");
+ template <class K = key_type>
+ size_type erase(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it == end()) return 0;
+ erase(it);
+ return 1;
+ }
+
+ // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+ // this method returns void to reduce algorithmic complexity to O(1). In
+ // order to erase while iterating across a map, use the following idiom (which
+ // also works for standard containers):
+ //
+ // for (auto it = m.begin(), end = m.end(); it != end;) {
+ // if (<pred>) {
+ // m.erase(it++);
+ // } else {
+ // ++it;
+ // }
+ // }
+ void erase(const_iterator cit) { erase(cit.inner_); }
+
+ // This overload is necessary because otherwise erase<K>(const K&) would be
+ // a better match if a non-const iterator is passed as an argument.
+ void erase(iterator it) {
+ assert(it != end());
+ PolicyTraits::destroy(&alloc_ref(), it.slot_);
+ erase_meta_only(it);
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ while (first != last) {
+ erase(first++);
+ }
+ return last.inner_;
+ }
+
+ // Moves elements from `src` into `this`.
+ // If the element already exists in `this`, it is left unmodified in `src`.
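+  //
+  // An illustrative sketch (flat_hash_set wraps this container):
+  //
+  //   flat_hash_set<std::string> dst = {"a"};
+  //   flat_hash_set<std::string> src = {"a", "b"};
+  //   dst.merge(src);  // dst == {"a", "b"}; "a" stays behind in src.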
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
+ assert(this != &src);
+ for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+ PolicyTraits::element(it.slot_))
+ .second) {
+ src.erase_meta_only(it);
+ }
+ }
+ }
+
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+ merge(src);
+ }
+
+ node_type extract(const_iterator position) {
+ node_type node(alloc_ref(), position.inner_.slot_);
+ erase_meta_only(position);
+ return node;
+ }
+
+ template <
+ class K = key_type,
+ typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+ node_type extract(const key_arg<K>& key) {
+ auto it = find(key);
+ return it == end() ? node_type() : extract(const_iterator{it});
+ }
+
+ void swap(raw_hash_set& that) noexcept(
+ IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+ (!AllocTraits::propagate_on_container_swap::value ||
+ IsNoThrowSwappable<allocator_type>())) {
+ using std::swap;
+ swap(ctrl_, that.ctrl_);
+ swap(slots_, that.slots_);
+ swap(size_, that.size_);
+ swap(capacity_, that.capacity_);
+ swap(growth_left(), that.growth_left());
+ swap(hash_ref(), that.hash_ref());
+ swap(eq_ref(), that.eq_ref());
+ if (AllocTraits::propagate_on_container_swap::value) {
+ swap(alloc_ref(), that.alloc_ref());
+ } else {
+ // If the allocators do not compare equal it is officially undefined
+ // behavior. We choose to do nothing.
+ }
+ }
+
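+  // Note that rehash(0) on a non-empty table shrinks capacity to the minimum
+  // that still fits size() elements, while reserve(n) below guarantees room
+  // for n elements without further rehashing.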
+ void rehash(size_t n) {
+ if (n == 0 && capacity_ == 0) return;
+ if (n == 0 && size_ == 0) return destroy_slots();
+ auto m = NormalizeCapacity(std::max(n, NumSlotsFast(size())));
+ // n == 0 unconditionally rehashes as per the standard.
+ if (n == 0 || m > capacity_) {
+ resize(m);
+ }
+ }
+
+ void reserve(size_t n) {
+ rehash(NumSlotsFast(n));
+ }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.count("abc");
+ //
+ // ch_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.count("abc");
+ template <class K = key_type>
+ size_t count(const key_arg<K>& key) const {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ // Issues CPU prefetch instructions for the memory needed to find or insert
+ // a key. Like all lookup functions, this supports heterogeneous keys.
+ //
+ // NOTE: This is a very low level operation and should not be used without
+ // specific benchmarks indicating its importance.
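+  //
+  // An illustrative pattern (mirroring the accompanying benchmark test):
+  //
+  //   for (int i = 0; i < n; ++i) {
+  //     t.prefetch(keys[i + 20]);  // arbitrary lookahead distance
+  //     Use(t.find(keys[i]));
+  //   }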
+ template <class K = key_type>
+ void prefetch(const key_arg<K>& key) const {
+ (void)key;
+#if defined(__GNUC__)
+ auto seq = probe(hash_ref()(key));
+ __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+ __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif // __GNUC__
+ }
+
+ // The API of find() has two extensions.
+ //
+ // 1. The hash can be passed by the user. It must be equal to the hash of the
+ // key.
+ //
+ // 2. The type of the key argument doesn't have to be key_type. This is the
+ // so-called heterogeneous key support.
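+  //
+  // An illustrative sketch, assuming a hasher and equality functor that
+  // define is_transparent (such as the tests' StringHash/StringEq):
+  //
+  //   flat_hash_set<std::string, StringHash, StringEq> s = {"abc"};
+  //   s.find(absl::string_view("abc"));  // no temporary std::string is built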
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key, size_t hash) {
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return iterator_at(seq.offset(i));
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ seq.next();
+ }
+ }
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key) {
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key, size_t hash) const {
+ return const_cast<raw_hash_set*>(this)->find(key, hash);
+ }
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key) const {
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ bool contains(const key_arg<K>& key) const {
+ return find(key) != end();
+ }
+
+ template <class K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+ template <class K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K>& key) const {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+
+ size_t bucket_count() const { return capacity_; }
+ float load_factor() const {
+ return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ }
+ float max_load_factor() const { return 1.0f; }
+ void max_load_factor(float) {
+ // Does nothing.
+ }
+
+ hasher hash_function() const { return hash_ref(); }
+ key_equal key_eq() const { return eq_ref(); }
+ allocator_type get_allocator() const { return alloc_ref(); }
+
+ friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+ if (a.size() != b.size()) return false;
+ const raw_hash_set* outer = &a;
+ const raw_hash_set* inner = &b;
+ if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+ for (const value_type& elem : *outer)
+ if (!inner->has_element(elem)) return false;
+ return true;
+ }
+
+ friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+ return !(a == b);
+ }
+
+ friend void swap(raw_hash_set& a,
+ raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+ }
+
+ private:
+ template <class Container, typename Enabler>
+ friend struct absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess;
+
+ struct FindElement {
+ template <class K, class... Args>
+ const_iterator operator()(const K& key, Args&&...) const {
+ return s.find(key);
+ }
+ const raw_hash_set& s;
+ };
+
+ struct HashElement {
+ template <class K, class... Args>
+ size_t operator()(const K& key, Args&&...) const {
+ return h(key);
+ }
+ const hasher& h;
+ };
+
+ template <class K1>
+ struct EqualElement {
+ template <class K2, class... Args>
+ bool operator()(const K2& lhs, Args&&...) const {
+ return eq(lhs, rhs);
+ }
+ const K1& rhs;
+ const key_equal& eq;
+ };
+
+ struct EmplaceDecomposable {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ s.emplace_at(res.first, std::forward<Args>(args)...);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ };
+
+ template <bool do_destroy>
+ struct InsertSlot {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ } else if (do_destroy) {
+ PolicyTraits::destroy(&s.alloc_ref(), &slot);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ // Constructed slot. Either moved into place or destroyed.
+ slot_type&& slot;
+ };
+
+ // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
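+  // For example, with kMaxLoadFactor == 14/16, NumSlotsFast(100) returns
+  // (100 * 16 + 13) / 14 == 115, matching std::ceil(100 / 0.875).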
+ static inline size_t NumSlotsFast(size_t n) {
+ return static_cast<size_t>(
+ (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
+ kMaxLoadFactorNumerator);
+ }
+
+ // "erases" the object from the container, except that it doesn't actually
+ // destroy the object. It only updates all the metadata of the class.
+ // This can be used in conjunction with Policy::transfer to move the object to
+ // another place.
+ void erase_meta_only(const_iterator it) {
+ assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+ --size_;
+ const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index_before = (index - Group::kWidth) & capacity_;
+ const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+ // We count how many consecutive non-empty slots there are to the right and
+ // to the left of `it`. If the sum is >= kWidth then there is at least one
+ // probe window that might have seen a full group.
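+ //
+ // For example, with Group::kWidth == 16: if 10 consecutive non-empty slots
+ // precede `it` and 3 follow it, then 10 + 3 < 16, so no probe window could
+ // have seen a full group and the slot can safely revert to kEmpty.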
+ bool was_never_full =
+ empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros() +
+ empty_before.LeadingZeros()) < Group::kWidth;
+
+ set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+ growth_left() += was_never_full;
+ }
+
+ void initialize_slots() {
+ assert(capacity_);
+ auto layout = MakeLayout(capacity_);
+ char* mem = static_cast<char*>(
+ Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
+ ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
+ slots_ = layout.template Pointer<1>(mem);
+ reset_ctrl();
+ growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ }
+
+ void destroy_slots() {
+ if (!capacity_) return;
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ auto layout = MakeLayout(capacity_);
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+ ctrl_ = EmptyGroup();
+ slots_ = nullptr;
+ size_ = 0;
+ capacity_ = 0;
+ growth_left() = 0;
+ }
+
+ void resize(size_t new_capacity) {
+ assert(IsValidCapacity(new_capacity));
+ auto* old_ctrl = ctrl_;
+ auto* old_slots = slots_;
+ const size_t old_capacity = capacity_;
+ capacity_ = new_capacity;
+ initialize_slots();
+
+ for (size_t i = 0; i != old_capacity; ++i) {
+ if (IsFull(old_ctrl[i])) {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(old_slots + i));
+ size_t new_i = find_first_non_full(hash);
+ set_ctrl(new_i, H2(hash));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ }
+ }
+ if (old_capacity) {
+ SanitizerUnpoisonMemoryRegion(old_slots,
+ sizeof(slot_type) * old_capacity);
+ auto layout = MakeLayout(old_capacity);
+ Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+ layout.AllocSize());
+ }
+ }
+
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+ assert(IsValidCapacity(capacity_));
+ // Algorithm:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ // - for each slot marked as DELETED
+ // hash = Hash(element)
+ // target = find_first_non_full(hash)
+ // if target is in the same group
+ // mark slot as FULL
+ // else if target is EMPTY
+ // transfer element to target
+ // mark slot as EMPTY
+ // mark target as FULL
+ // else if target is DELETED
+ // swap current element with target element
+ // mark target as FULL
+ // repeat procedure for current slot with moved from element (target)
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+ typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+ raw;
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (!IsDeleted(ctrl_[i])) continue;
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(slots_ + i));
+ size_t new_i = find_first_non_full(hash);
+
+ // Check whether the old and new indices fall within the same group with
+ // respect to the hash. If they do, we don't need to move the object: it is
+ // already in the best probe position it can occupy.
+ const auto probe_index = [&](size_t pos) {
+ return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+ };
+
+ // Element doesn't move.
+ if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+ set_ctrl(i, H2(hash));
+ continue;
+ }
+ if (IsEmpty(ctrl_[new_i])) {
+ // Transfer element to the empty spot.
+ // set_ctrl poisons/unpoisons the slots so we have to call it at the
+ // right time.
+ set_ctrl(new_i, H2(hash));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+ set_ctrl(i, kEmpty);
+ } else {
+ assert(IsDeleted(ctrl_[new_i]));
+ set_ctrl(new_i, H2(hash));
+ // Until we are done rehashing, DELETED marks previously FULL slots.
+ // Swap i and new_i elements.
+ PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+ --i; // repeat
+ }
+ }
+ growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ }
+
+ void rehash_and_grow_if_necessary() {
+ if (capacity_ == 0) {
+ resize(Group::kWidth - 1);
+ } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+ // Squash DELETED without growing if there is enough capacity.
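+ // E.g. with capacity_ == 31 this branch is taken while size() <= 13,
+ // since 31 * (14/16) / 2 == 13.5625; otherwise we grow to capacity 63.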
+ drop_deletes_without_resize();
+ } else {
+ // Otherwise grow the container.
+ resize(capacity_ * 2 + 1);
+ }
+ }
+
+ bool has_element(const value_type& elem) const {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+ elem))
+ return true;
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ seq.next();
+ assert(seq.index() < capacity_ && "full table!");
+ }
+ return false;
+ }
+
+ // Probes the raw_hash_set with the probe sequence for hash and returns the
+ // index of the first empty or deleted slot.
+ // NOTE: this function must work with tables having both kEmpty and kDeleted
+ // in one group. Such tables appear during drop_deletes_without_resize.
+ //
+ // This function is very useful when insertions happen and:
+ // - the input is already a set
+ // - there are enough slots
+ // - the element with the hash is not in the table
+ size_t find_first_non_full(size_t hash) {
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to force small tables to have random entries too, so in debug
+ // builds we randomly insert at either the front or the back of the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (ShouldInsertBackwards(hash, ctrl_))
+ return seq.offset(mask.HighestBitSet());
+ else
+ return seq.offset(mask.LowestBitSet());
+#else
+ return seq.offset(mask.LowestBitSet());
+#endif
+ }
+ assert(seq.index() < capacity_ && "full table!");
+ seq.next();
+ }
+ }
+
+ // TODO(alkis): Optimize this assuming *this and that don't overlap.
+ raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+ raw_hash_set tmp(std::move(that));
+ swap(tmp);
+ return *this;
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+ raw_hash_set tmp(std::move(that), alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ protected:
+ template <class K>
+ std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ auto hash = hash_ref()(key);
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return {seq.offset(i), false};
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ seq.next();
+ }
+ return {prepare_insert(hash), true};
+ }
+
+ size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+ size_t target = find_first_non_full(hash);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+ rehash_and_grow_if_necessary();
+ target = find_first_non_full(hash);
+ }
+ ++size_;
+ growth_left() -= IsEmpty(ctrl_[target]);
+ set_ctrl(target, H2(hash));
+ return target;
+ }
+
+ // Constructs the value in the space pointed to by the iterator. This only
+ // works
+ // after an unsuccessful find_or_prepare_insert() and before any other
+ // modifications happen in the raw_hash_set.
+ //
+ // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+ // k is the key decomposed from `forward<Args>(args)...`, and the bool
+ // returned by find_or_prepare_insert(k) was true.
+ // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+ template <class... Args>
+ void emplace_at(size_t i, Args&&... args) {
+ PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ std::forward<Args>(args)...);
+
+ assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+ iterator_at(i) &&
+ "constructed value does not match the lookup key");
+ }
+
+ iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+ const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+ friend struct RawHashSetTestOnlyAccess;
+
+ probe_seq<Group::kWidth> probe(size_t hash) const {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
+ }
+
+ // Reset all ctrl bytes back to kEmpty, except the sentinel.
+ void reset_ctrl() {
+ std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+ ctrl_[capacity_] = kSentinel;
+ SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ }
+
+ // Sets the control byte, and if `i < Group::kWidth`, sets the cloned byte at
+ // the end too.
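+  //
+  // For example, with capacity_ == 15 and Group::kWidth == 16 (illustrative
+  // values), set_ctrl(2, h) also writes the clone at index
+  // ((2 - 16) & 15) + 16 == 18, so wrapped-around group loads see the byte.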
+ void set_ctrl(size_t i, ctrl_t h) {
+ assert(i < capacity_);
+
+ if (IsFull(h)) {
+ SanitizerUnpoisonObject(slots_ + i);
+ } else {
+ SanitizerPoisonObject(slots_ + i);
+ }
+
+ ctrl_[i] = h;
+ ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+ }
+
+ size_t& growth_left() { return settings_.template get<0>(); }
+
+ hasher& hash_ref() { return settings_.template get<1>(); }
+ const hasher& hash_ref() const { return settings_.template get<1>(); }
+ key_equal& eq_ref() { return settings_.template get<2>(); }
+ const key_equal& eq_ref() const { return settings_.template get<2>(); }
+ allocator_type& alloc_ref() { return settings_.template get<3>(); }
+ const allocator_type& alloc_ref() const {
+ return settings_.template get<3>();
+ }
+
+ // On average each group has 2 empty slots (for the vectorized case).
+ static constexpr int64_t kMaxLoadFactorNumerator = 14;
+ static constexpr int64_t kMaxLoadFactorDenominator = 16;
+ static constexpr float kMaxLoadFactor =
+ 1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
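+  // E.g. a table with capacity_ == 15 holds at most
+  // static_cast<size_t>(15 * kMaxLoadFactor) == 13 elements before it must
+  // grow or squash deleted slots.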
+
+ // TODO(alkis): Investigate removing some of these fields:
+ // - ctrl/slots can be derived from each other
+ // - size can be moved into the slot array
+ ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
+ slot_type* slots_ = nullptr; // [capacity * slot_type]
+ size_t size_ = 0; // number of full slots
+ size_t capacity_ = 0; // total number of slots
+ absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+ key_equal, allocator_type>
+ settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+ using Traits = typename Set::PolicyTraits;
+ using Slot = typename Traits::slot_type;
+
+ static size_t GetNumProbes(const Set& set,
+ const typename Set::key_type& key) {
+ size_t num_probes = 0;
+ size_t hash = set.hash_ref()(key);
+ auto seq = set.probe(hash);
+ while (true) {
+ container_internal::Group g{set.ctrl_ + seq.offset()};
+ for (int i : g.Match(container_internal::H2(hash))) {
+ if (Traits::apply(
+ typename Set::template EqualElement<typename Set::key_type>{
+ key, set.eq_ref()},
+ Traits::element(set.slots_ + seq.offset(i))))
+ return num_probes;
+ ++num_probes;
+ }
+ if (g.MatchEmpty()) return num_probes;
+ seq.next();
+ ++num_probes;
+ }
+ }
+
+ static size_t AllocatedByteSize(const Set& c) {
+ size_t capacity = c.capacity_;
+ if (capacity == 0) return 0;
+ auto layout = Set::MakeLayout(capacity);
+ size_t m = layout.AllocSize();
+
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * c.size();
+ } else {
+ for (size_t i = 0; i != capacity; ++i) {
+ if (container_internal::IsFull(c.ctrl_[i])) {
+ m += Traits::space_used(c.slots_ + i);
+ }
+ }
+ }
+ return m;
+ }
+
+ static size_t LowerBoundAllocatedByteSize(size_t size) {
+ size_t capacity = container_internal::NormalizeCapacity(
+ std::ceil(size / Set::kMaxLoadFactor));
+ if (capacity == 0) return 0;
+ auto layout = Set::MakeLayout(capacity);
+ size_t m = layout.AllocSize();
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * size;
+ }
+ return m;
+ }
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
new file mode 100644
index 00000000..f5779d62
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -0,0 +1,430 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <scoped_allocator>
+#include <set>
+
+#include "gtest/gtest.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/container/internal/tracked.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+enum AllocSpec {
+ kPropagateOnCopy = 1,
+ kPropagateOnMove = 2,
+ kPropagateOnSwap = 4,
+};
+
+struct AllocState {
+ size_t num_allocs = 0;
+ std::set<void*> owned;
+};
+
+template <class T,
+ int Spec = kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>
+class CheckedAlloc {
+ public:
+ template <class, int>
+ friend class CheckedAlloc;
+
+ using value_type = T;
+
+ CheckedAlloc() {}
+ explicit CheckedAlloc(size_t id) : id_(id) {}
+ CheckedAlloc(const CheckedAlloc&) = default;
+ CheckedAlloc& operator=(const CheckedAlloc&) = default;
+
+ template <class U>
+ CheckedAlloc(const CheckedAlloc<U, Spec>& that)
+ : id_(that.id_), state_(that.state_) {}
+
+ template <class U>
+ struct rebind {
+ using other = CheckedAlloc<U, Spec>;
+ };
+
+ using propagate_on_container_copy_assignment =
+ std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>;
+
+ using propagate_on_container_move_assignment =
+ std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>;
+
+ using propagate_on_container_swap =
+ std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>;
+
+ CheckedAlloc select_on_container_copy_construction() const {
+ if (Spec & kPropagateOnCopy) return *this;
+ return {};
+ }
+
+ T* allocate(size_t n) {
+ T* ptr = std::allocator<T>().allocate(n);
+ track_alloc(ptr);
+ return ptr;
+ }
+ void deallocate(T* ptr, size_t n) {
+ memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned.
+ track_dealloc(ptr);
+ return std::allocator<T>().deallocate(ptr, n);
+ }
+
+ friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) {
+ return !(a == b);
+ }
+
+ size_t num_allocs() const { return state_->num_allocs; }
+
+ void swap(CheckedAlloc& that) {
+ using std::swap;
+ swap(id_, that.id_);
+ swap(state_, that.state_);
+ }
+
+ friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); }
+
+ friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) {
+ return o << "alloc(" << a.id_ << ")";
+ }
+
+ private:
+ void track_alloc(void* ptr) {
+ AllocState* state = state_.get();
+ ++state->num_allocs;
+ if (!state->owned.insert(ptr).second)
+ ADD_FAILURE() << *this << " got previously allocated memory: " << ptr;
+ }
+ void track_dealloc(void* ptr) {
+ if (state_->owned.erase(ptr) != 1)
+ ADD_FAILURE() << *this
+ << " deleting memory owned by another allocator: " << ptr;
+ }
+
+ size_t id_ = std::numeric_limits<size_t>::max();
+
+ std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>();
+};
+
+struct Identity {
+ int32_t operator()(int32_t v) const { return v; }
+};
+
+struct Policy {
+ using slot_type = Tracked<int32_t>;
+ using init_type = Tracked<int32_t>;
+ using key_type = int32_t;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot,
+ Args&&... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ template <class F>
+ static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) {
+ return std::forward<F>(f)(v, v);
+ }
+
+ template <class F>
+ static auto apply(F&& f, const slot_type& v)
+ -> decltype(std::forward<F>(f)(v.val(), v)) {
+ return std::forward<F>(f)(v.val(), v);
+ }
+
+ template <class F>
+ static auto apply(F&& f, slot_type&& v)
+ -> decltype(std::forward<F>(f)(v.val(), std::move(v))) {
+ return std::forward<F>(f)(v.val(), std::move(v));
+ }
+
+ static slot_type& element(slot_type* slot) { return *slot; }
+};
+
+template <int Spec>
+struct PropagateTest : public ::testing::Test {
+ using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>;
+
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>;
+
+ PropagateTest() {
+ EXPECT_EQ(a1, t1.get_allocator());
+ EXPECT_NE(a2, t1.get_allocator());
+ }
+
+ Alloc a1 = Alloc(1);
+ Table t1 = Table(0, a1);
+ Alloc a2 = Alloc(2);
+};
+
+using PropagateOnAll =
+ PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>;
+
+TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); }
+
+TEST_F(PropagateOnAll, InsertAllocates) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, InsertDecomposes) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+
+ EXPECT_FALSE(t1.insert(0).second);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, RehashMoves) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(0, it->num_moves());
+ t1.rehash(2 * t1.capacity());
+ EXPECT_EQ(2, a1.num_allocs());
+ it = t1.find(0);
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(t1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(t1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, u.get_allocator().num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a2);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a2);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1));
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1));
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a2);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a2);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = t1;
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = t1;
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = t1;
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = t1;
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = std::move(t1);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, Swap) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u.swap(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(a2, t1.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
new file mode 100644
index 00000000..302f9758
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -0,0 +1,1830 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+ template <typename C>
+ static auto GetSlots(const C& c) -> decltype(c.slots_) {
+ return c.slots_;
+ }
+};
+
+namespace {
+
+using ::testing::DoubleNear;
+using ::testing::ElementsAre;
+using ::testing::Optional;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(Util, NormalizeCapacity) {
+ constexpr size_t kMinCapacity = Group::kWidth - 1;
+ EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
+ EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
+ EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
+ EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
+ EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
+ EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+}
+
+TEST(Util, probe_seq) {
+ probe_seq<16> seq(0, 127);
+ auto gen = [&]() {
+ size_t res = seq.offset();
+ seq.next();
+ return res;
+ };
+ std::vector<size_t> offsets(8);
+ std::generate_n(offsets.begin(), 8, gen);
+ EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+ seq = probe_seq<16>(128, 127);
+ std::generate_n(offsets.begin(), 8, gen);
+ EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+}
+
+TEST(BitMask, Smoke) {
+ EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
+ EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
+
+ EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
+}
+
+TEST(BitMask, WithShift) {
+ // See the non-SSE version of Group for details on what this math is for.
+ uint64_t ctrl = 0x1716151413121110;
+ uint64_t hash = 0x12;
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ uint64_t mask = (x - lsbs) & ~x & msbs;
+ EXPECT_EQ(0x0000000080800000, mask);
+
+ BitMask<uint64_t, 8, 3> b(mask);
+ EXPECT_EQ(*b, 2);
+}
+
+TEST(BitMask, LeadingTrailing) {
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6);
+
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0);
+
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
+}
+
+TEST(Group, EmptyGroup) {
+ for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
+}
+
+TEST(Group, Match) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+ EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
+ EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
+ EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
+ EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+ EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
+ EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Group, MatchEmpty) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Group, MatchEmptyOrDeleted) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Batch, DropDeletes) {
+ constexpr size_t kCapacity = 63;
+ constexpr size_t kGroupWidth = container_internal::Group::kWidth;
+ std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
+ ctrl[kCapacity] = kSentinel;
+ std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ for (size_t i = 0; i != kCapacity; ++i) {
+ ctrl[i] = pattern[i % pattern.size()];
+ if (i < kGroupWidth - 1)
+ ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
+ }
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
+ ASSERT_EQ(ctrl[kCapacity], kSentinel);
+ for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
+ if (i == kCapacity) expected = kSentinel;
+ if (expected == kDeleted) expected = kEmpty;
+ if (IsFull(expected)) expected = kDeleted;
+ EXPECT_EQ(ctrl[i], expected)
+ << i << " " << int{pattern[i % pattern.size()]};
+ }
+}
+
+TEST(Group, CountLeadingEmptyOrDeleted) {
+ const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
+ const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+
+ for (ctrl_t empty : empty_examples) {
+ std::vector<ctrl_t> e(Group::kWidth, empty);
+ EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
+ for (ctrl_t full : full_examples) {
+ for (size_t i = 0; i != Group::kWidth; ++i) {
+ std::vector<ctrl_t> f(Group::kWidth, empty);
+ f[i] = full;
+ EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ std::vector<ctrl_t> f(Group::kWidth, empty);
+ f[Group::kWidth * 2 / 3] = full;
+ f[Group::kWidth / 2] = full;
+ EXPECT_EQ(
+ Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ }
+}
+
+struct IntPolicy {
+ using slot_type = int64_t;
+ using key_type = int64_t;
+ using init_type = int64_t;
+
+ static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+ static void destroy(void*, int64_t*) {}
+ static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static int64_t& element(slot_type* slot) { return *slot; }
+
+ template <class F>
+ static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
+class StringPolicy {
+ template <class F, class K, class V,
+ class = typename std::enable_if<
+ std::is_convertible<const K&, absl::string_view>::value>::type>
+ decltype(std::declval<F>()(
+ std::declval<const absl::string_view&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(),
+ std::declval<V>())) static apply_impl(F&& f,
+ std::pair<std::tuple<K>, V> p) {
+ const absl::string_view& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+ }
+
+ public:
+ struct slot_type {
+ struct ctor {};
+
+ template <class... Ts>
+ slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+ std::pair<std::string, std::string> pair;
+ };
+
+ using key_type = std::string;
+ using init_type = std::pair<std::string, std::string>;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(old_slot->pair));
+ destroy(alloc, old_slot);
+ }
+
+ static std::pair<std::string, std::string>& element(slot_type* slot) {
+ return slot->pair;
+ }
+
+ template <class F, class... Args>
+ static auto apply(F&& f, Args&&... args)
+ -> decltype(apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...))) {
+ return apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...));
+ }
+};
+
+struct StringHash : absl::Hash<absl::string_view> {
+ using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+ using is_transparent = void;
+};
+
+struct StringTable
+ : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+ using Base = typename StringTable::raw_hash_set;
+ StringTable() {}
+ using Base::Base;
+};
+
+struct IntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, std::allocator<int64_t>> {
+ using Base = typename IntTable::raw_hash_set;
+ IntTable() {}
+ using Base::Base;
+};
+
+struct BadFastHash {
+ template <class T>
+ size_t operator()(const T&) const {
+ return 0;
+ }
+};
+
+struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
+ std::allocator<int>> {
+ using Base = typename BadTable::raw_hash_set;
+ BadTable() {}
+ using Base::Base;
+};
+
+TEST(Table, EmptyFunctorOptimization) {
+ static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
+ static_assert(std::is_empty<std::allocator<int>>::value, "");
+
+ struct MockTable {
+ void* ctrl;
+ void* slots;
+ size_t size;
+ size_t capacity;
+ size_t growth_left;
+ };
+ struct StatelessHash {
+ size_t operator()(absl::string_view) const { return 0; }
+ };
+ struct StatefulHash : StatelessHash {
+ size_t dummy;
+ };
+
+ EXPECT_EQ(
+ sizeof(MockTable),
+ sizeof(
+ raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>, std::allocator<int>>));
+
+ EXPECT_EQ(
+ sizeof(MockTable) + sizeof(StatefulHash),
+ sizeof(
+ raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>, std::allocator<int>>));
+}
+
+TEST(Table, Empty) {
+ IntTable t;
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.empty());
+}
+
+#ifdef __GNUC__
+template <class T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
+ asm volatile("" : : "r,m"(v) : "memory");
+}
+#endif
+
+TEST(Table, Prefetch) {
+ IntTable t;
+ t.emplace(1);
+ // Works for both present and absent keys.
+ t.prefetch(1);
+ t.prefetch(2);
+
+ // Do not run in debug mode, when prefetch is not implemented, or when
+ // sanitizers are enabled.
+#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
+ !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \
+ !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ const auto now = [] { return absl::base_internal::CycleClock::Now(); };
+
+ // Make size large enough not to fit in the L2 cache (16.7 MB).
+ static constexpr int size = 1 << 22;
+ for (int i = 0; i < size; ++i) t.insert(i);
+
+ int64_t no_prefetch = 0, prefetch = 0;
+ for (int iter = 0; iter < 10; ++iter) {
+ int64_t time = now();
+ for (int i = 0; i < size; ++i) {
+ DoNotOptimize(t.find(i));
+ }
+ no_prefetch += now() - time;
+
+ time = now();
+ for (int i = 0; i < size; ++i) {
+ t.prefetch(i + 20);
+ DoNotOptimize(t.find(i));
+ }
+ prefetch += now() - time;
+ }
+
+ // no_prefetch is at least 30% slower.
+ EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
+#endif
+}
+
+TEST(Table, LookupEmpty) {
+ IntTable t;
+ auto it = t.find(0);
+ EXPECT_TRUE(it == t.end());
+}
+
+TEST(Table, Insert1) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 0);
+ EXPECT_EQ(1, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+}
+
+TEST(Table, Insert2) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 0);
+ EXPECT_EQ(1, t.size());
+ EXPECT_TRUE(t.find(1) == t.end());
+ res = t.emplace(1);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 1);
+ EXPECT_EQ(2, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+ EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, InsertCollision) {
+ BadTable t;
+ EXPECT_TRUE(t.find(1) == t.end());
+ auto res = t.emplace(1);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 1);
+ EXPECT_EQ(1, t.size());
+
+ EXPECT_TRUE(t.find(2) == t.end());
+ res = t.emplace(2);
+ EXPECT_THAT(*res.first, 2);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(2, t.size());
+
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_THAT(*t.find(2), 2);
+}
+
+// Test that we do not add an existing element when we need to search through
+// many groups with deleted elements.
+TEST(Table, InsertCollisionAndFindAfterDelete) {
+ BadTable t; // all elements go to the same group.
+ // Have at least 2 groups with Group::kWidth collisions
+ // plus some extra collisions in the last group.
+ constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
+ for (size_t i = 0; i < kNumInserts; ++i) {
+ auto res = t.emplace(i);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, i);
+ EXPECT_EQ(i + 1, t.size());
+ }
+
+ // Remove elements one by one and check
+ // that we can still find all the other elements.
+ for (size_t i = 0; i < kNumInserts; ++i) {
+ EXPECT_EQ(1, t.erase(i)) << i;
+ for (size_t j = i + 1; j < kNumInserts; ++j) {
+ EXPECT_THAT(*t.find(j), j);
+ auto res = t.emplace(j);
+ EXPECT_FALSE(res.second) << i << " " << j;
+ EXPECT_THAT(*res.first, j);
+ EXPECT_EQ(kNumInserts - i - 1, t.size());
+ }
+ }
+ EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LazyEmplace) {
+ StringTable t;
+ bool called = false;
+ auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+ called = true;
+ f("abc", "ABC");
+ });
+ EXPECT_TRUE(called);
+ EXPECT_THAT(*it, Pair("abc", "ABC"));
+ called = false;
+ it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+ called = true;
+ f("abc", "DEF");
+ });
+ EXPECT_FALSE(called);
+ EXPECT_THAT(*it, Pair("abc", "ABC"));
+}
+
+TEST(Table, ContainsEmpty) {
+ IntTable t;
+
+ EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains1) {
+ IntTable t;
+
+ EXPECT_TRUE(t.insert(0).second);
+ EXPECT_TRUE(t.contains(0));
+ EXPECT_FALSE(t.contains(1));
+
+ EXPECT_EQ(1, t.erase(0));
+ EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains2) {
+ IntTable t;
+
+ EXPECT_TRUE(t.insert(0).second);
+ EXPECT_TRUE(t.contains(0));
+ EXPECT_FALSE(t.contains(1));
+
+ t.clear();
+ EXPECT_FALSE(t.contains(0));
+}
+
+int decompose_constructed;
+struct DecomposeType {
+ DecomposeType(int i) : i(i) { // NOLINT
+ ++decompose_constructed;
+ }
+
+ explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+
+ int i;
+};
+
+struct DecomposeHash {
+ using is_transparent = void;
+ size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(int a) const { return a; }
+ size_t operator()(const char* a) const { return *a; }
+};
+
+struct DecomposeEq {
+ using is_transparent = void;
+ bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
+ bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+};
+
+struct DecomposePolicy {
+ using slot_type = DecomposeType;
+ using key_type = DecomposeType;
+ using init_type = DecomposeType;
+
+ template <typename T>
+ static void construct(void*, DecomposeType* slot, T&& v) {
+ *slot = DecomposeType(std::forward<T>(v));
+ }
+ static void destroy(void*, DecomposeType*) {}
+ static DecomposeType& element(slot_type* slot) { return *slot; }
+
+ template <class F, class T>
+ static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
+template <typename Hash, typename Eq>
+void TestDecompose(bool construct_three) {
+ DecomposeType elem{0};
+ const int one = 1;
+ const char* three_p = "3";
+ const auto& three = three_p;
+
+ raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+
+ decompose_constructed = 0;
+ int expected_constructed = 0;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.insert(elem);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.insert(1);
+ EXPECT_EQ(++expected_constructed, decompose_constructed);
+ set1.emplace("3");
+ EXPECT_EQ(++expected_constructed, decompose_constructed);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+
+ { // insert(T&&)
+ set1.insert(1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(const T&)
+ set1.insert(one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(hint, T&&)
+ set1.insert(set1.begin(), 1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(hint, const T&)
+ set1.insert(set1.begin(), one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // emplace(...)
+ set1.emplace(1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace("3");
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace(one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace(three);
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // emplace_hint(...)
+ set1.emplace_hint(set1.begin(), 1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), "3");
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), three);
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+}
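+
+// Summary of the protocol TestDecompose exercises (behavioral note): insert
+// and emplace first hash and compare the argument itself, so hitting an
+// existing key costs zero DecomposeType constructions when both Hash and Eq
+// can consume the argument type directly. When either functor lacks a
+// const char* overload, "3" must first be converted to a DecomposeType, which
+// is what the construct_three == true instantiations below account for.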
+
+TEST(Table, Decompose) {
+ TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+ struct TransparentHashIntOverload {
+ size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(int a) const { return a; }
+ };
+ struct TransparentEqIntOverload {
+ bool operator()(DecomposeType a, DecomposeType b) const {
+ return a.i == b.i;
+ }
+ bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ };
+ TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+ TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+ TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+}
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+ IntTable t;
+ t.reserve(n);
+ for (size_t i = 0; i != n; ++i) t.emplace(i);
+ const size_t c = t.bucket_count();
+ while (c == t.bucket_count()) t.emplace(n++);
+ return t.size() - 1;
+}
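+
+// Illustrative use of MaxDensitySize (a sketch; exact values depend on
+// Group::kWidth and the growth policy, which is why the tests compute it at
+// runtime rather than hard-coding constants):
+//   IntTable t;
+//   t.reserve(100);                  // the table picks some bucket_count B
+//   size_t m = MaxDensitySize(100);  // most elements that still fit in B
+//   // Inserting m elements keeps bucket_count() == B; m + 1 forces growth.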
+
+struct Modulo1000Hash {
+ size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+ : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+ std::allocator<int>> {};
+
+// Test that a rehash with no resize happens in case of many deleted slots.
+TEST(Table, RehashWithNoResize) {
+ Modulo1000HashTable t;
+ // Add keys that all collide under Modulo1000Hash so that there are at
+ // least kMinFullGroups groups with Group::kWidth collisions, then fill
+ // the table up to MaxDensitySize.
+ const size_t kMinFullGroups = 7;
+ std::vector<int> keys;
+ for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+ int k = i * 1000;
+ t.emplace(k);
+ keys.push_back(k);
+ }
+ const size_t capacity = t.capacity();
+
+ // Remove elements from all groups except the first and the last one.
+ // All elements removed from full groups will be marked as kDeleted.
+ const size_t erase_begin = Group::kWidth / 2;
+ const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+ for (size_t i = erase_begin; i < erase_end; ++i) {
+ EXPECT_EQ(1, t.erase(keys[i])) << i;
+ }
+ keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+ auto last_key = keys.back();
+ size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+ // Make sure that we have to make a lot of probes for the last key.
+ ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+ int x = 1;
+ // Insert and erase one element at a time until an in-place rehash happens.
+ while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+ t.emplace(x);
+ ASSERT_EQ(capacity, t.capacity());
+ // All elements should be there.
+ ASSERT_TRUE(t.find(x) != t.end()) << x;
+ for (const auto& k : keys) {
+ ASSERT_TRUE(t.find(k) != t.end()) << k;
+ }
+ t.erase(x);
+ ++x;
+ }
+}
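+
+// Behavioral note on the loop above (an implementation detail, not a public
+// guarantee): once enough slots are tombstoned as kDeleted, an insertion that
+// would otherwise trigger growth instead rewrites control bytes and relocates
+// elements within the same allocation. capacity() therefore stays constant
+// while the probe sequence for last_key shortens, which terminates the loop.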
+
+TEST(Table, InsertEraseStressTest) {
+ IntTable t;
+ const size_t kMinElementCount = 250;
+ std::deque<int> keys;
+ size_t i = 0;
+ for (; i < MaxDensitySize(kMinElementCount); ++i) {
+ t.emplace(i);
+ keys.push_back(i);
+ }
+ const size_t kNumIterations = 1000000;
+ for (; i < kNumIterations; ++i) {
+ ASSERT_EQ(1, t.erase(keys.front()));
+ keys.pop_front();
+ t.emplace(i);
+ keys.push_back(i);
+ }
+}
+
+TEST(Table, InsertOverloads) {
+ StringTable t;
+ // These should all trigger the insert(init_type) overload.
+ t.insert({{}, {}});
+ t.insert({"ABC", {}});
+ t.insert({"DEF", "!!!"});
+
+ EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+ Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+ IntTable t;
+ for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+ for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
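+
+// Note: i << 40 places all of the key entropy in the upper bits, so this test
+// also checks that the hash mixes high-order bits; a hash keyed only on low
+// bits would collapse all 100000 keys onto a single probe chain.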
+
+// Times out if copying is quadratic, as it once was in Rust's hash table.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+ static const size_t kLargeSize = 1 << 15;
+
+ IntTable t;
+ for (size_t i = 0; i != kLargeSize; ++i) {
+ t.insert(i);
+ }
+
+ // If this is quadratic, the test will timeout.
+ IntTable t2;
+ for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+ IntTable t;
+ constexpr size_t capacity = container_internal::Group::kWidth - 1;
+ constexpr size_t max_size = capacity / 2;
+ for (size_t i = 0; i < max_size; ++i) {
+ t.insert(i);
+ }
+ ASSERT_EQ(capacity, t.capacity());
+ intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+ t.clear();
+ ASSERT_EQ(capacity, t.capacity());
+ for (size_t i = 0; i < max_size; ++i) {
+ t.insert(i);
+ }
+ ASSERT_EQ(capacity, t.capacity());
+ intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+ // We are checking that original and second are close enough to each other
+ // that they are probably still in the same group. This is not strictly
+ // guaranteed.
+ EXPECT_LT(std::abs(original - second),
+ capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ t.erase(res.first);
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+}
+
+// Collect N bad keys with the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N random elements.
+// 3. Take the first Group::kWidth - 1 keys into the bad_keys array.
+// 4. Clear the table without resizing.
+// 5. Repeat from step 2 until N keys are collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+ static constexpr int kGroupSize = Group::kWidth - 1;
+
+ auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+ for (size_t i = b; i != e; ++i) {
+ t->emplace(i);
+ }
+ std::vector<int64_t> res;
+ res.reserve(kGroupSize);
+ auto it = t->begin();
+ for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+ res.push_back(*it);
+ }
+ return res;
+ };
+
+ std::vector<int64_t> bad_keys;
+ bad_keys.reserve(N);
+ IntTable t;
+ t.reserve(N * 2);
+
+ for (size_t b = 0; bad_keys.size() < N; b += N) {
+ auto keys = topk_range(b, b + N, &t);
+ bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+ t.erase(t.begin(), t.end());
+ EXPECT_TRUE(t.empty());
+ }
+ return bad_keys;
+}
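+
+// Why the collected keys are "bad" (a sketch of the intent): iteration visits
+// slots in table order, so the first Group::kWidth - 1 keys of each round are
+// the ones that landed at the front of the table. Accumulated across rounds,
+// they form a set whose hashes crowd the same region, which is an adversarial
+// input for the probe-length tests below.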
+
+struct ProbeStats {
+ // Number of elements with specific probe length over all tested tables.
+ std::vector<size_t> all_probes_histogram;
+ // Ratios total_probe_length/size for every tested table.
+ std::vector<double> single_table_ratios;
+
+ friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
+ ProbeStats res = a;
+ res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
+ b.all_probes_histogram.size()));
+ std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
+ res.all_probes_histogram.begin(),
+ res.all_probes_histogram.begin(), std::plus<size_t>());
+ res.single_table_ratios.insert(res.single_table_ratios.end(),
+ b.single_table_ratios.begin(),
+ b.single_table_ratios.end());
+ return res;
+ }
+
+ // Average ratio total_probe_length/size over tables.
+ double AvgRatio() const {
+ return std::accumulate(single_table_ratios.begin(),
+ single_table_ratios.end(), 0.0) /
+ single_table_ratios.size();
+ }
+
+ // Maximum ratio total_probe_length/size over tables.
+ double MaxRatio() const {
+ return *std::max_element(single_table_ratios.begin(),
+ single_table_ratios.end());
+ }
+
+ // Percentile ratio total_probe_length/size over tables.
+ double PercentileRatio(double Percentile = 0.95) const {
+ auto r = single_table_ratios;
+ auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+ if (mid != r.end()) {
+ std::nth_element(r.begin(), mid, r.end());
+ return *mid;
+ } else {
+ return MaxRatio();
+ }
+ }
+
+ // Maximum probe length over all elements and all tables.
+ size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+ // Fraction of elements with specified probe length.
+ std::vector<double> ProbeNormalizedHistogram() const {
+ double total_elements = std::accumulate(all_probes_histogram.begin(),
+ all_probes_histogram.end(), 0ull);
+ std::vector<double> res;
+ for (size_t p : all_probes_histogram) {
+ res.push_back(p / total_elements);
+ }
+ return res;
+ }
+
+ size_t PercentileProbe(double Percentile = 0.99) const {
+ size_t idx = 0;
+ for (double p : ProbeNormalizedHistogram()) {
+ if (Percentile > p) {
+ Percentile -= p;
+ ++idx;
+ } else {
+ return idx;
+ }
+ }
+ return idx;
+ }
+
+ friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+ out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+ << ", PercentileRatio:" << s.PercentileRatio()
+ << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+ for (double p : s.ProbeNormalizedHistogram()) {
+ out << p << ",";
+ }
+ out << "]}";
+
+ return out;
+ }
+};
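+
+// Worked example for PercentileProbe (hypothetical numbers): with
+// all_probes_histogram = {90, 9, 1}, ProbeNormalizedHistogram() yields
+// {0.90, 0.09, 0.01}. PercentileProbe(0.95) subtracts 0.90 (leaving 0.05),
+// stops at the second bucket because 0.05 <= 0.09, and returns probe length
+// 1; PercentileProbe(0.999) returns 2.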
+
+struct ExpectedStats {
+ double avg_ratio;
+ double max_ratio;
+ std::vector<std::pair<double, double>> percentile_ratios;
+ std::vector<std::pair<double, double>> percentile_probes;
+
+ friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+ out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+ << ", PercentileRatios: [";
+ for (auto el : s.percentile_ratios) {
+ out << el.first << ":" << el.second << ", ";
+ }
+ out << "], PercentileProbes: [";
+ for (auto el : s.percentile_probes) {
+ out << el.first << ":" << el.second << ", ";
+ }
+ out << "]}";
+
+ return out;
+ }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+ const ProbeStats& stats) {
+ EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+ EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+ for (auto pr : exp.percentile_ratios) {
+ EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+ << size << " " << pr.first << " " << stats;
+ }
+
+ for (auto pr : exp.percentile_probes) {
+ EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+ << size << " " << pr.first << " " << stats;
+ }
+}
+
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table and reserve it to keys.size() * 2.
+// 2. Insert all keys xored with the seed.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
+ size_t num_iters) {
+ const size_t reserve_size = keys.size() * 2;
+
+ ProbeStats stats;
+
+ int64_t seed = 0x71b1a19b907d6e33;
+ while (num_iters--) {
+ seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+ IntTable t1;
+ t1.reserve(reserve_size);
+ for (const auto& key : keys) {
+ t1.emplace(key ^ seed);
+ }
+
+ auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+ stats.all_probes_histogram.resize(
+ std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+ std::transform(probe_histogram.begin(), probe_histogram.end(),
+ stats.all_probes_histogram.begin(),
+ stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+ size_t total_probe_seq_length = 0;
+ for (size_t i = 0; i < probe_histogram.size(); ++i) {
+ total_probe_seq_length += i * probe_histogram[i];
+ }
+ stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+ keys.size());
+ t1.erase(t1.begin(), t1.end());
+ }
+ return stats;
+}
+
+ExpectedStats XorSeedExpectedStats() {
+ constexpr bool kRandomizesInserts =
+#if NDEBUG
+ false;
+#else // NDEBUG
+ true;
+#endif // NDEBUG
+
+ // The effective load factor is larger in non-opt mode because we insert
+ // elements out of order.
+ switch (container_internal::Group::kWidth) {
+ case 8:
+ if (kRandomizesInserts) {
+ return {0.05,
+ 1.0,
+ {{0.95, 0.5}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ } else {
+ return {0.05,
+ 2.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ }
+ case 16:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 1.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.05,
+ 1.0,
+ {{0.95, 0.05}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+ }
+ }
+ ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ return {};
+}
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+ ProbeStatsPerSize stats;
+ std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+ for (size_t size : sizes) {
+ stats[size] =
+ CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+ }
+ auto expected = XorSeedExpectedStats();
+ for (size_t size : sizes) {
+ auto& stat = stats[size];
+ VerifyStats(size, expected, stat);
+ }
+}
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table.
+// 2. Pick a random 10% window of the keys and, for each key, insert the 10
+//    elements key * 17 + j * 13 for j in [0, 10).
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+ const std::vector<int64_t>& keys, size_t num_iters) {
+ ProbeStats stats;
+
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+ std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+ while (num_iters--) {
+ IntTable t1;
+ size_t num_keys = keys.size() / 10;
+ size_t start = dist(rng);
+ for (size_t i = 0; i != num_keys; ++i) {
+ for (size_t j = 0; j != 10; ++j) {
+ t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+ }
+ }
+
+ auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+ stats.all_probes_histogram.resize(
+ std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+ std::transform(probe_histogram.begin(), probe_histogram.end(),
+ stats.all_probes_histogram.begin(),
+ stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+ size_t total_probe_seq_length = 0;
+ for (size_t i = 0; i < probe_histogram.size(); ++i) {
+ total_probe_seq_length += i * probe_histogram[i];
+ }
+ stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+ t1.size());
+ t1.erase(t1.begin(), t1.end());
+ }
+ return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+ constexpr bool kRandomizesInserts =
+#if NDEBUG
+ false;
+#else // NDEBUG
+ true;
+#endif // NDEBUG
+
+ // The effective load factor is larger in non-opt mode because we insert
+ // elements out of order.
+ switch (container_internal::Group::kWidth) {
+ case 8:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 0.5,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.15,
+ 0.5,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+ }
+ case 16:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 0.4,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.05,
+ 0.2,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+ }
+ }
+ ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ return {};
+}
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+ ProbeStatsPerSize stats;
+ std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+ for (size_t size : sizes) {
+ stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+ CollectBadMergeKeys(size), 300);
+ }
+ auto expected = LinearTransformExpectedStats();
+ for (size_t size : sizes) {
+ auto& stat = stats[size];
+ VerifyStats(size, expected, stat);
+ }
+}
+
+TEST(Table, EraseCollision) {
+ BadTable t;
+
+ // 1 2 3
+ t.emplace(1);
+ t.emplace(2);
+ t.emplace(3);
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_THAT(*t.find(2), 2);
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(3, t.size());
+
+ // 1 DELETED 3
+ t.erase(t.find(2));
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(2, t.size());
+
+ // DELETED DELETED 3
+ t.erase(t.find(1));
+ EXPECT_TRUE(t.find(1) == t.end());
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(1, t.size());
+
+ // DELETED DELETED DELETED
+ t.erase(t.find(3));
+ EXPECT_TRUE(t.find(1) == t.end());
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_TRUE(t.find(3) == t.end());
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+ BadTable t(100);
+
+ // 1 2 3 4
+ t.emplace(1);
+ t.emplace(2);
+ t.emplace(3);
+ t.emplace(4);
+
+ // 1 DELETED 3 DELETED
+ t.erase(t.find(2));
+ t.erase(t.find(4));
+
+ // 1 10 3 11 12
+ t.emplace(10);
+ t.emplace(11);
+ t.emplace(12);
+
+ EXPECT_EQ(5, t.size());
+ EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ t.clear();
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ t.clear();
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ IntTable u;
+ t.swap(u);
+ EXPECT_EQ(0, t.size());
+ EXPECT_EQ(1, u.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+ EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ t.emplace(0);
+ t.emplace(1);
+ EXPECT_EQ(2, t.size());
+ t.rehash(128);
+ EXPECT_EQ(2, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+ EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+ IntTable t;
+ t.emplace(0);
+ t.emplace(1);
+ auto* p = &*t.find(0);
+ t.rehash(1);
+ EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+ IntTable t;
+ t.rehash(0);
+ EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+ IntTable t;
+ t.emplace(0);
+ t.clear();
+ EXPECT_NE(0, t.bucket_count());
+ t.rehash(0);
+ EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+ IntTable t;
+ t.emplace(0);
+ t.emplace(1);
+ auto* p = &*t.find(0);
+ t.rehash(0);
+ EXPECT_NE(p, &*t.find(0));
+}
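+
+// Taken together, the three rehash(0) tests above pin down the extension
+// semantics: a no-op on a never-allocated table, a deallocation on a cleared
+// but still-allocated table, and a forced same-size rehash (which moves
+// elements) on a populated one.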
+
+TEST(Table, ConstructFromInitList) {
+ using P = std::pair<std::string, std::string>;
+ struct Q {
+ operator P() const { return {}; }
+ };
+ StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+ IntTable t;
+ t.max_load_factor(.321f);
+ t.emplace(0);
+ EXPECT_EQ(1, t.size());
+ {
+ IntTable u(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+ {
+ IntTable u{t};
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+ {
+ IntTable u = t;
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+ StringTable t;
+ t.max_load_factor(.321f);
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, Alloc<int64_t>> {
+ ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+ ExplicitAllocIntTable t;
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+ {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u(std::move(t));
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(lf, u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+ {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u{std::move(t)};
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(lf, u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+ {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u = std::move(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(lf, u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(lf, u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+ StringTable t;
+ t.max_load_factor(.321f);
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u;
+ u = t;
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ t = *&t;
+ EXPECT_EQ(1, t.size());
+ EXPECT_EQ(lf, t.max_load_factor());
+ EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+ StringTable t;
+ t.max_load_factor(.321f);
+ const float lf = t.max_load_factor();
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u;
+ u = std::move(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_EQ(lf, u.max_load_factor());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}};
+ t.insert(std::begin(v), std::end(v));
+ StringTable u = t;
+ EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}};
+ t.insert(std::begin(v1), std::end(v1));
+ StringTable u;
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ u.insert(std::begin(v2), std::end(v2));
+ EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}};
+ t.insert(std::begin(v1), std::end(v1));
+ StringTable u;
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ u.insert(std::begin(v2), std::end(v2));
+ EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+ IntTable t;
+ t.emplace(0);
+ t.erase(t.find(0));
+ // Construct over a deleted slot.
+ t.emplace(0);
+ t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+ IntTable t;
+ for (int i = 0; i < 1000; ++i) {
+ t.emplace(i);
+ t.erase(t.find(i));
+ }
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+ size_t n;
+ {
+ // Compute n such that n is the maximum number of elements before rehash.
+ IntTable t;
+ t.emplace(0);
+ size_t c = t.bucket_count();
+ for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+ --n;
+ }
+ IntTable t;
+ t.rehash(n);
+ const size_t c = t.bucket_count();
+ for (size_t i = 0; i != n; ++i) t.emplace(i);
+ EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+ t.erase(0);
+ t.emplace(0);
+ EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
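+
+// The erase(0)/emplace(0) pair follows the same probe sequence, so the new
+// element can reclaim the tombstoned slot instead of consuming growth
+// capacity; that is why bucket_count() stays at c even though the table sits
+// exactly at its rehashing threshold.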
+
+TEST(Table, NoThrowMoveConstruct) {
+ ASSERT_TRUE(
+ std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_copy_constructible<
+ std::equal_to<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+ EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+ ASSERT_TRUE(
+ std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+ ASSERT_TRUE(
+ std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+ ASSERT_TRUE(
+ absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+ EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+ ASSERT_TRUE(
+ container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+ ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+ std::equal_to<absl::string_view>>());
+ ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+ EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+ struct Hash {
+ size_t operator()(int64_t i) const { return i; }
+ size_t operator()(double i) const {
+ ADD_FAILURE();
+ return i;
+ }
+ };
+ struct Eq {
+ bool operator()(int64_t a, int64_t b) const { return a == b; }
+ bool operator()(double a, int64_t b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ bool operator()(int64_t a, double b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ bool operator()(double a, double b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ };
+
+ struct THash {
+ using is_transparent = void;
+ size_t operator()(int64_t i) const { return i; }
+ size_t operator()(double i) const { return i; }
+ };
+ struct TEq {
+ using is_transparent = void;
+ bool operator()(int64_t a, int64_t b) const { return a == b; }
+ bool operator()(double a, int64_t b) const { return a == b; }
+ bool operator()(int64_t a, double b) const { return a == b; }
+ bool operator()(double a, double b) const { return a == b; }
+ };
+
+ raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
+ // The double is converted to int64_t before the lookup, so 1.1 finds 1.
+ EXPECT_EQ(1, *s.find(double{1.1}));
+
+ raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
+ // The transparent functors use the double directly, so 1.1 matches nothing.
+ EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template <template <typename> class C, class Table, class = void>
+struct VerifyResultOf : std::false_type {};
+
+template <template <typename> class C, class Table>
+struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
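+
+// VerifyResultOf is the usual void_t detection idiom: C<Table> is well-formed
+// only when the corresponding call expression compiles, which selects the
+// true_type specialization. A minimal standalone equivalent (sketch):
+//   template <class T, class = void>
+//   struct HasIntFind : std::false_type {};
+//   template <class T>
+//   struct HasIntFind<T, absl::void_t<decltype(std::declval<T&>().find(17))>>
+//       : std::true_type {};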
+
+TEST(Table, HeterogeneousLookupOverloads) {
+ using NonTransparentTable =
+ raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
+ std::equal_to<absl::string_view>, std::allocator<int>>;
+
+ EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
+
+ using TransparentTable = raw_hash_set<
+ StringPolicy,
+ absl::container_internal::hash_default_hash<absl::string_view>,
+ absl::container_internal::hash_default_eq<absl::string_view>,
+ std::allocator<int>>;
+
+ EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
+}
+
+// TODO(alkis): Expand iterator tests.
+TEST(Iterator, IsDefaultConstructible) {
+ StringTable::iterator i;
+ EXPECT_TRUE(i == StringTable::iterator());
+}
+
+TEST(ConstIterator, IsDefaultConstructible) {
+ StringTable::const_iterator i;
+ EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, ConvertsToConstIterator) {
+ StringTable::iterator i;
+ EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, Iterates) {
+ IntTable t;
+ for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
+ EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
+}
+
+TEST(Table, Merge) {
+ StringTable t1, t2;
+ t1.emplace("0", "-0");
+ t1.emplace("1", "-1");
+ t2.emplace("0", "~0");
+ t2.emplace("2", "~2");
+
+ EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
+
+ t1.merge(t2);
+ EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
+ Pair("2", "~2")));
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
+}
+
+TEST(Nodes, EmptyNodeType) {
+ using node_type = StringTable::node_type;
+ node_type n;
+ EXPECT_FALSE(n);
+ EXPECT_TRUE(n.empty());
+
+ EXPECT_TRUE((std::is_same<node_type::allocator_type,
+ StringTable::allocator_type>::value));
+}
+
+TEST(Nodes, ExtractInsert) {
+ constexpr char k0[] = "Very long string zero.";
+ constexpr char k1[] = "Very long string one.";
+ constexpr char k2[] = "Very long string two.";
+ StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
+ EXPECT_THAT(t,
+ UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
+
+ auto node = t.extract(k0);
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ EXPECT_TRUE(node);
+ EXPECT_FALSE(node.empty());
+
+ StringTable t2;
+ auto res = t2.insert(std::move(node));
+ EXPECT_TRUE(res.inserted);
+ EXPECT_THAT(*res.position, Pair(k0, ""));
+ EXPECT_FALSE(res.node);
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+ // Not there.
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ node = t.extract("Not there!");
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ EXPECT_FALSE(node);
+
+ // Inserting nothing.
+ res = t2.insert(std::move(node));
+ EXPECT_FALSE(res.inserted);
+ EXPECT_EQ(res.position, t2.end());
+ EXPECT_FALSE(res.node);
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+ t.emplace(k0, "1");
+ node = t.extract(k0);
+
+ // Insert duplicate.
+ res = t2.insert(std::move(node));
+ EXPECT_FALSE(res.inserted);
+ EXPECT_THAT(*res.position, Pair(k0, ""));
+ EXPECT_TRUE(res.node);
+ EXPECT_FALSE(node);
+}
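+
+// Node-handle semantics exercised above (mirroring C++17 unordered_map): a
+// successful insert(node&&) consumes the handle and res.node is empty, while
+// a duplicate-key insert leaves the extracted element in res.node so the
+// caller can recover it:
+//   res = t2.insert(std::move(node));
+//   if (!res.inserted && res.node) node = std::move(res.node);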
+
+StringTable MakeSimpleTable(size_t size) {
+ StringTable t;
+ for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), "");
+ return t;
+}
+
+std::string OrderOfIteration(const StringTable& t) {
+ std::string order;
+ for (auto& p : t) order += p.first;
+ return order;
+}
+
+TEST(Table, IterationOrderChangesByInstance) {
+ // Needs to be more than kWidth elements to be able to affect order.
+ const StringTable reference = MakeSimpleTable(20);
+
+ // Since order is non-deterministic we can't just try once and verify.
+ // We'll try until we find that order changed. It should not take many tries
+ // for that.
+ // Important: we have to keep the old tables around. Otherwise tcmalloc will
+ // just give us the same blocks back and we would see the same order again.
+ std::vector<StringTable> garbage;
+ for (int i = 0; i < 10; ++i) {
+ auto trial = MakeSimpleTable(20);
+ if (OrderOfIteration(trial) != OrderOfIteration(reference)) {
+ // We are done.
+ return;
+ }
+ garbage.push_back(std::move(trial));
+ }
+ FAIL();
+}
+
+TEST(Table, IterationOrderChangesOnRehash) {
+ // Since order is non-deterministic we can't just try once and verify.
+ // We'll try until we find that order changed. It should not take many tries
+ // for that.
+ // Important: we have to keep the old tables around. Otherwise tcmalloc will
+ // just give us the same blocks back and we would see the same order again.
+ std::vector<StringTable> garbage;
+ for (int i = 0; i < 10; ++i) {
+ // Needs to be more than kWidth elements to be able to affect order.
+ StringTable t = MakeSimpleTable(20);
+ const std::string reference = OrderOfIteration(t);
+ // Force rehash to the same size.
+ t.rehash(0);
+ std::string trial = OrderOfIteration(t);
+ if (trial != reference) {
+ // We are done.
+ return;
+ }
+ garbage.push_back(std::move(t));
+ }
+ FAIL();
+}
+
+TEST(Table, IterationOrderChangesForSmallTables) {
+ // Since order is non-deterministic we can't just try once and verify.
+ // We'll try until we find that order changed.
+ // Important: we have to keep the old tables around. Otherwise tcmalloc will
+ // just give us the same blocks back and we would see the same order again.
+ StringTable reference_table = MakeSimpleTable(5);
+ const std::string reference = OrderOfIteration(reference_table);
+ std::vector<StringTable> garbage;
+ for (int i = 0; i < 50; ++i) {
+ StringTable t = MakeSimpleTable(5);
+ std::string trial = OrderOfIteration(t);
+ if (trial != reference) {
+ // We are done.
+ return;
+ }
+ garbage.push_back(std::move(t));
+ }
+ FAIL() << "Iteration order remained the same across many attempts.";
+}
+
+// Confirm that we assert if we try to erase() end().
+TEST(TableDeathTest, EraseOfEndAsserts) {
+ // Use an assert with side-effects to figure out if they are actually enabled.
+ bool assert_enabled = false;
+ assert([&]() {
+ assert_enabled = true;
+ return true;
+ }());
+ if (!assert_enabled) return;
+
+ IntTable t;
+ // Extra simple "regexp" as regexp support is highly varied across platforms.
+ constexpr char kDeathMsg[] = "it != end";
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
+}
+
+#ifdef ADDRESS_SANITIZER
+TEST(Sanitizer, PoisoningUnused) {
+ IntTable t;
+ // Insert something to force an allocation.
+ int64_t& v1 = *t.insert(0).first;
+
+ // Make sure there is something to test.
+ ASSERT_GT(t.capacity(), 1);
+
+ int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+ for (size_t i = 0; i < t.capacity(); ++i) {
+ EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+ }
+}
+
+TEST(Sanitizer, PoisoningOnErase) {
+ IntTable t;
+ int64_t& v = *t.insert(0).first;
+
+ EXPECT_FALSE(__asan_address_is_poisoned(&v));
+ t.erase(0);
+ EXPECT_TRUE(__asan_address_is_poisoned(&v));
+}
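+
+// Both directions are covered: slots that never held a live element must be
+// poisoned, and erasing must re-poison a slot that was previously readable.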
+#endif // ADDRESS_SANITIZER
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
index d1aa0978..91441729 100644
--- a/absl/container/internal/test_instance_tracker.cc
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -15,14 +15,15 @@
#include "absl/container/internal/test_instance_tracker.h"
namespace absl {
-inline namespace lts_2018_06_20 {
+inline namespace lts_2018_12_18 {
namespace test_internal {
int BaseCountedInstance::num_instances_ = 0;
int BaseCountedInstance::num_live_instances_ = 0;
int BaseCountedInstance::num_moves_ = 0;
int BaseCountedInstance::num_copies_ = 0;
int BaseCountedInstance::num_swaps_ = 0;
+int BaseCountedInstance::num_comparisons_ = 0;
} // namespace test_internal
-} // inline namespace lts_2018_06_20
+} // inline namespace lts_2018_12_18
} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
index b4a84656..060077d0 100644
--- a/absl/container/internal/test_instance_tracker.h
+++ b/absl/container/internal/test_instance_tracker.h
@@ -19,12 +19,12 @@
#include <ostream>
namespace absl {
-inline namespace lts_2018_06_20 {
+inline namespace lts_2018_12_18 {
namespace test_internal {
// A type that counts the number of occurrences of the type, the live occurrences of
-// the type, as well as the number of copies, moves, and swaps that have
-// occurred on the type. This is used as a base class for the copyable,
+// the type, as well as the number of copies, moves, swaps, and comparisons that
+// have occurred on the type. This is used as a base class for the copyable,
// copyable+movable, and movable types below that are used in actual tests. Use
// InstanceTracker in tests to track the number of instances.
class BaseCountedInstance {
@@ -67,6 +67,36 @@ class BaseCountedInstance {
return *this;
}
+ bool operator==(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ == x.value_;
+ }
+
+ bool operator!=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ != x.value_;
+ }
+
+ bool operator<(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_;
+ }
+
+ bool operator>(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ > x.value_;
+ }
+
+ bool operator<=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ <= x.value_;
+ }
+
+ bool operator>=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ >= x.value_;
+ }
+
int value() const {
if (!is_live_) std::abort();
return value_;
@@ -109,6 +139,9 @@ class BaseCountedInstance {
// Number of times that BaseCountedInstance objects were swapped.
static int num_swaps_;
+
+ // Number of times that BaseCountedInstance objects were compared.
+ static int num_comparisons_;
};
// Helper to track the BaseCountedInstance instance counters. Expects that the
@@ -153,13 +186,21 @@ class InstanceTracker {
// construction or the last call to ResetCopiesMovesSwaps().
int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
- // Resets the base values for moves, copies and swaps to the current values,
- // so that subsequent Get*() calls for moves, copies and swaps will compare to
- // the situation at the point of this call.
+ // Returns the number of comparisons on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int comparisons() const {
+ return BaseCountedInstance::num_comparisons_ - start_comparisons_;
+ }
+
+ // Resets the base values for moves, copies, comparisons, and swaps to the
+ // current values, so that subsequent Get*() calls for moves, copies,
+ // comparisons, and swaps will compare to the situation at the point of this
+ // call.
void ResetCopiesMovesSwaps() {
start_moves_ = BaseCountedInstance::num_moves_;
start_copies_ = BaseCountedInstance::num_copies_;
start_swaps_ = BaseCountedInstance::num_swaps_;
+ start_comparisons_ = BaseCountedInstance::num_comparisons_;
}
private:
@@ -168,6 +209,7 @@ class InstanceTracker {
int start_moves_;
int start_copies_;
int start_swaps_;
+ int start_comparisons_;
};
// Copyable, not movable.
@@ -216,7 +258,7 @@ class MovableOnlyInstance : public BaseCountedInstance {
};
} // namespace test_internal
-} // inline namespace lts_2018_06_20
+} // inline namespace lts_2018_12_18
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/absl/container/internal/test_instance_tracker_test.cc b/absl/container/internal/test_instance_tracker_test.cc
index 9efb6771..0ae57636 100644
--- a/absl/container/internal/test_instance_tracker_test.cc
+++ b/absl/container/internal/test_instance_tracker_test.cc
@@ -157,4 +157,26 @@ TEST(TestInstanceTracker, ExistingInstances) {
EXPECT_EQ(1, tracker.moves());
}
+TEST(TestInstanceTracker, Comparisons) {
+ InstanceTracker tracker;
+ MovableOnlyInstance one(1), two(2);
+
+ EXPECT_EQ(0, tracker.comparisons());
+ EXPECT_FALSE(one == two);
+ EXPECT_EQ(1, tracker.comparisons());
+ EXPECT_TRUE(one != two);
+ EXPECT_EQ(2, tracker.comparisons());
+ EXPECT_TRUE(one < two);
+ EXPECT_EQ(3, tracker.comparisons());
+ EXPECT_FALSE(one > two);
+ EXPECT_EQ(4, tracker.comparisons());
+ EXPECT_TRUE(one <= two);
+ EXPECT_EQ(5, tracker.comparisons());
+ EXPECT_FALSE(one >= two);
+ EXPECT_EQ(6, tracker.comparisons());
+
+ tracker.ResetCopiesMovesSwaps();
+ EXPECT_EQ(0, tracker.comparisons());
+}
+
} // namespace
diff --git a/absl/container/internal/tracked.h b/absl/container/internal/tracked.h
new file mode 100644
index 00000000..f72c46ea
--- /dev/null
+++ b/absl/container/internal/tracked.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
+template <class T>
+class Tracked {
+ public:
+ Tracked() {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ Tracked(const T& val) : val_(val) {}
+ Tracked(const Tracked& that)
+ : val_(that.val_),
+ num_moves_(that.num_moves_),
+ num_copies_(that.num_copies_) {
+ ++(*num_copies_);
+ }
+ Tracked(Tracked&& that)
+ : val_(std::move(that.val_)),
+ num_moves_(std::move(that.num_moves_)),
+ num_copies_(std::move(that.num_copies_)) {
+ ++(*num_moves_);
+ }
+ Tracked& operator=(const Tracked& that) {
+ val_ = that.val_;
+ num_moves_ = that.num_moves_;
+ num_copies_ = that.num_copies_;
+ ++(*num_copies_);
+ return *this;
+ }
+ Tracked& operator=(Tracked&& that) {
+ val_ = std::move(that.val_);
+ num_moves_ = std::move(that.num_moves_);
+ num_copies_ = std::move(that.num_copies_);
+ ++(*num_moves_);
+ return *this;
+ }
+
+ const T& val() const { return val_; }
+
+ friend bool operator==(const Tracked& a, const Tracked& b) {
+ return a.val_ == b.val_;
+ }
+ friend bool operator!=(const Tracked& a, const Tracked& b) {
+ return !(a == b);
+ }
+
+ size_t num_copies() { return *num_copies_; }
+ size_t num_moves() { return *num_moves_; }
+
+ private:
+ T val_;
+ std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+ std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
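+
+// Usage sketch (illustrative): the counters are shared_ptrs, so they travel
+// with every copy and move of the object and can be read from any surviving
+// handle:
+//   Tracked<int> a(7);
+//   Tracked<int> b = a;             // shared copy counter becomes 1
+//   Tracked<int> c = std::move(a);  // shared move counter becomes 1
+//   assert(b.num_copies() == 1 && c.num_moves() == 1);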
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 00000000..14ceeecb
--- /dev/null
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,407 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ConstructorTest);
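+
+// This header is meant to be instantiated once per container type; a
+// hypothetical instantiation would look like:
+//   using MapTypes = ::testing::Types<absl::flat_hash_map<int, int>>;
+//   INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ConstructorTest, MapTypes);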
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+#endif
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+#endif
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m({gen()});  // Start non-empty so that assignment must overwrite.
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory leak
+// according to the heap checker!).
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
+ BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+ MoveAssignment, AssignmentFromInitializerList,
+ AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
new file mode 100644
index 00000000..d767aa8d
--- /dev/null
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -0,0 +1,117 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(LookupTest);
+
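+// These tests are parameterized on the container type (TypeParam); they run
+// once for each concrete type that a test .cc file later supplies via
+// INSTANTIATE_TYPED_TEST_CASE_P.
+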
+TYPED_TEST_P(LookupTest, At) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ for (const auto& p : values) {
+ const auto& val = m.at(p.first);
+ EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto& val = m[p.first];
+ EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+ val = p.second;
+ }
+ for (const auto& p : values)
+ EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values)
+ EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_TRUE(m.end() == m.find(p.first))
+ << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto it = m.find(p.first);
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
+ EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
new file mode 100644
index 00000000..5d7f1fe3
--- /dev/null
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -0,0 +1,275 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.insert(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.insert(it, val2);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
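+// insert_or_assign and try_emplace are C++17 additions to unordered_map, so
+// the tests below have bodies only when UNORDERED_MAP_CXX17 is defined (the
+// including test setup is assumed to define it when the standard library
+// supports the C++17 API).
+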
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto p = m.insert_or_assign(k, val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val, get<1>(*p.first));
+ V val2 = hash_internal::Generator<V>()();
+ p = m.insert_or_assign(k, val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto it = m.insert_or_assign(m.end(), k, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val, get<1>(*it));
+ V val2 = hash_internal::Generator<V>()();
+ it = m.insert_or_assign(it, k, val2);
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.emplace(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.emplace_hint(it, val2);
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.try_emplace(val.first, val.second);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, val2.second);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.try_emplace(m.end(), val.first, val.second);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.try_emplace(it, val2.first, val2.second);
+ EXPECT_EQ(val, *it);
+#endif
+}
+
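+// SFINAE helper: IfNotVoid<V> names V only when V is non-void, so an
+// overload whose return type uses it drops out when erase() returns void.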
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return an iterator from erase() because doing
+// so is more expensive. As such, we adapt erase() here to return an iterator.
+struct EraseFirst {
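+ // Preferred overload (the int parameter exactly matches the literal 0);
+ // viable only when erase(iterator) itself returns the next iterator.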
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
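+ // Fallback (reached via the ... ellipsis, which ranks below int): erase()
+ // returns void here, so post-increment a copy to capture the next iterator.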
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using std::get;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto& first = *m.begin();
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (get<0>(val) != get<0>(first)) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0].first));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertOrAssign, InsertOrAssignHint,
+ Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_test.cc b/absl/container/internal/unordered_map_test.cc
new file mode 100644
index 00000000..548f69f7
--- /dev/null
+++ b/absl/container/internal/unordered_map_test.cc
@@ -0,0 +1,40 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <unordered_map>
+
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using MapTypes = ::testing::Types<
+ std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const int, int>>>,
+ std::unordered_map<std::string, std::string, StatefulTestingHash,
+ StatefulTestingEqual,
+ Alloc<std::pair<const std::string, std::string>>>>;
+
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ModifiersTest, MapTypes);
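+
+// A hypothetical further instantiation, to show how another map type would
+// reuse these suites (the container name here is illustrative only):
+//
+//   using MyMapTypes = ::testing::Types<my::HashMap<int, int>>;
+//   INSTANTIATE_TYPED_TEST_CASE_P(MyHashMap, ConstructorTest, MyMapTypes);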
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
new file mode 100644
index 00000000..f370b249
--- /dev/null
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -0,0 +1,411 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordSet>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+
+ const auto& cm = m;
+ EXPECT_EQ(cm.hash_function(), hasher);
+ EXPECT_EQ(cm.key_eq(), equal);
+ EXPECT_EQ(cm.get_allocator(), alloc);
+ EXPECT_TRUE(cm.empty());
+ EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+ EXPECT_GE(cm.bucket_count(), 123);
+}
+
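+// Constructor overloads that omit the hasher and/or key-equal (taking a
+// bucket count plus allocator) were added to the unordered containers in
+// C++14, hence the version guards on the tests below.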
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+#endif
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+#endif
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign.
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
+ BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+ MoveAssignment, AssignmentFromInitializerList,
+ AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
new file mode 100644
index 00000000..9174279a
--- /dev/null
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values)
+ EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ typename TypeParam::iterator it = m.find(v);
+ static_assert(std::is_same<const typename TypeParam::value_type&,
+ decltype(*it)>::value,
+ "");
+ static_assert(std::is_same<const typename TypeParam::value_type*,
+ decltype(it.operator->())>::value,
+ "");
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+ EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(v, *r.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
new file mode 100644
index 00000000..0a1e9b1b
--- /dev/null
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.insert(val);
+ EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ it = m.insert(it, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.emplace(val);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ it = m.emplace_hint(it, val);
+ EXPECT_EQ(val, *it);
+}
+
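+// SFINAE helper: IfNotVoid<V> names V only when V is non-void, so an
+// overload whose return type uses it drops out when erase() returns void.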
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return an iterator from erase() because doing
+// so is more expensive. As such, we adapt erase() here to return an iterator.
+struct EraseFirst {
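+ // Preferred overload (the int parameter exactly matches the literal 0);
+ // viable only when erase(iterator) itself returns the next iterator.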
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
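+ // Fallback (reached via the ... ellipsis, which ranks below int): erase()
+ // returns void here, so post-increment a copy to capture the next iterator.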
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (val != *m.begin()) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0]));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
+ EraseKey, Swap);
+
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_test.cc b/absl/container/internal/unordered_set_test.cc
new file mode 100644
index 00000000..263059eb
--- /dev/null
+++ b/absl/container/internal/unordered_set_test.cc
@@ -0,0 +1,39 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <unordered_set>
+
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using SetTypes =
+ ::testing::Types<std::unordered_set<int, StatefulTestingHash,
+ StatefulTestingEqual, Alloc<int>>,
+ std::unordered_set<std::string, StatefulTestingHash,
+ StatefulTestingEqual, Alloc<std::string>>>;
+
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ModifiersTest, SetTypes);
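+
+// A hypothetical further instantiation, showing how another set type would
+// reuse these suites (the container name is illustrative only):
+//
+//   using MySetTypes = ::testing::Types<my::HashSet<int>>;
+//   INSTANTIATE_TYPED_TEST_CASE_P(MyHashSet, ModifiersTest, MySetTypes);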
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2018_12_18
+} // namespace absl