From fcb104594b0bb4b8ac306cb2f55ecdad40974683 Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Tue, 4 Dec 2018 11:01:12 -0800 Subject: Creation of LTS branch "lts_2018_12_18" - 44b0fafc62d9b8f192e8180cbe9c4b806b339d57 Export of internal Abseil changes. by Abseil Team - 926bfeb9fff223429c12224b7514243886323e8d Export of internal Abseil changes. by Abseil Team - 13327debebc5c2d1d4991b69fe50450e340e50e4 Export of internal Abseil changes. by Abseil Team - 3088e76c597e068479e82508b1770a7ad0c806b6 Export of internal Abseil changes. by Abseil Team - f6ae816808cd913e0e2b3e2af14f328fa1071af0 Export of internal Abseil changes. by Abseil Team - a06c4a1d9093137b7217a5aaba8920d62e835dc0 Export of internal Abseil changes. by Abseil Team - 7b46e1d31a6b08b1c6da2a13e7b151a20446fa07 Export of internal Abseil changes. by Abseil Team - 070f6e47b33a2909d039e620c873204f78809492 Export of internal Abseil changes. by Abseil Team - 7990fd459e9339467814ddb95000c87cb1e4d945 Export of internal Abseil changes. by Abseil Team - f95179062eb65ce40895cc76f1398cce25394369 Export of internal Abseil changes. by Abseil Team - cc8dcd307b76a575d2e3e0958a4fe4c7193c2f68 Export of internal Abseil changes. by Abseil Team - a705aa78dc76fc5c79d501e61dcc077eca68a8a4 Merge pull request #194 from Mizux/windows by Xiaoyi Zhang - a4c3ffff11eec0ee45742f915c255e9f870b7e0f Export of internal Abseil changes. by Abseil Team - 01174578651b73021d9b8c3820f6fea707dacdf0 Merge pull request #201 from ccawley2011/fix-byteswap by Matt Calabrese <38107210+mattcalabrese-google@users.noreply.github.com> - f86f9413856b65afdd61fea938d684b8ab73115a Export of internal Abseil changes. by Abseil Team - 94c298e2a0ae409e283cab96c954a685bd865a70 Export of internal Abseil changes. by Abseil Team - 0884a6a04e4497d11b1b398cc0e422b118bf977a Export of internal Abseil changes. by Abseil Team - c16d5557cd05119b5b7b1318ef778ebe3195b4a1 Export of internal Abseil changes. by Abseil Team - 45221ccc4ed643e4209b0cc5798e97203f108fa8 Export of internal Abseil changes. by Abseil Team - 2019e17a520575ab365b2b5134d71068182c70b8 Export of internal Abseil changes. by Abseil Team - 5b70a8910b2e6fb0ce5193a41873139a126d2f7f Export of internal Abseil changes. by Abseil Team - a00bdd176d66ef0b417d9576052a19091fbdf891 Export of internal Abseil changes. by Abseil Team - f340f773edab951656b19b6f1a77c964a78ec4c2 Export of internal Abseil changes. by Abseil Team - 445998d7ac4e5d3c50411d377e3b50e960d2d6c2 Export of internal Abseil changes. by Abseil Team - e821380d69a549dc64900693942789d21aa4df5e Export of internal Abseil changes. by Abseil Team - f21d187b80e3b7f08fb279775ea9c8b48c636030 Export of internal Abseil changes. by Abseil Team - 5441bbe1db5d0f2ca24b5b60166367b0966790af Fix code snippet in comment (#174) by Loo Rong Jie - 5aae0cffae8ffaacab965756169b34e511b353df Fix CMake build (#173) by Stephan Dollberg - 48cd2c3f351ff188bc85684b84a91b6e6d17d896 Export of internal Abseil changes. by Abseil Team - e291c279e458761e77a69b09b129d3d1e81f1e80 Export of internal Abseil changes. by Abseil Team - e01d95528ea2137a4a27a88d1f57c6cb260aafed Export of internal Abseil changes. by Abseil Team - 8ff1374008259719b54a8cb128ef951c02da164c Export of internal Abseil changes. by Abseil Team - 02451914b9ad5320f81f56a89f3eef1f8683227c Export of internal Abseil changes. by Abseil Team - 921fd5cf02ec0d665439a790148d59faa7d4a72c Merge pull request #166 from rongjiecomputer/cmake-test by Gennadiy Civil - fb462224c058487763f263b7995d70efd0242c17 Export of internal Abseil changes. 
by Abseil Team - c075ad321696fa5072e097f0a51e4fe76a6fe13e Export of internal Abseil changes. by Abseil Team - 0f4bc966754ec6cd28d5f03467d56f1efdc598e3 Export of internal Abseil changes. by Abseil Team - 6c7e5ffc43decd92f7bdfc510ad8a245a20b6dea Export of internal Abseil changes. by Abseil Team - d6df769173bf0263489f98874b93034db0e479a2 Export of internal Abseil changes. by Abseil Team - 28080f5f050c9530aa9f2b39c60d8217038d64ff Export of internal Abseil changes. by Abseil Team - 9c987f429bba32fb4446280fd3b91e2472d71d4d Export of internal Abseil changes. by Abseil Team - 5e7d459eeca7bc53deab0ee9634601386b53d7c0 Export of internal Abseil changes. by Abseil Team - bed5bd6e185c7e0311f3a1f2dab4c96083dac636 Export of internal Abseil changes. by Abseil Team - fefc83638fb69395d259ed245699310610429064 Export of internal Abseil changes. by Abseil Team - d8cfe9f2a77fbee02c09642491e62a3f3677e0f6 Export of internal Abseil changes. by Abseil Team - ad5c960b2eb914881d1ceba0e996a0a8f3f6ca59 Export of internal Abseil changes. by Abseil Team - 86f0fe93ad9d6d033a319476736a3256369c1f75 Export of internal Abseil changes. by Abseil Team - f0f15c2778b0e4959244dd25e63f445a455870f5 Export of internal Abseil changes. by Abseil Team - 29ff6d4860070bf8fcbd39c8805d0c32d56628a3 Removed "warning treated as error" flag from MSVC (#153) by vocaviking - 083d04dd4a62ebbf037079b06e49b323c5e1192a Export of internal Abseil changes. by Abseil Team - bea85b52733022294eef108a2e42d77b616ddca2 Export of internal Abseil changes. by Abseil Team - 8f96be6ca60d967bd4b37f93d0a03bcff4145200 Export of internal Abseil changes. by Abseil Team - 92e07e5590752d6b8e67f7f2f86c6286561e8cea Merge pull request #152 from clnperez/fix-multi-defines-p... by Derek Mauro <761129+derekmauro@users.noreply.github.com> - 2125e6444a9de9e41f21ecdc674dd7d8759c149d Export of internal Abseil changes. by Abseil Team - 9acad869d21731f5bc50430a33fe61cc0ffcbb0b Merge pull request #150 from OlafvdSpek/patch-2 by Jonathan Cohen - c2e00d341913bf03b4597ade5b056042e23e8c58 Export of internal Abseil changes. by Abseil Team - 9e060686d1c325f34f9806b45fe77bafeed00aee Export of internal Abseil changes. by Abseil Team - 7aa411ceafc1272a28579cca739a97a2fb79055a Export of internal Abseil changes. by Abseil Team - 2c5af55ed34850d8b7dd46177c8ca53fdfda920e Export of internal Abseil changes. by Abseil Team - 44aa275286baf97fc13529aca547a88b180beb08 Merge pull request #143 from rongjiecomputer/kernel by Xiaoyi Zhang - 42f22a28401c952f1fc5942231c7fdac80811bf5 Export of internal Abseil changes. by Abseil Team - b973bc53ef366f0253b85eeed9a79b241884a843 Merge pull request #139 from siepkes/smartos-support by ahedberg - e0def7473e52336f58759e11db4cd9467e5e0356 Export of internal Abseil changes. by Abseil Team - f826f1d489b61b64df1d94afbe5981841a82e5fa Merge pull request #138 from edbaunton/remove-deprecated-... by ahedberg - 7b50a4a94b0c7df68b3a854c850b551aaef0a8b4 Export of internal Abseil changes. by Abseil Team - a5030ca5125b9d557ecfeea8acc8b1a8e49f6d27 Merge pull request #144 from rongjiecomputer/winsock2 by Xiaoyi Zhang - 02687955b7ca8fc02ada9b14bc247deeb108d341 Export of internal Abseil changes. by Abseil Team - 8f612ebb152fb7e05643a2bcf78cb89a8c0641ad Export of internal Abseil changes. by Abseil Team - 134496a31d8b324f762de3bee9a002658c984456 Export of internal Abseil changes. by Abseil Team - ba8d6cf07766263723e86736f20a51c1c9c67b19 Export of internal Abseil changes. by Abseil Team - be1e84b988fceabcea4fc9e93f899539f0c81901 Export of internal Abseil changes. 
by Abseil Team - 16ac2ec2e38cdf47f9330a312e319d57da659c10 Merge pull request #134 from rongjiecomputer/cmake by Alex Strelnikov - 7efd8dc0f1075356e9c7caa950afd1ecf854e8b9 Export of internal Abseil changes. by Abseil Team - 87a4c07856e7dc69958019d47b2f02ae47746ec0 Export of internal Abseil changes. by Abseil Team - 4491d606df34c44efda47b6d17b605262f17e182 Export of internal Abseil changes. by Abseil Team GitOrigin-RevId: 44b0fafc62d9b8f192e8180cbe9c4b806b339d57 Change-Id: I2c427b5b41b2d34101922048b00f3d9dafcb498d --- absl/container/internal/compressed_tuple.h | 177 ++ absl/container/internal/compressed_tuple_test.cc | 168 ++ absl/container/internal/container_memory.h | 407 ++++ absl/container/internal/container_memory_test.cc | 190 ++ absl/container/internal/hash_function_defaults.h | 145 ++ .../internal/hash_function_defaults_test.cc | 303 +++ absl/container/internal/hash_generator_testing.cc | 74 + absl/container/internal/hash_generator_testing.h | 152 ++ absl/container/internal/hash_policy_testing.h | 184 ++ .../container/internal/hash_policy_testing_test.cc | 45 + absl/container/internal/hash_policy_traits.h | 191 ++ absl/container/internal/hash_policy_traits_test.cc | 144 ++ absl/container/internal/hashtable_debug.h | 110 ++ absl/container/internal/hashtable_debug_hooks.h | 83 + absl/container/internal/layout.h | 740 ++++++++ absl/container/internal/layout_test.cc | 1557 ++++++++++++++++ absl/container/internal/node_hash_policy.h | 90 + absl/container/internal/node_hash_policy_test.cc | 69 + absl/container/internal/raw_hash_map.h | 187 ++ absl/container/internal/raw_hash_set.cc | 48 + absl/container/internal/raw_hash_set.h | 1950 ++++++++++++++++++++ .../internal/raw_hash_set_allocator_test.cc | 430 +++++ absl/container/internal/raw_hash_set_test.cc | 1830 ++++++++++++++++++ absl/container/internal/test_instance_tracker.cc | 5 +- absl/container/internal/test_instance_tracker.h | 56 +- .../internal/test_instance_tracker_test.cc | 22 + absl/container/internal/tracked.h | 80 + .../internal/unordered_map_constructor_test.h | 407 ++++ .../container/internal/unordered_map_lookup_test.h | 117 ++ .../internal/unordered_map_modifiers_test.h | 275 +++ absl/container/internal/unordered_map_test.cc | 40 + .../internal/unordered_set_constructor_test.h | 411 +++++ .../container/internal/unordered_set_lookup_test.h | 91 + .../internal/unordered_set_modifiers_test.h | 190 ++ absl/container/internal/unordered_set_test.cc | 39 + 35 files changed, 10998 insertions(+), 9 deletions(-) create mode 100644 absl/container/internal/compressed_tuple.h create mode 100644 absl/container/internal/compressed_tuple_test.cc create mode 100644 absl/container/internal/container_memory.h create mode 100644 absl/container/internal/container_memory_test.cc create mode 100644 absl/container/internal/hash_function_defaults.h create mode 100644 absl/container/internal/hash_function_defaults_test.cc create mode 100644 absl/container/internal/hash_generator_testing.cc create mode 100644 absl/container/internal/hash_generator_testing.h create mode 100644 absl/container/internal/hash_policy_testing.h create mode 100644 absl/container/internal/hash_policy_testing_test.cc create mode 100644 absl/container/internal/hash_policy_traits.h create mode 100644 absl/container/internal/hash_policy_traits_test.cc create mode 100644 absl/container/internal/hashtable_debug.h create mode 100644 absl/container/internal/hashtable_debug_hooks.h create mode 100644 absl/container/internal/layout.h create mode 100644 
absl/container/internal/layout_test.cc create mode 100644 absl/container/internal/node_hash_policy.h create mode 100644 absl/container/internal/node_hash_policy_test.cc create mode 100644 absl/container/internal/raw_hash_map.h create mode 100644 absl/container/internal/raw_hash_set.cc create mode 100644 absl/container/internal/raw_hash_set.h create mode 100644 absl/container/internal/raw_hash_set_allocator_test.cc create mode 100644 absl/container/internal/raw_hash_set_test.cc create mode 100644 absl/container/internal/tracked.h create mode 100644 absl/container/internal/unordered_map_constructor_test.h create mode 100644 absl/container/internal/unordered_map_lookup_test.h create mode 100644 absl/container/internal/unordered_map_modifiers_test.h create mode 100644 absl/container/internal/unordered_map_test.cc create mode 100644 absl/container/internal/unordered_set_constructor_test.h create mode 100644 absl/container/internal/unordered_set_lookup_test.h create mode 100644 absl/container/internal/unordered_set_modifiers_test.h create mode 100644 absl/container/internal/unordered_set_test.cc (limited to 'absl/container/internal') diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h new file mode 100644 index 00000000..29fe7c12 --- /dev/null +++ b/absl/container/internal/compressed_tuple.h @@ -0,0 +1,177 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// http://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include + +#include "absl/utility/utility.h" + +#ifdef _MSC_VER +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else // _MSC_VER +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif // _MSC_VER + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +class CompressedTuple; + +namespace internal_compressed_tuple { + +template +struct Elem; +template +struct Elem, I> + : std::tuple_element> {}; +template +using ElemT = typename Elem::type; + +// Use the __is_final intrinsic if available. Where it's not available, classes +// declared with the 'final' specifier cannot be used as CompressedTuple +// elements. +// TODO(sbenza): Replace this with std::is_final in C++14. 
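The usage example in the header comment above lost its template arguments in extraction; a minimal sketch of the intended use follows (the element types and values here are illustrative, not recovered from the original comment):

    #include <cassert>
    #include "absl/container/internal/compressed_tuple.h"

    struct Empty {};  // empty and non-final: stored as a base, adds no size

    int main() {
      // One int member plus one empty base: the tuple is the size of an int.
      absl::container_internal::CompressedTuple<int, Empty> value(7, Empty{});
      assert(value.get<0>() == 7);
      static_assert(sizeof(value) == sizeof(int), "EBO applied");
      return 0;
    }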
+template +constexpr bool IsFinal() { +#if defined(__clang__) || defined(__GNUC__) + return __is_final(T); +#else + return false; +#endif +} + +template +constexpr bool ShouldUseBase() { + return std::is_class::value && std::is_empty::value && !IsFinal(); +} + +// The storage class provides two specializations: +// - For empty classes, it stores T as a base class. +// - For everything else, it stores T as a member. +template >()> +struct Storage { + using T = ElemT; + T value; + constexpr Storage() = default; + explicit constexpr Storage(T&& v) : value(absl::forward(v)) {} + constexpr const T& get() const { return value; } + T& get() { return value; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage + : ElemT { + using T = internal_compressed_tuple::ElemT; + constexpr Storage() = default; + explicit constexpr Storage(T&& v) : T(absl::forward(v)) {} + constexpr const T& get() const { return *this; } + T& get() { return *this; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + CompressedTupleImpl, absl::index_sequence> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : Storage, + std::integral_constant::value>... { + constexpr CompressedTupleImpl() = default; + explicit constexpr CompressedTupleImpl(Ts&&... args) + : Storage, I>(absl::forward(args))... {} +}; + +} // namespace internal_compressed_tuple + +// Helper class to perform the Empty Base Class Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the CompressedTuple. If all types in Ts are +// empty classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member .get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// http://en.cppreference.com/w/cpp/language/ebo +template +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple + : private internal_compressed_tuple::CompressedTupleImpl< + CompressedTuple, absl::index_sequence_for> { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + public: + constexpr CompressedTuple() = default; + explicit constexpr CompressedTuple(Ts... base) + : CompressedTuple::CompressedTupleImpl(absl::forward(base)...) {} + + template + ElemT& get() { + return internal_compressed_tuple::Storage::get(); + } + + template + constexpr const ElemT& get() const { + return internal_compressed_tuple::Storage::get(); + } +}; + +// Explicit specialization for a zero-element tuple +// (needed to avoid ambiguous overloads for the default constructor). +template <> +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc new file mode 100644 index 00000000..2b5ed4a4 --- /dev/null +++ b/absl/container/internal/compressed_tuple_test.cc @@ -0,0 +1,168 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/compressed_tuple.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +template +struct Empty {}; + +template +struct NotEmpty { + T value; +}; + +template +struct TwoValues { + T value1; + U value2; +}; + +TEST(CompressedTupleTest, Sizeof) { + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>>)); + EXPECT_EQ(sizeof(int), + sizeof(CompressedTuple, Empty<1>, Empty<2>>)); + + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty, Empty<1>>)); +} + +TEST(CompressedTupleTest, Access) { + struct S { + std::string x; + }; + CompressedTuple, S> x(7, {}, S{"ABC"}); + EXPECT_EQ(sizeof(x), sizeof(TwoValues)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_EQ("ABC", x.get<2>().x); +} + +TEST(CompressedTupleTest, NonClasses) { + CompressedTuple x(7, "ABC"); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); +} + +TEST(CompressedTupleTest, MixClassAndNonClass) { + CompressedTuple, NotEmpty> x(7, "ABC", {}, + {1.25}); + struct Mock { + int v; + const char* p; + double d; + }; + EXPECT_EQ(sizeof(x), sizeof(Mock)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); + EXPECT_EQ(1.25, x.get<3>().value); +} + +TEST(CompressedTupleTest, Nested) { + CompressedTuple, + CompressedTuple>> + x(1, CompressedTuple(2), + CompressedTuple>(3, CompressedTuple(4))); + EXPECT_EQ(1, x.get<0>()); + EXPECT_EQ(2, x.get<1>().get<0>()); + EXPECT_EQ(3, x.get<2>().get<0>()); + EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); + + CompressedTuple, Empty<0>, + CompressedTuple, CompressedTuple>>> + y; + std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), + &y.get<2>().get<1>().get<0>()}; +#ifdef _MSC_VER + // MSVC has a bug where many instances of the same base class are layed out in + // the same address when using __declspec(empty_bases). + // This will be fixed in a future version of MSVC. + int expected = 1; +#else + int expected = 4; +#endif + EXPECT_EQ(expected, sizeof(y)); + EXPECT_EQ(expected, empties.size()); + EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); + + EXPECT_EQ(4 * sizeof(char), + sizeof(CompressedTuple, + CompressedTuple>)); + EXPECT_TRUE( + (std::is_empty>, + CompressedTuple>>>::value)); +} + +TEST(CompressedTupleTest, Reference) { + int i = 7; + std::string s = "Very long std::string that goes in the heap"; + CompressedTuple x(i, i, s, s); + + // Sanity check. 
We should have not moved from `s` + EXPECT_EQ(s, "Very long std::string that goes in the heap"); + + EXPECT_EQ(x.get<0>(), x.get<1>()); + EXPECT_NE(&x.get<0>(), &x.get<1>()); + EXPECT_EQ(&x.get<1>(), &i); + + EXPECT_EQ(x.get<2>(), x.get<3>()); + EXPECT_NE(&x.get<2>(), &x.get<3>()); + EXPECT_EQ(&x.get<3>(), &s); +} + +TEST(CompressedTupleTest, NoElements) { + CompressedTuple<> x; + static_cast(x); // Silence -Wunused-variable. + EXPECT_TRUE(std::is_empty>::value); +} + +TEST(CompressedTupleTest, Constexpr) { + constexpr CompressedTuple> x( + 7, 1.25, CompressedTuple(5)); + constexpr int x0 = x.get<0>(); + constexpr double x1 = x.get<1>(); + constexpr int x2 = x.get<2>().get<0>(); + EXPECT_EQ(x0, 7); + EXPECT_EQ(x1, 1.25); + EXPECT_EQ(x2, 5); +} + +#if defined(__clang__) || defined(__GNUC__) +TEST(CompressedTupleTest, EmptyFinalClass) { + struct S final { + int f() const { return 5; } + }; + CompressedTuple x; + EXPECT_EQ(x.get<0>().f(), 5); +} +#endif + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h new file mode 100644 index 00000000..ddccbe05 --- /dev/null +++ b/absl/container/internal/container_memory.h @@ -0,0 +1,407 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ + +#ifdef ADDRESS_SANITIZER +#include +#endif + +#ifdef MEMORY_SANITIZER +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/utility/utility.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// Allocates at least n bytes aligned to the specified alignment. +// Alignment must be a power of 2. It must be positive. +// +// Note that many allocators don't honor alignment requirements above certain +// threshold (usually either alignof(std::max_align_t) or alignof(void*)). +// Allocate() doesn't apply alignment corrections. If the underlying allocator +// returns insufficiently alignment pointer, that's what you are going to get. +template +void* Allocate(Alloc* alloc, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + struct alignas(Alignment) M {}; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + A mem_alloc(*alloc); + void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && + "allocator does not respect alignment"); + return p; +} + +// The pointer must have been previously obtained by calling +// Allocate(alloc, n). 
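A short usage sketch for this Allocate/Deallocate pair, mirroring the pattern in container_memory_test.cc below (the alignment and byte count are arbitrary):

    #include <memory>
    #include "absl/container/internal/container_memory.h"

    int main() {
      std::allocator<char> alloc;
      // Request 3 bytes aligned to at least 2.
      void* p = absl::container_internal::Allocate<2>(&alloc, 3);
      // ... use the storage ...
      // Pass back the same alignment and size that were requested.
      absl::container_internal::Deallocate<2>(&alloc, p, 3);
      return 0;
    }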
+template +void Deallocate(Alloc* alloc, void* p, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + struct alignas(Alignment) M {}; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + A mem_alloc(*alloc); + AT::deallocate(mem_alloc, static_cast(p), + (n + sizeof(M) - 1) / sizeof(M)); +} + +namespace memory_internal { + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + absl::index_sequence) { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. +template +auto TupleRef(T&& t) -> decltype( + TupleRefImpl(std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +template +decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// Given arguments of an std::pair's consructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. 
auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(p.first), std::move(p.second)); +inline std::pair, std::tuple<>> PairArgs() { return {}; } +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +template +auto DecomposePair(F&& f, Args&&... args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + +// Helper functions for asan and msan. +inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; +} + +inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; +} + +template +inline void SanitizerPoisonObject(const T* object) { + SanitizerPoisonMemoryRegion(object, sizeof(T)); +} + +template +inline void SanitizerUnpoisonObject(const T* object) { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); +} + +namespace memory_internal { + +// If Pair is a standard-layout type, OffsetOf::kFirst and +// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and +// offsetof(Pair, second) respectively. Otherwise they are -1. +// +// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout +// type, which is non-portable. +template +struct OffsetOf { + static constexpr size_t kFirst = -1; + static constexpr size_t kSecond = -1; +}; + +template +struct OffsetOf::type> { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); +}; + +template +struct IsLayoutCompatible { + private: + struct Pair { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() { + return std::is_standard_layout
<P>() && sizeof(P) == sizeof(Pair) && + alignof(P) == alignof(Pair) && + memory_internal::OffsetOf<P>::kFirst == + memory_internal::OffsetOf<Pair>::kFirst && + memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); +}; + +} // namespace memory_internal + +// If kMutableKeys is false, only the value member is accessed. +// +// If kMutableKeys is true, key is accessed through all slots while value and +// mutable_value are accessed only via INITIALIZED slots. Slots are created and +// destroyed via mutable_value so that the key can be moved later. +template +union slot_type { + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = + std::integral_constant::value>; + + public: + slot_type() {} + ~slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = std::pair; + + value_type value; + mutable_value_type mutable_value; + K key; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, + std::forward(args)...); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + emplace(new_slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + + template + static void swap(Allocator* alloc, slot_type* a, slot_type* b) { + if (kMutableKeys::value) { + using std::swap; + swap(a->mutable_value, b->mutable_value); + } else { + value_type tmp = std::move(a->value); + absl::allocator_traits::destroy(*alloc, &a->value); + absl::allocator_traits::construct(*alloc, &a->value, + std::move(b->value)); + absl::allocator_traits::destroy(*alloc, &b->value); + absl::allocator_traits::construct(*alloc, &b->value, + std::move(tmp)); + } + } + + template + static void move(Allocator* alloc, slot_type* src, slot_type* dest) { + if (kMutableKeys::value) { + dest->mutable_value = std::move(src->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &dest->value); + absl::allocator_traits::construct(*alloc, &dest->value, + 
std::move(src->value)); + } + } + + template + static void move(Allocator* alloc, slot_type* first, slot_type* last, + slot_type* result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc new file mode 100644 index 00000000..da87ca20 --- /dev/null +++ b/absl/container/internal/container_memory_test.cc @@ -0,0 +1,190 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/container_memory.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator; + + public: + Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits::destroy(*alloc(), ptr_); + std::allocator_traits::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed( + std::make_tuple(std::string("a")), + 
[](absl::string_view str) { return str.size(); })); +} + +template +decltype(DecomposeValue(std::declval(), std::declval())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward(f), std::forward(arg)); +} + +template +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template +decltype(DecomposeValueImpl(0, std::declval(), std::declval())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template +decltype(DecomposePair(std::declval(), std::declval()...)) +DecomposePairImpl(int, F&& f, Args&&... args) { + return DecomposePair(std::forward(f), std::forward(args)...); +} + +template +const char* DecomposePairImpl(char, F&& f, Args&&... args) { + return "not decomposable"; +} + +template +decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward(f), std::forward(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, + std::tuple&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", + TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h new file mode 100644 index 00000000..72c75fa0 --- /dev/null +++ b/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,145 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. +// +// std::hash and std::equal_to are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. 
The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. That is: +// +// if hash_default_eq(a, b) returns true for any a and b of type T, then +// hash_default_hash(a) must equal hash_default_hash(b) +// +// For SwissTable containers this requirement is relaxed to allow a and b of +// any and possibly different types. Note that like the standard the hash and +// equal functions are still bound to T. This is important because some type U +// can be hashed by/tested for equality differently depending on T. A notable +// example is `const char*`. `const char*` is treated as a c-style string when +// the hash function is hash but as a pointer when the hash function is +// hash. +// +#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// The hash of an object of type T is computed by using absl::Hash. +template +struct HashEq { + using Hash = absl::Hash; + using Eq = std::equal_to; +}; + +struct StringHash { + using is_transparent = void; + + size_t operator()(absl::string_view v) const { + return absl::Hash{}(v); + } +}; + +// Supports heterogeneous lookup for string-like elements. +struct StringHashEq { + using Hash = StringHash; + struct Eq { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const { + return lhs == rhs; + } + }; +}; +template <> +struct HashEq : StringHashEq {}; +template <> +struct HashEq : StringHashEq {}; + +// Supports heterogeneous lookup for pointers and smart pointers. +template +struct HashEq { + struct Hash { + using is_transparent = void; + template + size_t operator()(const U& ptr) const { + return absl::Hash{}(HashEq::ToPtr(ptr)); + } + }; + struct Eq { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + + private: + static const T* ToPtr(const T* ptr) { return ptr; } + template + static const T* ToPtr(const std::unique_ptr& ptr) { + return ptr.get(); + } + template + static const T* ToPtr(const std::shared_ptr& ptr) { + return ptr.get(); + } +}; + +template +struct HashEq> : HashEq {}; +template +struct HashEq> : HashEq {}; + +// This header's visibility is restricted. If you need to access the default +// hasher please use the container's ::hasher alias instead. +// +// Example: typename Hash = typename absl::flat_hash_map::hasher +template +using hash_default_hash = typename container_internal::HashEq::Hash; + +// This header's visibility is restricted. If you need to access the default +// key equal please use the container's ::key_equal alias instead. 
+// +// Example: typename Eq = typename absl::flat_hash_map::key_equal +template +using hash_default_eq = typename container_internal::HashEq::Eq; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc new file mode 100644 index 00000000..4610843a --- /dev/null +++ b/absl/container/internal/hash_function_defaults_test.cc @@ -0,0 +1,303 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_function_defaults.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types; + +template +struct EqString : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_CASE(EqString, StringTypes); + +template +struct HashString : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_CASE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +struct NoDeleter { + template + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types, + std::unique_ptr, + std::unique_ptr, std::unique_ptr, + std::shared_ptr, std::shared_ptr>; + +template +struct EqPointer : ::testing::Test { + 
hash_default_eq key_eq; +}; + +TYPED_TEST_CASE(EqPointer, PointerTypes); + +template +struct HashPointer : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_CASE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); + + auto dp = std::make_shared(); + EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + hash_default_hash hasher; + hash_default_eq eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +// Cartesian product of (string, std::string, absl::string_view) +// with (string, std::string, absl::string_view, const char*). +using StringTypesCartesianProduct = Types< + // clang-format off + + std::pair, + std::pair, + std::pair, + + std::pair, + std::pair, + std::pair>; +// clang-format on + +constexpr char kFirstString[] = "abc123"; +constexpr char kSecondString[] = "ijk456"; + +template +struct StringLikeTest : public ::testing::Test { + typename T::first_type a1{kFirstString}; + typename T::second_type b1{kFirstString}; + typename T::first_type a2{kSecondString}; + typename T::second_type b2{kSecondString}; + hash_default_eq eq; + hash_default_hash hash; +}; + +TYPED_TEST_CASE_P(StringLikeTest); + +TYPED_TEST_P(StringLikeTest, Eq) { + EXPECT_TRUE(this->eq(this->a1, this->b1)); + EXPECT_TRUE(this->eq(this->b1, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, NotEq) { + EXPECT_FALSE(this->eq(this->a1, this->b2)); + EXPECT_FALSE(this->eq(this->b2, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, HashEq) { + EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); + EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); + // It would be a poor hash function which collides on these strings. 
+ EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); +} + +TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct); + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +enum Hash : size_t { + kStd = 0x2, // std::hash +#ifdef _MSC_VER + kExtension = kStd, // In MSVC, std::hash == ::hash +#else // _MSC_VER + kExtension = 0x4, // ::hash (GCC extension) +#endif // _MSC_VER +}; + +// H is a bitmask of Hash enumerations. +// Hashable is hashable via all means specified in H. +template +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template +struct hash> { + template , + class = typename std::enable_if::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +template +size_t Hash(const T& v) { + return hash_default_hash()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable())); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc new file mode 100644 index 00000000..aef41d72 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.cc @@ -0,0 +1,74 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
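The transparent Hash/Eq defaults from hash_function_defaults.h above are what enable heterogeneous lookup in the SwissTable containers; a sketch of the observable effect through the public absl::flat_hash_set, which is assumed here to wire in these defaults as described:

    #include <string>
    #include "absl/container/flat_hash_set.h"
    #include "absl/strings/string_view.h"

    int main() {
      absl::flat_hash_set<std::string> s = {"key"};
      // Because hash_default_hash<std::string> and hash_default_eq<std::string>
      // are transparent (StringHashEq), the set can be probed with a
      // string_view without materializing a temporary std::string.
      absl::string_view needle("key");
      return s.find(needle) != s.end() ? 0 : 1;
    }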
+ +#include "absl/container/internal/hash_generator_testing.h" + +#include + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_internal { +namespace { + +class RandomDeviceSeedSeq { + public: + using result_type = typename std::random_device::result_type; + + template + void generate(Iterator start, Iterator end) { + while (start != end) { + *start = gen_(); + ++start; + } + } + + private: + std::random_device gen_; +}; + +} // namespace + +std::mt19937_64* GetSharedRng() { + RandomDeviceSeedSeq seed_seq; + static auto* rng = new std::mt19937_64(seed_seq); + return rng; +} + +std::string Generator::operator()() const { + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + std::string res; + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +absl::string_view Generator::operator()() const { + static auto* arena = new std::deque(); + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + arena->emplace_back(); + auto& res = arena->back(); + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +} // namespace hash_internal +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h new file mode 100644 index 00000000..65e88964 --- /dev/null +++ b/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,152 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. 
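A usage sketch for these generators; the 32-character length and the printable-ASCII range (0x20 to 0x7E) come from the definitions above:

    #include <iostream>
    #include <string>
    #include "absl/container/internal/hash_generator_testing.h"

    int main() {
      using absl::container_internal::hash_internal::Generator;
      // Draws a fresh 32-character printable string from the shared,
      // random_device-seeded mt19937_64.
      std::string s = Generator<std::string>()();
      std::cout << s << "\n";
      return 0;
    }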
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template +struct IsMap : std::false_type {}; + +template +struct IsMap> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetSharedRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast(ec); +} + +template +struct Generator; + +template +struct Generator::value>::type> { + T operator()() const { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } +}; + +template <> +struct Generator { + Enum operator()() const { + std::uniform_int_distribution::type> + dist; + while (true) { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + std::string operator()() const; +}; + +template <> +struct Generator { + absl::string_view operator()() const; +}; + +template <> +struct Generator { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator()()); + } +}; + +template +struct Generator> { + std::pair operator()() const { + return std::pair(Generator::type>()(), + Generator::type>()()); + } +}; + +template +struct Generator> { + std::tuple operator()() const { + return std::tuple(Generator::type>()()...); + } +}; + +template +struct Generator().key()), + decltype(std::declval().value())>> + : Generator().key())>::type, + typename std::decay().value())>::type>> {}; + +template +using GeneratedType = decltype( + std::declval::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +} // namespace hash_internal +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h new file mode 100644 index 00000000..9c310ad4 --- /dev/null +++ b/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,184 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hash_testing_internal { + +template +struct WithId { + WithId() : id_(next_id()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template + static size_t next_id() { + // 0 is reserved for moved from state. + static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template + size_t operator()(const T& t) const { + return absl::Hash{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template +struct Alloc : std::allocator { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. 
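+  // Note: ids are what make these allocators distinguishable in tests;
+  // copies (including rebind copies) keep the source id, so a container can
+  // be checked for actually propagating its allocator. Two default-constructed
+  // Alloc objects share id 0 and therefore compare equal, as required.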
+ explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} + + template + struct rebind { + using other = Alloc; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = (std::numeric_limits::max)(); +}; + +template +auto items(const Map& m) -> std::vector< + std::pair> { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template +auto keys(const Set& s) + -> std::vector::type> { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ +( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc new file mode 100644 index 00000000..00c436b3 --- /dev/null +++ b/absl/container/internal/hash_policy_testing_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
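+//
+// A sketch of the order-insensitive assertions that the items()/keys()
+// helpers from hash_policy_testing.h enable (illustration only; the test
+// below exercises the id-tracking instead):
+//
+//   std::unordered_map<int, int> m = {{1, 2}, {3, 4}};
+//   EXPECT_THAT(items(m),
+//               ::testing::UnorderedElementsAre(std::make_pair(1, 2),
+//                                               std::make_pair(3, 4)));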
+ +#include "absl/container/internal/hash_policy_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +TEST(_, Hash) { + StatefulTestingHash h1; + EXPECT_EQ(1, h1.id()); + StatefulTestingHash h2; + EXPECT_EQ(2, h2.id()); + StatefulTestingHash h1c(h1); + EXPECT_EQ(1, h1c.id()); + StatefulTestingHash h2m(std::move(h2)); + EXPECT_EQ(2, h2m.id()); + EXPECT_EQ(0, h2.id()); + StatefulTestingHash h3; + EXPECT_EQ(3, h3.id()); + h3 = StatefulTestingHash(); + EXPECT_EQ(4, h3.id()); + h3 = std::move(h1); + EXPECT_EQ(1, h3.id()); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h new file mode 100644 index 00000000..41e26212 --- /dev/null +++ b/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,191 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct hash_policy_traits { + private: + struct ReturnKey { + // We return `Key` here. + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + Key operator()(Key&& k, const Args&...) const { + return std::forward(k); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type {}; + + template + struct ConstantIteratorsImpl> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... 
args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static auto element(slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. 
+ template + static auto key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc new file mode 100644 index 00000000..07cecdfa --- /dev/null +++ b/absl/container/internal/hash_policy_traits_test.cc @@ -0,0 +1,144 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
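+//
+// For orientation, a minimal concrete policy satisfying the interface that
+// hash_policy_traits documents above (an illustrative sketch; this test uses
+// the mock-backed PolicyWithoutOptionalOps defined below instead):
+//
+//   struct IntPolicy {
+//     using slot_type = int;
+//     using key_type = int;
+//     using init_type = int;
+//
+//     template <class Alloc>
+//     static void construct(Alloc*, slot_type* slot, int v) { *slot = v; }
+//     template <class Alloc>
+//     static void destroy(Alloc*, slot_type*) {}
+//     static slot_type& element(slot_type* slot) { return *slot; }
+//
+//     // SFINAE-friendly apply(), as the contract above requires.
+//     template <class F, class... Ts>
+//     static auto apply(F&& f, Ts&&... ts)
+//         -> decltype(std::forward<F>(f)(std::forward<Ts>(ts)...)) {
+//       return std::forward<F>(f)(std::forward<Ts>(ts)...);
+//     }
+//   };
+//
+// Because IntPolicy omits transfer(), hash_policy_traits<IntPolicy>::transfer
+// falls back to construct-then-destroy, exactly the path the without_transfer
+// test below pins down.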
+ +#include "absl/container/internal/hash_policy_traits.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::Return; +using ::testing::ReturnRef; + +using Alloc = std::allocator; +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function construct; + static std::function destroy; + + static std::function element; + static int apply(int v) { return apply_impl(v); } + static std::function apply_impl; + static std::function value; +}; + +std::function PolicyWithoutOptionalOps::construct; +std::function PolicyWithoutOptionalOps::destroy; + +std::function PolicyWithoutOptionalOps::element; +std::function PolicyWithoutOptionalOps::apply_impl; +std::function PolicyWithoutOptionalOps::value; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function transfer; +}; + +std::function PolicyWithOptionalOps::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { + return apply.Call(a1); + }; + PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { + return value.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator alloc; + int a = 53; + + MockFunction construct; + MockFunction destroy; + + MockFunction element; + MockFunction apply; + MockFunction value; + + MockFunction transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + hash_policy_traits::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + hash_policy_traits::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::element(&a)); +} + +TEST_F(Test, apply) { + EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); + EXPECT_EQ(1337, (hash_policy_traits::apply(42))); +} + +TEST_F(Test, value) { + int b = 0; + EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::value(&a)); +} + +TEST_F(Test, without_transfer) { + int b = 42; + EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)); + EXPECT_CALL(destroy, Call(&alloc, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +} // namespace +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h new file mode 100644 index 00000000..b6a43512 --- /dev/null +++ b/absl/container/internal/hashtable_debug.h @@ -0,0 +1,110 @@ +// Copyright 2018 The Abseil Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse of the actual behavior of the probing algorithms
+// in these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+// Returns the number of probes required to lookup `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+    const C& c, const typename C::key_type& key) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+  std::vector<size_t> v;
+  for (auto it = container.begin(); it != container.end(); ++it) {
+    size_t num_probes = GetHashtableDebugNumProbes(
+        container,
+        absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+    v.resize(std::max(v.size(), num_probes + 1));
+    v[num_probes]++;
+  }
+  return v;
+}
+
+struct HashtableDebugProbeSummary {
+  size_t total_elements;
+  size_t total_num_probes;
+  double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+  auto probes = GetHashtableDebugNumProbesHistogram(container);
+  HashtableDebugProbeSummary summary = {};
+  for (size_t i = 0; i < probes.size(); ++i) {
+    summary.total_elements += probes[i];
+    summary.total_num_probes += probes[i] * i;
+  }
+  summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+  return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template +size_t AllocatedByteSize(const C& c) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); +} + +// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` +// and `c.size()` is equal to `num_elements`. +template +size_t LowerBoundAllocatedByteSize(size_t num_elements) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); +} + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 00000000..50ba6ba5 --- /dev/null +++ b/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,83 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// Containers should specialize this to provide debug information for that +// container. +template +struct HashtableDebugAccess { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. 
+ // + // static size_t LowerBoundAllocatedByteSize(size_t num_elements); +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h new file mode 100644 index 00000000..f11a6ad2 --- /dev/null +++ b/absl/container/internal/layout.h @@ -0,0 +1,740 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// MOTIVATION AND TUTORIAL +// +// If you want to put in a single heap allocation N doubles followed by M ints, +// it's easy if N and M are known at compile time. +// +// struct S { +// double a[N]; +// int b[M]; +// }; +// +// S* p = new S; +// +// But what if N and M are known only in run time? Class template Layout to the +// rescue! It's a portable generalization of the technique known as struct hack. +// +// // This object will tell us everything we need to know about the memory +// // layout of double[N] followed by int[M]. It's structurally identical to +// // size_t[2] that stores N and M. It's very cheap to create. +// const Layout layout(N, M); +// +// // Allocate enough memory for both arrays. `AllocSize()` tells us how much +// // memory is needed. We are free to use any allocation function we want as +// // long as it returns aligned memory. +// std::unique_ptr p(new unsigned char[layout.AllocSize()]); +// +// // Obtain the pointer to the array of doubles. +// // Equivalent to `reinterpret_cast(p.get())`. +// // +// // We could have written layout.Pointer<0>(p) instead. If all the types are +// // unique you can use either form, but if some types are repeated you must +// // use the index form. +// double* a = layout.Pointer(p.get()); +// +// // Obtain the pointer to the array of ints. +// // Equivalent to `reinterpret_cast(p.get() + N * 8)`. +// int* b = layout.Pointer(p); +// +// If we are unable to specify sizes of all fields, we can pass as many sizes as +// we can to `Partial()`. In return, it'll allow us to access the fields whose +// locations and sizes can be computed from the provided information. +// `Partial()` comes in handy when the array sizes are embedded into the +// allocation. +// +// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// using L = Layout; +// +// unsigned char* Allocate(size_t n, size_t m) { +// const L layout(1, 1, n, m); +// unsigned char* p = new unsigned char[layout.AllocSize()]; +// *layout.Pointer<0>(p) = n; +// *layout.Pointer<1>(p) = m; +// return p; +// } +// +// void Use(unsigned char* p) { +// // First, extract N and M. +// // Specify that the first array has only one element. Using `prefix` we +// // can access the first two arrays but not more. +// constexpr auto prefix = L::Partial(1); +// size_t n = *prefix.Pointer<0>(p); +// size_t m = *prefix.Pointer<1>(p); +// +// // Now we can get pointers to the payload. 
+// const L layout(1, 1, n, m); +// double* a = layout.Pointer(p); +// int* b = layout.Pointer(p); +// } +// +// The layout we used above combines fixed-size with dynamically-sized fields. +// This is quite common. Layout is optimized for this use case and generates +// optimal code. All computations that can be performed at compile time are +// indeed performed at compile time. +// +// Efficiency tip: The order of fields matters. In `Layout` try to +// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no +// padding in between arrays. +// +// You can manually override the alignment of an array by wrapping the type in +// `Aligned`. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). `N` cannot be less than `alignof(T)`. +// +// `AllocSize()` and `Pointer()` are the most basic methods for dealing with +// memory layouts. Check out the reference or code below to discover more. +// +// EXAMPLE +// +// // Immutable move-only string with sizeof equal to sizeof(void*). The +// // string size and the characters are kept in the same heap allocation. +// class CompactString { +// public: +// CompactString(const char* s = "") { +// const size_t size = strlen(s); +// // size_t[1] followed by char[size + 1]. +// const L layout(1, size + 1); +// p_.reset(new unsigned char[layout.AllocSize()]); +// // If running under ASAN, mark the padding bytes, if any, to catch +// // memory errors. +// layout.PoisonPadding(p_.get()); +// // Store the size in the allocation. +// *layout.Pointer(p_.get()) = size; +// // Store the characters in the allocation. +// memcpy(layout.Pointer(p_.get()), s, size + 1); +// } +// +// size_t size() const { +// // Equivalent to reinterpret_cast(*p). +// return *L::Partial().Pointer(p_.get()); +// } +// +// const char* c_str() const { +// // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). +// // The argument in Partial(1) specifies that we have size_t[1] in front +// // of the characters. +// return L::Partial(1).Pointer(p_.get()); +// } +// +// private: +// // Our heap allocation contains a size_t followed by an array of chars. +// using L = Layout; +// std::unique_ptr p_; +// }; +// +// int main() { +// CompactString s = "hello"; +// assert(s.size() == 5); +// assert(strcmp(s.c_str(), "hello") == 0); +// } +// +// DOCUMENTATION +// +// The interface exported by this file consists of: +// - class `Layout<>` and its public members. +// - The public members of class `internal_layout::LayoutImpl<>`. That class +// isn't intended to be used directly, and its name and template parameter +// list are internal implementation details, but the class itself provides +// most of the functionality in this file. See comments on its members for +// detailed documentation. +// +// `Layout::Partial(count1,..., countm)` (where `m` <= `n`) returns a +// `LayoutImpl<>` object. `Layout layout(count1,..., countn)` +// creates a `Layout` object, which exposes the same functionality by inheriting +// from `LayoutImpl<>`. 
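+//
+// A worked example (an illustrative sketch, not from the original header) of
+// the offset arithmetic described above, for Layout<double, int, char>:
+//
+//   // double[2], int[3], char[5].
+//   using L = absl::container_internal::Layout<double, int, char>;
+//   constexpr L layout(2, 3, 5);
+//   static_assert(L::Alignment() == alignof(double), "");  // 8
+//   // Offset<0>() == 0                    (doubles start the block)
+//   // Offset<1>() == Align(2 * 8, 4)      == 16
+//   // Offset<2>() == Align(16 + 3 * 4, 1) == 28
+//   // AllocSize() == 28 + 5 * 1           == 33
+//
+// Ordering fields by decreasing alignment, as recommended above, is what
+// keeps Align() from inserting padding between the arrays here.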
+ +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef ADDRESS_SANITIZER +#include +#endif + +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +// A type wrapper that instructs `Layout` to use the specific alignment for the +// array. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). +// +// Requires: `N >= alignof(T)` and `N` is a power of 2. +template +struct Aligned; + +namespace internal_layout { + +template +struct NotAligned {}; + +template +struct NotAligned> { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); +}; + +template +using IntToSize = size_t; + +template +using TypeToSize = size_t; + +template +struct Type : NotAligned { + using type = T; +}; + +template +struct Type> { + using type = T; +}; + +template +struct SizeOf : NotAligned, std::integral_constant {}; + +template +struct SizeOf> : std::integral_constant {}; + +// Note: workaround for https://gcc.gnu.org/PR88115 +template +struct AlignOf : NotAligned { + static constexpr size_t value = alignof(T); +}; + +template +struct AlignOf> { + static_assert(N % alignof(T) == 0, + "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; +}; + +// Does `Ts...` contain `T`? +template +using Contains = absl::disjunction...>; + +template +using CopyConst = + typename std::conditional::value, const To, To>::type; + +// Note: We're not qualifying this with absl:: because it doesn't compile under +// MSVC. +template +using SliceType = Span; + +// This namespace contains no types. It prevents functions defined in it from +// being found by ADL. +namespace adl_barrier { + +template +constexpr size_t Find(Needle, Needle, Ts...) { + static_assert(!Contains(), "Duplicate element type"); + return 0; +} + +template +constexpr size_t Find(Needle, T, Ts...) { + return adl_barrier::Find(Needle(), Ts()...) + 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. +// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. +constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } + +constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } + +constexpr size_t Max(size_t a) { return a; } + +template +constexpr size_t Max(size_t a, size_t b, Ts... rest) { + return adl_barrier::Max(b < a ? a : b, rest...); +} + +template +std::string TypeName() { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) { // Demangling succeeded. 
+ absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } else { +#if defined(__GXX_RTTI) || defined(_CPPRTTI) + absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#endif + } + return out; +} + +} // namespace adl_barrier + +template +using EnableIf = typename std::enable_if::type; + +// Can `T` be a template argument of `Layout`? +template +using IsLegalElementType = std::integral_constant< + bool, !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + +template +class LayoutImpl; + +// Public base class of `Layout` and the result type of `Layout::Partial()`. +// +// `Elements...` contains all template arguments of `Layout` that created this +// instance. +// +// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments +// passed to `Layout::Partial()` or `Layout::Layout()`. +// +// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is +// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we +// can compute offsets). +template +class LayoutImpl, absl::index_sequence, + absl::index_sequence> { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + enum { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), + "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() { + static_assert(Contains, Type::type>...>(), + "Type not found"); + return adl_barrier::Find(Type(), + Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) + : size_{sizes...} {} + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const { + return 0; + } + + template = 0> + constexpr size_t Offset() const { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>() * size_[N - 1], + ElementAlignment::value); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. 
+ template + constexpr size_t Offset() const { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. + constexpr std::array Sizes() const { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst>* Pointer(Char* p) const { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char"); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const { + return std::tuple>*...>( + Pointer(p)...); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. 
+ template + SliceType>> Slice(Char* p) const { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>>...> + Slices(Char* p) const { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)...); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>() * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ADDRESS_SANITIZER + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) { + size_t start = + Offset() + SizeOf>() * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout::Partial(5, 3); + // assert(x.DebugString() == + // "@0(1)[5]; @8(4)[3]; @24(8)"); + // + // Each field is in the following format: @offset(sizeof)[size] ( + // may be missing depending on the target platform). For example, + // @8(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". 
+ std::string DebugString() const { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf>()...}; + const std::string types[] = {adl_barrier::TypeName>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], + "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. + int last = static_cast(NumSizes) - 1; + if (NumTypes == NumSizes && last >= 0) { + absl::StrAppend(&res, "[", size_[last], "]"); + } + return res; + } + + private: + // Arguments of `Layout::Partial()` or `Layout::Layout()`. + size_t size_[NumSizes > 0 ? NumSizes : 1]; +}; + +template +using LayoutType = LayoutImpl< + std::tuple, absl::make_index_sequence, + absl::make_index_sequence>; + +} // namespace internal_layout + +// Descriptor of arrays of various types and sizes laid out in memory one after +// another. See the top of the file for documentation. +// +// Check out the public API of internal_layout::LayoutImpl above. The type is +// internal to the library but its methods are public, and they are inherited +// by `Layout`. +template +class Layout : public internal_layout::LayoutType { + public: + static_assert(sizeof...(Ts) > 0, "At least one field is required"); + static_assert( + absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + // The result type of `Partial()` with `NumSizes` arguments. + template + using PartialType = internal_layout::LayoutType; + + // `Layout` knows the element types of the arrays we want to lay out in + // memory but not the number of elements in each array. + // `Partial(size1, ..., sizeN)` allows us to specify the latter. The + // resulting immutable object can be used to obtain pointers to the + // individual arrays. + // + // It's allowed to pass fewer array sizes than the number of arrays. E.g., + // if all you need is to the offset of the second array, you only need to + // pass one argument -- the number of elements in the first array. + // + // // int[3] followed by 4 bytes of padding and an unknown number of + // // doubles. + // auto x = Layout::Partial(3); + // // doubles start at byte 16. + // assert(x.Offset<1>() == 16); + // + // If you know the number of elements in all arrays, you can still call + // `Partial()` but it's more convenient to use the constructor of `Layout`. + // + // Layout x(3, 5); + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + // + // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. + // Requires: all arguments are convertible to `size_t`. + template + static constexpr PartialType Partial(Sizes&&... sizes) { + static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); + return PartialType(absl::forward(sizes)...); + } + + // Creates a layout with the sizes of all arrays specified. If you know + // only the sizes of the first N arrays (where N can be zero), you can use + // `Partial()` defined above. The constructor is essentially equivalent to + // calling `Partial()` and passing in all array sizes; the constructor is + // provided as a convenient abbreviation. + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + constexpr explicit Layout(internal_layout::TypeToSize... sizes) + : internal_layout::LayoutType(sizes...) 
{} +}; + +} // namespace container_internal +} // inline namespace lts_2018_12_18 +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc new file mode 100644 index 00000000..b9f98471 --- /dev/null +++ b/absl/container/internal/layout_test.cc @@ -0,0 +1,1557 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/layout.h" + +// We need ::max_align_t because some libstdc++ versions don't provide +// std::max_align_t +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/types/span.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { +namespace { + +using ::absl::Span; +using ::testing::ElementsAre; + +size_t Distance(const void* from, const void* to) { + ABSL_RAW_CHECK(from <= to, "Distance must be non-negative"); + return static_cast(to) - static_cast(from); +} + +template +Expected Type(Actual val) { + static_assert(std::is_same(), ""); + return val; +} + +// Helper class to test different size and alignments. +struct alignas(8) Int128 { + uint64_t a, b; + friend bool operator==(Int128 lhs, Int128 rhs) { + return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); + } + + static std::string Name() { + return internal_layout::adl_barrier::TypeName(); + } +}; + +// Properties of types that this test relies on. 
+static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template +void SameType() { + static_assert(std::is_same(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(3).Offset()); + EXPECT_EQ(0, L(3).Offset()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + 
EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(1).Offset()); + EXPECT_EQ(4, L::Partial(1).Offset()); + EXPECT_EQ(0, L::Partial(5).Offset()); + EXPECT_EQ(8, L::Partial(5).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3).Offset()); + EXPECT_EQ(8, L::Partial(5, 3).Offset()); + EXPECT_EQ(24, L::Partial(5, 3).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(0, L(5, 3, 1).Offset()); + EXPECT_EQ(24, L(5, 3, 1).Offset()); + EXPECT_EQ(8, L(5, 3, 1).Offset()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, SizeByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); 
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size()); + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L(3).Size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L::Partial(3, 5).Size()); + EXPECT_EQ(5, L::Partial(3, 5).Size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(3, L(3, 5, 7).Size()); + EXPECT_EQ(5, L(3, 5, 7).Size()); + EXPECT_EQ(7, L(3, 5, 7).Size()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, 
Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, 
Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + 
Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100]; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, 
L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, + Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, + Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, + Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, + Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + 
EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(24, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +TEST(Layout, MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + 
EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + } 
+} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element::type&>( + std::get(matchers_)) + .Matches(std::get(p))...}); + } + + std::tuple matchers_; +}; + +template +testing::PolymorphicMatcher> Tuple(M... matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + alignas(2 * M) unsigned char p[x.AllocSize()]; + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout::Alignment() == 1, ""); + 
static_assert(Layout::Alignment() == 4, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout>::Alignment() == 64, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); + } +#endif +} + +template +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + { + constexpr auto x = Layout::Partial(); + EXPECT_EQ("@0(1)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1); + EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2); + EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } + { + constexpr Layout x(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + 
alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type(x.Pointer<0>(c)); + Type(x.Pointer<0>(uc)); + Type(x.Pointer<0>(sc)); + Type(x.Pointer<0>(cc)); + Type(x.Pointer<0>(cuc)); + Type(x.Pointer<0>(csc)); + + Type(x.Pointer(c)); + Type(x.Pointer(uc)); + Type(x.Pointer(sc)); + Type(x.Pointer(cc)); + Type(x.Pointer(cuc)); + Type(x.Pointer(csc)); + + Type>(x.Pointers(c)); + Type>(x.Pointers(uc)); + Type>(x.Pointers(sc)); + Type>(x.Pointers(cc)); + Type>(x.Pointers(cuc)); + Type>(x.Pointers(csc)); + + Type>(x.Slice<0>(c)); + Type>(x.Slice<0>(uc)); + Type>(x.Slice<0>(sc)); + Type>(x.Slice<0>(cc)); + Type>(x.Slice<0>(cuc)); + Type>(x.Slice<0>(csc)); + + Type>>(x.Slices(c)); + Type>>(x.Slices(uc)); + Type>>(x.Slices(sc)); + Type>>(x.Slices(cc)); + Type>>(x.Slices(cuc)); + Type>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + const int32_t* p = reinterpret_cast(cc); + + EXPECT_EQ(alignof(int32_t), x.Alignment()); + + EXPECT_EQ(0, x.Offset<0>()); + EXPECT_EQ(0, x.Offset()); + + EXPECT_THAT(x.Offsets(), ElementsAre(0)); + + EXPECT_EQ(1, x.Size<0>()); + EXPECT_EQ(1, x.Size()); + + EXPECT_THAT(x.Sizes(), ElementsAre(1)); + + EXPECT_EQ(sizeof(int32_t), x.AllocSize()); + + EXPECT_EQ(p, Type(x.Pointer<0>(c))); + EXPECT_EQ(p, Type(x.Pointer<0>(cc))); + + EXPECT_EQ(p, Type(x.Pointer(c))); + EXPECT_EQ(p, Type(x.Pointer(cc))); + + EXPECT_THAT(Type>(x.Pointers(c)), Tuple(p)); + EXPECT_THAT(Type>(x.Pointers(cc)), Tuple(p)); + + EXPECT_THAT(Type>(x.Slice<0>(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice<0>(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>(x.Slice(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>>(x.Slices(c)), + Tuple(IsSameSlice(Span(p, 1)))); + EXPECT_THAT(Type>>(x.Slices(cc)), + Tuple(IsSameSlice(Span(p, 1)))); +} + +namespace example { + +// Immutable move-only string with sizeof equal to sizeof(void*). The string +// size and the characters are kept in the same heap allocation. +class CompactString { + public: + CompactString(const char* s = "") { // NOLINT + const size_t size = strlen(s); + // size_t[1], followed by char[size + 1]. + // This statement doesn't allocate memory. + const L layout(1, size + 1); + // AllocSize() tells us how much memory we need to allocate for all our + // data. + p_.reset(new unsigned char[layout.AllocSize()]); + // If running under ASAN, mark the padding bytes, if any, to catch memory + // errors. + layout.PoisonPadding(p_.get()); + // Store the size in the allocation. + // Pointer() is a synonym for Pointer<0>(). + *layout.Pointer(p_.get()) = size; + // Store the characters in the allocation. + memcpy(layout.Pointer(p_.get()), s, size + 1); + } + + size_t size() const { + // Equivalent to reinterpret_cast(*p). + return *L::Partial().Pointer(p_.get()); + } + + const char* c_str() const { + // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). + // The argument in Partial(1) specifies that we have size_t[1] in front of + // the characters. + return L::Partial(1).Pointer(p_.get()); + } + + private: + // Our heap allocation contains a size_t followed by an array of chars. 
+  using L = Layout<size_t, char>;
+  std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(CompactString, Works) {
+  CompactString s = "hello";
+  EXPECT_EQ(5, s.size());
+  EXPECT_STREQ("hello", s.c_str());
+}
+
+}  // namespace example
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
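To make the CompactString arithmetic above concrete: the layout is size_t[1] followed by char[size + 1], so the length header sits at offset 0, the characters begin at sizeof(size_t), and AllocSize() is their sum. A minimal standalone sketch of that check (illustrative only; CompactStringLayoutSketch is a hypothetical helper, not part of the patch):

#include <cassert>
#include <cstddef>

#include "absl/container/internal/layout.h"

// Sketch: verify the offsets CompactString relies on, for "hello"
// (size = 5, so the char array holds 6 bytes including the NUL).
void CompactStringLayoutSketch() {
  using absl::container_internal::Layout;
  constexpr Layout<size_t, char> layout(1, 6);
  static_assert(layout.Offset<0>() == 0, "size header comes first");
  static_assert(layout.Offset<1>() == sizeof(size_t),
                "chars start right after the header");
  assert(layout.AllocSize() == sizeof(size_t) + 6);
}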
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h
new file mode 100644
index 00000000..e8d89f63
--- /dev/null
+++ b/absl/container/internal/node_hash_policy.h
@@ -0,0 +1,90 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+//   // Returns a new node allocated and constructed using the allocator, using
+//   // the specified arguments.
+//   template <class Alloc, class... Args>
+//   value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+//   // Destroys and deallocates node using the allocator.
+//   template <class Alloc>
+//   void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+  static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+  using slot_type = typename std::remove_cv<
+      typename std::remove_reference<Reference>::type>::type*;
+
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+  }
+
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::delete_element(alloc, *slot);
+  }
+
+  template <class Alloc>
+  static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static size_t space_used(const slot_type* slot) {
+    if (slot == nullptr) return Policy::element_space_used(nullptr);
+    return Policy::element_space_used(*slot);
+  }
+
+  static Reference element(slot_type* slot) { return **slot; }
+
+  template <class T, class P = Policy>
+  static auto value(T* elem) -> decltype(P::value(elem)) {
+    return P::value(elem);
+  }
+
+  template <class... Ts, class P = Policy>
+  static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+    return P::apply(std::forward<Ts>(ts)...);
+  }
+};
+
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc
new file mode 100644
index 00000000..a73c7bba
--- /dev/null
+++ b/absl/container/internal/node_hash_policy_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/node_hash_policy.h"
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_policy_traits.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+using ::testing::Pointee;
+
+struct Policy : node_hash_policy<int&, Policy> {
+  using key_type = int;
+  using init_type = int;
+
+  template <class Alloc>
+  static int* new_element(Alloc* alloc, int value) {
+    return new int(value);
+  }
+
+  template <class Alloc>
+  static void delete_element(Alloc* alloc, int* elem) {
+    delete elem;
+  }
+};
+
+using NodePolicy = hash_policy_traits<Policy>;
+
+struct NodeTest : ::testing::Test {
+  std::allocator<int> alloc;
+  int n = 53;
+  int* a = &n;
+};
+
+TEST_F(NodeTest, ConstructDestroy) {
+  NodePolicy::construct(&alloc, &a, 42);
+  EXPECT_THAT(a, Pointee(42));
+  NodePolicy::destroy(&alloc, &a);
+}
+
+TEST_F(NodeTest, transfer) {
+  int s = 42;
+  int* b = &s;
+  NodePolicy::transfer(&alloc, &a, &b);
+  EXPECT_EQ(&s, a);
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
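What the transfer test demonstrates is the key property of node-based storage: a slot is just a pointer, so moving an element between slots copies the pointer and the element's address never changes. A hedged restatement as a standalone sketch (NodeSlotSketch is a hypothetical helper; it reuses the Policy and NodePolicy names from the test above):

#include <cassert>
#include <memory>

void NodeSlotSketch() {
  std::allocator<int> alloc;
  int* from = nullptr;
  int* to = nullptr;
  NodePolicy::construct(&alloc, &from, 7);   // new_element: heap-allocates
  int* element = from;
  NodePolicy::transfer(&alloc, &to, &from);  // *new_slot = *old_slot
  assert(to == element && *to == 7);         // pointer copied, element fixed
  NodePolicy::destroy(&alloc, &to);          // delete_element frees the node
}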
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
new file mode 100644
index 00000000..53d4619a
--- /dev/null
+++ b/absl/container/internal/raw_hash_map.h
@@ -0,0 +1,187 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+  // P is Policy. It's passed as a template argument to support maps that have
+  // incomplete types as values, as in unordered_map<K, IncompleteType>.
+  // MappedReference<> may be a non-reference type.
+  template <class P>
+  using MappedReference = decltype(P::value(
+      std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+  // MappedConstReference<> may be a non-reference type.
+  template <class P>
+  using MappedConstReference = decltype(P::value(
+      std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+  using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
+                                                IsTransparent<Hash>::value>;
+
+ public:
+  using key_type = typename Policy::key_type;
+  using mapped_type = typename Policy::mapped_type;
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+  static_assert(!std::is_reference<key_type>::value, "");
+  // TODO(alkis): remove this assertion and verify that reference mapped_type
+  // is supported.
+  static_assert(!std::is_reference<mapped_type>::value, "");
+
+  using iterator = typename raw_hash_map::raw_hash_set::iterator;
+  using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+  raw_hash_map() {}
+  using raw_hash_map::raw_hash_set::raw_hash_set;
+
+  // The last two template parameters ensure that both arguments are rvalues
+  // (lvalue arguments are handled by the overloads below). This is necessary
+  // for supporting bitfield arguments.
+  //
+  //   union { int n : 1; };
+  //   flat_hash_map<int, int> m;
+  //   m.insert_or_assign(n, n);
+  template <class K = key_type, class V = mapped_type, K* = nullptr,
+            V* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+    return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+    return insert_or_assign_impl(std::forward<K>(k), v);
+  }
+
+  template <class K = key_type, class V = mapped_type, V* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+    return insert_or_assign_impl(k, std::forward<V>(v));
+  }
+
+  template <class K = key_type, class V = mapped_type>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+    return insert_or_assign_impl(k, v);
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr,
+            V* = nullptr>
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+    return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr>
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+    return insert_or_assign(std::forward<K>(k), v).first;
+  }
+
+  template <class K = key_type, class V = mapped_type, V* = nullptr>
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+    return insert_or_assign(k, std::forward<V>(v)).first;
+  }
+
+  template <class K = key_type, class V = mapped_type>
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+    return insert_or_assign(k, v).first;
+  }
+
+  template <class K = key_type, class... Args,
+            typename std::enable_if<
+                !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+            K* = nullptr>
+  std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+    return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+  }
+
+  template <class K = key_type, class... Args,
+            typename std::enable_if<
+                !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+  std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+    return try_emplace_impl(k, std::forward<Args>(args)...);
+  }
+
+  template <class K = key_type, class... Args, K* = nullptr>
+  iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+    return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+  }
+
+  template <class K = key_type, class... Args>
+  iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+    return try_emplace(k, std::forward<Args>(args)...).first;
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P> at(const key_arg<K>& key) {
+    auto it = this->find(key);
+    if (it == this->end()) std::abort();
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedConstReference<P> at(const key_arg<K>& key) const {
+    auto it = this->find(key);
+    if (it == this->end()) std::abort();
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy, K* = nullptr>
+  MappedReference<P> operator[](key_arg<K>&& key) {
+    return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P> operator[](const key_arg<K>& key) {
+    return Policy::value(&*try_emplace(key).first);
+  }
+
+ private:
+  template <class K, class V>
+  std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+    auto res = this->find_or_prepare_insert(k);
+    if (res.second)
+      this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+    else
+      Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+    return {this->iterator_at(res.first), res.second};
+  }
+
+  template <class K = key_type, class... Args>
+  std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+    auto res = this->find_or_prepare_insert(k);
+    if (res.second)
+      this->emplace_at(res.first, std::piecewise_construct,
+                       std::forward_as_tuple(std::forward<K>(k)),
+                       std::forward_as_tuple(std::forward<Args>(args)...));
+    return {this->iterator_at(res.first), res.second};
+  }
+};
+
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
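The overload set above mirrors C++17 std::unordered_map semantics: insert_or_assign always leaves the mapped value equal to its argument, while try_emplace constructs the value only on a miss and leaves an existing entry untouched (its extra arguments are not consumed). A usage sketch, assuming flat_hash_map as the public wrapper built on raw_hash_map elsewhere in this patch (MapApiSketch is a hypothetical helper):

#include <cassert>
#include <string>

#include "absl/container/flat_hash_map.h"

void MapApiSketch() {
  absl::flat_hash_map<std::string, int> m;
  m.insert_or_assign("k", 1);  // miss: behaves like insert
  m.insert_or_assign("k", 2);  // hit: assigns over the old value
  m.try_emplace("k", 99);      // hit: leaves the existing value alone
  assert(m.at("k") == 2);
}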
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
new file mode 100644
index 00000000..4e690dac
--- /dev/null
+++ b/absl/container/internal/raw_hash_set.cc
@@ -0,0 +1,48 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+constexpr size_t Group::kWidth;
+
+// Returns "random" seed.
+inline size_t RandomSeed() {
+#if ABSL_HAVE_THREAD_LOCAL
+  static thread_local size_t counter = 0;
+  size_t value = ++counter;
+#else   // ABSL_HAVE_THREAD_LOCAL
+  static std::atomic<size_t> counter(0);
+  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif  // ABSL_HAVE_THREAD_LOCAL
+  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+  // To avoid problems with weak hashes and single bit tests, we use % 13.
+  // TODO(kfm,sbenza): revisit after we do unconditional mixing
+  return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
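RandomSeed() above draws its entropy from the address space rather than a real RNG: &counter differs per process under ASLR (and per thread in the thread_local branch), so insertion order within a group is perturbed between runs at essentially no cost. A standalone sketch of the technique (ToyRandomSeed is hypothetical, not the library's API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Mix a per-thread counter with its own address; the address contributes
// per-process (ASLR) entropy, the counter varies within a run.
size_t ToyRandomSeed() {
  static thread_local size_t counter = 0;
  size_t value = ++counter;
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}

int main() {
  // Typically prints a different value on each run of the binary, which is
  // what makes hashtable iteration order non-deterministic across processes.
  std::printf("%zu\n", ToyRandomSeed());
}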
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
new file mode 100644
index 00000000..0c42e4ae
--- /dev/null
+++ b/absl/container/internal/raw_hash_set.h
@@ -0,0 +1,1950 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing
+// hashtable with quadratic probing.
+//
+// This is a low level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+//   iterator find(const key_type& key);
+//   template <class K>
+//   iterator find(const K& key);
+//
+//   size_type erase(const key_type& key);
+//   template <class K>
+//   size_type erase(const K& key);
+//
+//   std::pair<iterator, iterator> equal_range(const key_type& key);
+//   template <class K>
+//   std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type`
+// overloads exist.
+//
+// find() also supports passing the hash explicitly:
+//
+//   iterator find(const key_type& key, size_t hash);
+//   template <class U>
+//   iterator find(const U& key, size_t hash);
+//
+// In addition the pointer to element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is
+// one byte per slot and stores empty or deleted marks, or alternatively 7 bits
+// from the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+//      Group 1         Group 2        Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group the slots are matched to H2
+// in parallel. Because H2 is 7 bits (128 states) and the number of slots per
+// group is low (8 or 16) in almost all cases a match in H2 is also a lookup
+// hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase a slot is cleared. In case the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N the number of groups,
+// the probing function for the i'th probe is:
+//
+//   P(0) = H1 % N
+//
+//   P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.

+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) ||  \
+    (defined(_MSC_VER) && \
+     (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif + +#if SWISSTABLE_HAVE_SSE2 +#include +#endif + +#if SWISSTABLE_HAVE_SSSE3 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/bits.h" +#include "absl/base/internal/endian.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +namespace absl { +inline namespace lts_2018_12_18 { +namespace container_internal { + +template +class probe_seq { + public: + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index. The i-th probe in the probe sequence. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template +struct RequireUsableKey { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), + std::declval()))>* + operator()(const PassedKey&, const Args&...) const; +}; + +template +struct IsDecomposable : std::false_type {}; + +template +struct IsDecomposable< + absl::void_t(), + std::declval()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +template +struct IsTransparent : std::false_type {}; +template +struct IsTransparent> + : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. +template +constexpr bool IsNoThrowSwappable() { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); +} + +template +int TrailingZeros(T x) { + return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64( + static_cast(x)) + : base_internal::CountTrailingZerosNonZero32( + static_cast(x)); +} + +template +int LeadingZeros(T x) { + return sizeof(T) == 8 + ? base_internal::CountLeadingZeros64(static_cast(x)) + : base_internal::CountLeadingZeros32(static_cast(x)); +} + +// An abstraction over a bitmask. It provides an easy way to iterate through the +// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), +// this is a true bitmask. On non-SSE, platforms the arithematic used to +// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as +// either 0x00 or 0x80. +// +// For example: +// for (int i : BitMask(0x5)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask { + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + // These are useful for unit tests (gunit). 
+ using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + explicit BitMask(T mask) : mask_(mask) {} + BitMask& operator++() { + mask_ &= (mask_ - 1); + return *this; + } + explicit operator bool() const { return mask_ != 0; } + int operator*() const { return LowestBitSet(); } + int LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + int HighestBitSet() const { + return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - + 1) >> + Shift; + } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + int TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + int LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } + + T mask_; +}; + +using ctrl_t = signed char; +using h2_t = uint8_t; + +// The values here are selected for maximum performance. See the static asserts +// below for details. +enum Ctrl : ctrl_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + kEmpty & kDeleted & kSentinel & 0x80, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). +inline ctrl_t* EmptyGroup() { + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast(empty_group); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); + +// Returns a hash seed. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. +inline size_t HashSeed(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. 
+
+// A single block of empty control bytes for tables without any slots
+// allocated. This enables removing a branch in the hot path of find().
+inline ctrl_t* EmptyGroup() {
+  alignas(16) static constexpr ctrl_t empty_group[] = {
+      kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+      kEmpty,    kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
+  return const_cast<ctrl_t*>(empty_group);
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+  // The low bits of the pointer have little or no entropy because of
+  // alignment. We shift the pointer to try to use higher entropy bits. A
+  // good number seems to be 12 bits, because that aligns with page size.
+  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+  return (hash >> 7) ^ HashSeed(ctrl);
+}
+inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
+
+inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= 0; }
+inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+
+#if SWISSTABLE_HAVE_SSE2
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+  if (std::is_unsigned<char>::value) {
+    const __m128i mask = _mm_set1_epi8(0x80);
+    const __m128i diff = _mm_subs_epi8(b, a);
+    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+  }
+#endif
+  return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+  static constexpr size_t kWidth = 16;  // the number of slots per group
+
+  explicit GroupSse2Impl(const ctrl_t* pos) {
+    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+  }
+
+  // Returns a bitmask representing the positions of slots that match hash.
+  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+    auto match = _mm_set1_epi8(hash);
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+  }
+
+  // Returns a bitmask representing the positions of empty slots.
+  BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if SWISSTABLE_HAVE_SSSE3
+    // This only works because kEmpty is -128.
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+    return Match(kEmpty);
+#endif
+  }
+
+  // Returns a bitmask representing the positions of empty or deleted slots.
+  BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+    auto special = _mm_set1_epi8(kSentinel);
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+  }
+
+  // Returns the number of trailing empty or deleted elements in the group.
+  uint32_t CountLeadingEmptyOrDeleted() const {
+    auto special = _mm_set1_epi8(kSentinel);
+    return TrailingZeros(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+  }
+
+  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+    auto x126 = _mm_set1_epi8(126);
+#if SWISSTABLE_HAVE_SSSE3
+    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+    auto zero = _mm_setzero_si128();
+    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+  }
+
+  __m128i ctrl;
+};
+#endif  // SWISSTABLE_HAVE_SSE2
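+
+// For intuition (illustrative only): _mm_cmpeq_epi8 turns each byte of the
+// group into 0xFF (match) or 0x00, and _mm_movemask_epi8 collapses those 16
+// bytes into the low 16 bits of an int, one bit per slot. For example, if
+// slots 0 and 5 hold the sought H2 value, Match() returns
+// BitMask<uint32_t, 16>(0b100001), which iterates as 0, 5.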
+
+struct GroupPortableImpl {
+  static constexpr size_t kWidth = 8;
+
+  explicit GroupPortableImpl(const ctrl_t* pos)
+      : ctrl(little_endian::Load64(pos)) {}
+
+  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+    // For the technique, see:
+    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
+    // (Determine if a word has a byte equal to n).
+    //
+    // Caveat: there are false positives but:
+    // - they only occur if there is a real match
+    // - they never occur on kEmpty, kDeleted, kSentinel
+    // - they will be handled gracefully by subsequent checks in code
+    //
+    // Example:
+    //   v = 0x1716151413121110
+    //   hash = 0x12
+    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    constexpr uint64_t lsbs = 0x0101010101010101ULL;
+    auto x = ctrl ^ (lsbs * hash);
+    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+  }
+
+  BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+  }
+
+  BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+  }
+
+  uint32_t CountLeadingEmptyOrDeleted() const {
+    constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+    return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+  }
+
+  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    constexpr uint64_t lsbs = 0x0101010101010101ULL;
+    auto x = ctrl & msbs;
+    auto res = (~x + (x >> 7)) & ~lsbs;
+    little_endian::Store64(dst, res);
+  }
+
+  uint64_t ctrl;
+};
+
+#if SWISSTABLE_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
+inline bool IsValidCapacity(size_t n) {
+  return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
+}
+
+// PRECONDITION:
+//   IsValidCapacity(capacity)
+//   ctrl[capacity] == kSentinel
+//   ctrl[i] != kSentinel for all i < capacity
+// Applies mapping for every byte in ctrl:
+//   DELETED -> EMPTY
+//   EMPTY -> EMPTY
+//   FULL -> DELETED
+inline void ConvertDeletedToEmptyAndFullToDeleted(
+    ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
+// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
+// greater or equal to Group::kWidth - 1.
+inline size_t NormalizeCapacity(size_t n) {
+  constexpr size_t kMinCapacity = Group::kWidth - 1;
+  return n <= kMinCapacity
+             ? kMinCapacity
+             : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
+}
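+
+// For example (illustrative only), with the SSE group width of 16:
+//   NormalizeCapacity(0)  == 15   // kMinCapacity
+//   NormalizeCapacity(16) == 31   // next 2^k - 1 above 16
+//   NormalizeCapacity(31) == 31   // already of the form 2^k - 1
+//   NormalizeCapacity(32) == 63
+// The "power of 2 minus 1" shape lets probe_seq reduce offsets with a cheap
+// bitwise AND instead of a modulo.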
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename Policy, typename Alloc>
+class node_handle_base {
+ protected:
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+  using allocator_type = Alloc;
+
+  constexpr node_handle_base() {}
+  node_handle_base(node_handle_base&& other) noexcept {
+    *this = std::move(other);
+  }
+  ~node_handle_base() { destroy(); }
+  node_handle_base& operator=(node_handle_base&& other) {
+    destroy();
+    if (!other.empty()) {
+      alloc_ = other.alloc_;
+      PolicyTraits::transfer(alloc(), slot(), other.slot());
+      other.reset();
+    }
+    return *this;
+  }
+
+  bool empty() const noexcept { return !alloc_; }
+  explicit operator bool() const noexcept { return !empty(); }
+  allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
+    PolicyTraits::transfer(alloc(), slot(), s);
+  }
+
+  void destroy() {
+    if (!empty()) {
+      PolicyTraits::destroy(alloc(), slot());
+      reset();
+    }
+  }
+
+  void reset() {
+    assert(alloc_.has_value());
+    alloc_ = absl::nullopt;
+  }
+
+  slot_type* slot() const {
+    assert(!empty());
+    return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+  }
+  allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+  absl::optional<allocator_type> alloc_;
+  mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
+      slot_space_;
+};
+
+// For sets.
+template <typename Policy, typename Hash, typename Eq, typename Alloc,
+          typename = void>
+class node_handle : public node_handle_base<Policy, Alloc> {
+  using Base = typename node_handle::node_handle_base;
+
+ public:
+  using value_type = typename Base::PolicyTraits::value_type;
+
+  constexpr node_handle() {}
+
+  value_type& value() const {
+    return Base::PolicyTraits::element(this->slot());
+  }
+
+ private:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
+
+// For maps.
+template <typename Policy, typename Hash, typename Eq, typename Alloc>
+class node_handle<Policy, Hash, Eq, Alloc,
+                  absl::void_t<typename Policy::mapped_type>>
+    : public node_handle_base<Policy, Alloc> {
+  using Base = typename node_handle::node_handle_base;
+
+ public:
+  using key_type = typename Policy::key_type;
+  using mapped_type = typename Policy::mapped_type;
+
+  constexpr node_handle() {}
+
+  auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
+    return Base::PolicyTraits::key(this->slot());
+  }
+
+  mapped_type& mapped() const {
+    return Base::PolicyTraits::value(
+        &Base::PolicyTraits::element(this->slot()));
+  }
+
+ private:
+  template <typename, typename, typename, typename>
+  friend class raw_hash_set;
+
+  node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct insert_return_type {
+  Iterator position;
+  bool inserted;
+  NodeType node;
+};
+
+// Helper trait to allow or disallow arbitrary keys when the hash and
+// eq functions are transparent.
+// It is very important that the inner template is an alias and that the type
+// it produces is not a dependent type. Otherwise, type deduction would fail.
+template <bool is_transparent>
+struct KeyArg {
+  // Transparent. Forward `K`.
+  template <typename K, typename key_type>
+  using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+  // Not transparent. Always use `key_type`.
+  template <typename K, typename key_type>
+  using type = key_type;
+};
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable.
+// The functor should accept a key and return size_t as hash. For best
+// performance it is important that the hash function provides high entropy
+// across all bits of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality.
+// It should accept two (of possibly different type) keys and return a bool:
+// true if they are equal, false if they are not. If two keys compare equal,
+// then their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
+                                                IsTransparent<Hash>::value>;
+
+ public:
+  using init_type = typename PolicyTraits::init_type;
+  using key_type = typename PolicyTraits::key_type;
+  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+  // code fixes!
+  using slot_type = typename PolicyTraits::slot_type;
+  using allocator_type = Alloc;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using hasher = Hash;
+  using key_equal = Eq;
+  using policy_type = Policy;
+  using value_type = typename PolicyTraits::value_type;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::pointer;
+  using const_pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+  // Alias used for heterogeneous lookup functions.
+  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+  // `key_type` otherwise. It permits template argument deduction on `K` for
+  // the transparent case.
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+  // Give an early error when key_type is not hashable/eq.
+  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+  using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
+
+  static Layout MakeLayout(size_t capacity) {
+    assert(IsValidCapacity(capacity));
+    return Layout(capacity + Group::kWidth + 1, capacity);
+  }
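+
+  // The allocation therefore holds capacity + Group::kWidth + 1 control
+  // bytes followed by capacity slots. For example (illustrative only), with
+  // capacity 15 and kWidth 16 the ctrl array has 32 bytes:
+  //
+  //   ctrl[0..14]   one byte of state per slot
+  //   ctrl[15]      kSentinel, marking the end of the table
+  //   ctrl[16..31]  mirror of the first bytes, so that a 16-byte Group load
+  //                 starting at any index <= 15 stays inside the array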
+
+  using AllocTraits = absl::allocator_traits<allocator_type>;
+  using SlotAlloc = typename absl::allocator_traits<
+      allocator_type>::template rebind_alloc<slot_type>;
+  using SlotAllocTraits = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<slot_type>;
+
+  static_assert(std::is_lvalue_reference<reference>::value,
+                "Policy::element() must return a reference");
+
+  template <typename T>
+  struct SameAsElementReference
+      : std::is_same<typename std::remove_cv<
+                         typename std::remove_reference<reference>::type>::type,
+                     typename std::remove_cv<
+                         typename std::remove_reference<T>::type>::type> {};
+
+  // An enabler for insert(T&&): T must be convertible to init_type or be the
+  // same as [cv] value_type [ref].
+  // Note: we separate SameAsElementReference into its own type to avoid using
+  // reference unless we need to. MSVC doesn't seem to like it in some
+  // cases.
+  template <class T>
+  using RequiresInsertable = typename std::enable_if<
+      absl::disjunction<std::is_convertible<T, init_type>,
+                        SameAsElementReference<T>>::value,
+      int>::type;
+
+  // RequiresNotInit is a workaround for gcc prior to 7.1.
+  // See https://godbolt.org/g/Y4xsUh.
+  template <class T>
+  using RequiresNotInit =
+      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+  template <class... Ts>
+  using IsDecomposable = IsDecomposable<void, Policy, Hash, Eq, Ts...>;
+
+ public:
+  static_assert(std::is_same<pointer, value_type*>::value,
+                "Allocators with custom pointer types are not supported");
+  static_assert(std::is_same<const_pointer, const value_type*>::value,
+                "Allocators with custom pointer types are not supported");
+
+  class iterator {
+    friend class raw_hash_set;
+
+   public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = typename raw_hash_set::value_type;
+    using reference =
+        absl::conditional_t<PolicyTraits::constant_iterators::value,
+                            const value_type&, value_type&>;
+    using pointer = absl::remove_reference_t<reference>*;
+    using difference_type = typename raw_hash_set::difference_type;
+
+    iterator() {}
+
+    // PRECONDITION: not an end() iterator.
+    reference operator*() const { return PolicyTraits::element(slot_); }
+
+    // PRECONDITION: not an end() iterator.
+    pointer operator->() const { return &operator*(); }
+
+    // PRECONDITION: not an end() iterator.
+    iterator& operator++() {
+      ++ctrl_;
+      ++slot_;
+      skip_empty_or_deleted();
+      return *this;
+    }
+    // PRECONDITION: not an end() iterator.
+    iterator operator++(int) {
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    friend bool operator==(const iterator& a, const iterator& b) {
+      return a.ctrl_ == b.ctrl_;
+    }
+    friend bool operator!=(const iterator& a, const iterator& b) {
+      return !(a == b);
+    }
+
+   private:
+    iterator(ctrl_t* ctrl) : ctrl_(ctrl) {}  // for end()
+    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+
+    void skip_empty_or_deleted() {
+      while (IsEmptyOrDeleted(*ctrl_)) {
+        // ctrl is not necessarily aligned to Group::kWidth. It is also likely
+        // to read past the space for ctrl bytes and into slots. This is ok
+        // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
+        // is no way to read outside the combined slot array.
+        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+        ctrl_ += shift;
+        slot_ += shift;
+      }
+    }
+
+    ctrl_t* ctrl_ = nullptr;
+    slot_type* slot_;
+  };
+
+  class const_iterator {
+    friend class raw_hash_set;
+
+   public:
+    using iterator_category = typename iterator::iterator_category;
+    using value_type = typename raw_hash_set::value_type;
+    using reference = typename raw_hash_set::const_reference;
+    using pointer = typename raw_hash_set::const_pointer;
+    using difference_type = typename raw_hash_set::difference_type;
+
+    const_iterator() {}
+    // Implicit construction from iterator.
+    const_iterator(iterator i) : inner_(std::move(i)) {}
+
+    reference operator*() const { return *inner_; }
+    pointer operator->() const { return inner_.operator->(); }
+
+    const_iterator& operator++() {
+      ++inner_;
+      return *this;
+    }
+    const_iterator operator++(int) { return inner_++; }
+
+    friend bool operator==(const const_iterator& a, const const_iterator& b) {
+      return a.inner_ == b.inner_;
+    }
+    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+      return !(a == b);
+    }
+
+   private:
+    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+    iterator inner_;
+  };
+
+  using node_type = container_internal::node_handle<Policy, Hash, Eq, Alloc>;
+
+  raw_hash_set() noexcept(
+      std::is_nothrow_default_constructible<hasher>::value&&
+          std::is_nothrow_default_constructible<key_equal>::value&&
+              std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+                        const key_equal& eq = key_equal(),
+                        const allocator_type& alloc = allocator_type())
+      : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+    if (bucket_count) {
+      capacity_ = NormalizeCapacity(bucket_count);
+      growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+      initialize_slots();
+    }
+  }
+
+  raw_hash_set(size_t bucket_count, const hasher& hash,
+               const allocator_type& alloc)
+      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+  explicit raw_hash_set(const allocator_type& alloc)
+      : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(bucket_count, hash, eq, alloc) {
+    insert(first, last);
+  }
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+  // Instead of accepting std::initializer_list<value_type> as the first
+  // argument like std::unordered_set<value_type> does, we have two overloads
+  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+  // This is advantageous for performance.
+  //
+  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+  //   // copies the strings into the set.
+  //   std::unordered_set<std::string> s = {"abc", "def"};
+  //
+  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+  //   // copies the strings into the set.
+  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
+  //
+  // The same trick is used in insert().
+  //
+  // The enabler is necessary to prevent this constructor from triggering where
+  // the copy constructor is meant to be called.
+  //
+  //   absl::flat_hash_set<int> a, b{a};
+  //
+  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init,
+               const allocator_type& alloc)
+      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(const raw_hash_set& that)
+      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+                               that.alloc_ref())) {}
+
+  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+    reserve(that.size());
+    // Because the table is guaranteed to be empty, we can do something faster
+    // than a full `insert`.
+    for (const auto& v : that) {
+      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+      const size_t i = find_first_non_full(hash);
+      set_ctrl(i, H2(hash));
+      emplace_at(i, v);
+    }
+    size_ = that.size();
+    growth_left() -= that.size();
+  }
+
+  raw_hash_set(raw_hash_set&& that) noexcept(
+      std::is_nothrow_copy_constructible<hasher>::value&&
+          std::is_nothrow_copy_constructible<key_equal>::value&&
+              std::is_nothrow_copy_constructible<allocator_type>::value)
+      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+        slots_(absl::exchange(that.slots_, nullptr)),
+        size_(absl::exchange(that.size_, 0)),
+        capacity_(absl::exchange(that.capacity_, 0)),
+        // Hash, equality and allocator are copied instead of moved because
+        // `that` must be left valid. If Hash is std::function<Key>, moving it
+        // would create a nullptr functor that cannot be called.
+        settings_(that.settings_) {
+    // growth_left was copied above, reset the one from `that`.
+    that.growth_left() = 0;
+  }
+
+  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+      : ctrl_(EmptyGroup()),
+        slots_(nullptr),
+        size_(0),
+        capacity_(0),
+        settings_(0, that.hash_ref(), that.eq_ref(), a) {
+    if (a == that.alloc_ref()) {
+      std::swap(ctrl_, that.ctrl_);
+      std::swap(slots_, that.slots_);
+      std::swap(size_, that.size_);
+      std::swap(capacity_, that.capacity_);
+      std::swap(growth_left(), that.growth_left());
+    } else {
+      reserve(that.size());
+      // Note: this will copy elements of dense_set and unordered_set instead
+      // of moving them. This can be fixed if it ever becomes an issue.
+      for (auto& elem : that) insert(std::move(elem));
+    }
+  }
+
+  raw_hash_set& operator=(const raw_hash_set& that) {
+    raw_hash_set tmp(that,
+                     AllocTraits::propagate_on_container_copy_assignment::value
+                         ? that.alloc_ref()
+                         : alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+      absl::allocator_traits<allocator_type>::is_always_equal::value&&
+          std::is_nothrow_move_assignable<hasher>::value&&
+              std::is_nothrow_move_assignable<key_equal>::value) {
+    // TODO(sbenza): We should only use the operations from the noexcept clause
+    // to make sure we actually adhere to that contract.
+    return move_assign(
+        std::move(that),
+        typename AllocTraits::propagate_on_container_move_assignment());
+  }
+
+  ~raw_hash_set() { destroy_slots(); }
+
+  iterator begin() {
+    auto it = iterator_at(0);
+    it.skip_empty_or_deleted();
+    return it;
+  }
+  iterator end() { return {ctrl_ + capacity_}; }
+
+  const_iterator begin() const {
+    return const_cast<raw_hash_set*>(this)->begin();
+  }
+  const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+  const_iterator cbegin() const { return begin(); }
+  const_iterator cend() const { return end(); }
+
+  bool empty() const { return !size(); }
+  size_t size() const { return size_; }
+  size_t capacity() const { return capacity_; }
+  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+  void clear() {
+    // Iterating over this container is O(bucket_count()). When bucket_count()
+    // is much greater than size(), iteration becomes prohibitively expensive.
+    // For clear() it is more important to reuse the allocated array when the
+    // container is small because allocation takes comparatively long time
+    // compared to destruction of the elements of the container. So we pick the
+    // largest bucket_count() threshold for which iteration is still fast and
+    // past that we simply deallocate the array.
+    if (capacity_ > 127) {
+      destroy_slots();
+    } else if (capacity_) {
+      for (size_t i = 0; i != capacity_; ++i) {
+        if (IsFull(ctrl_[i])) {
+          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+        }
+      }
+      size_ = 0;
+      reset_ctrl();
+      growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+    }
+    assert(empty());
+  }
+
+  // This overload kicks in when the argument is an rvalue of insertable and
+  // decomposable type other than init_type.
+  //
+  //   flat_hash_map<std::string, int> m;
+  //   m.insert(std::make_pair("abc", 42));
+  template <class T, RequiresInsertable<T> = 0,
+            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+            T* = nullptr>
+  std::pair<iterator, bool> insert(T&& value) {
+    return emplace(std::forward<T>(value));
+  }
+
+  // This overload kicks in when the argument is a bitfield or an lvalue of
+  // insertable and decomposable type.
+  //
+  //   union { int n : 1; };
+  //   flat_hash_set<int> s;
+  //   s.insert(n);
+  //
+  //   flat_hash_set<std::string> s;
+  //   const char* p = "hello";
+  //   s.insert(p);
+  //
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<T&&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  std::pair<iterator, bool> insert(const T& value) {
+    return emplace(value);
+  }
+
+  // This overload kicks in when the argument is an rvalue of init_type. Its
+  // purpose is to handle brace-init-list arguments.
+  //
+  //   flat_hash_set<std::pair<std::string, int>> s;
+  //   s.insert({"abc", 42});
+  std::pair<iterator, bool> insert(init_type&& value) {
+    return emplace(std::move(value));
+  }
+
+  template <class T, RequiresInsertable<T> = 0,
+            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+            T* = nullptr>
+  iterator insert(const_iterator, T&& value) {
+    return insert(std::forward<T>(value)).first;
+  }
+
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<T&&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  iterator insert(const_iterator, const T& value) {
+    return insert(value).first;
+  }
+
+  iterator insert(const_iterator, init_type&& value) {
+    return insert(std::move(value)).first;
+  }
+
+  template <class InputIt>
+  void insert(InputIt first, InputIt last) {
+    for (; first != last; ++first) insert(*first);
+  }
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+  void insert(std::initializer_list<T> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
+  void insert(std::initializer_list<init_type> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
+  insert_return_type<iterator, node_type> insert(node_type&& node) {
+    if (!node) return {end(), false, node_type()};
+    const auto& elem = PolicyTraits::element(node.slot());
+    auto res = PolicyTraits::apply(
+        InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+    if (res.second) {
+      node.reset();
+      return {res.first, true, node_type()};
+    } else {
+      return {res.first, false, std::move(node)};
+    }
+  }
+
+  iterator insert(const_iterator, node_type&& node) {
+    return insert(std::move(node)).first;
+  }
+
+  // This overload kicks in if we can deduce the key from args. This enables us
+  // to avoid constructing value_type if an entry with the same key already
+  // exists.
+  //
+  // For example:
+  //
+  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+  //   // Creates no std::string copies and makes no heap allocations.
+  //   m.emplace("abc", "xyz");
+  template <class... Args, typename std::enable_if<
+                               IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    return PolicyTraits::apply(EmplaceDecomposable{*this},
+                               std::forward<Args>(args)...);
+  }
+
+  // This overload kicks in if we cannot deduce the key from args. It
+  // constructs value_type unconditionally and then either moves it into the
+  // table or destroys it.
+  template <class... Args, typename std::enable_if<
+                               !IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+        raw;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+    const auto& elem = PolicyTraits::element(slot);
+    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+  }
+
+  template <class... Args>
+  iterator emplace_hint(const_iterator, Args&&... args) {
+    return emplace(std::forward<Args>(args)...).first;
+  }
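+
+  // To make the difference between the two emplace overloads concrete
+  // (illustrative only; Widget and widget_arg are hypothetical):
+  //
+  //   flat_hash_map<std::string, Widget> m;
+  //   // The mapped Widget is constructed only if "abc" is not yet present;
+  //   // the decomposable overload hashes and looks up the key first.
+  //   m.emplace("abc", widget_arg);
+  //
+  // The non-decomposable overload instead must build a value_type up front
+  // just to learn its key, and destroys it again if the key is found.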
+
+  // Extension API: support for lazy emplace.
+  //
+  // Looks up key in the table. If found, returns the iterator to the element.
+  // Otherwise calls f with one argument of type raw_hash_set::constructor. f
+  // MUST call raw_hash_set::constructor with arguments as if a
+  // raw_hash_set::value_type is constructed, otherwise the behavior is
+  // undefined.
+  //
+  // For example:
+  //
+  //   std::unordered_set<ArenaString> s;
+  //   // Makes ArenaStr even if "abc" is in the map.
+  //   s.insert(ArenaString(&arena, "abc"));
+  //
+  //   flat_hash_set<ArenaString> s;
+  //   // Makes ArenaStr only if "abc" is not in the map.
+  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
+  //     ctor(&arena, "abc");
+  //   });
+  //
+  // WARNING: This API is currently experimental. If there is a way to
+  // implement the same thing with the rest of the API, prefer that.
+  class constructor {
+    friend class raw_hash_set;
+
+   public:
+    template <class... Args>
+    void operator()(Args&&... args) const {
+      assert(*slot_);
+      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+      *slot_ = nullptr;
+    }
+
+   private:
+    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+    allocator_type* alloc_;
+    slot_type** slot_;
+  };
+
+  template <class K = key_type, class F>
+  iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+    auto res = find_or_prepare_insert(key);
+    if (res.second) {
+      slot_type* slot = slots_ + res.first;
+      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+      assert(!slot);
+    }
+    return iterator_at(res.first);
+  }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.erase("abc");
+  //
+  //   flat_hash_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.erase("abc");
+  template <class K = key_type>
+  size_type erase(const key_arg<K>& key) {
+    auto it = find(key);
+    if (it == end()) return 0;
+    erase(it);
+    return 1;
+  }
+
+  // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+  // this method returns void to reduce algorithmic complexity to O(1). In
+  // order to erase while iterating across a map, use the following idiom
+  // (which also works for standard containers):
+  //
+  //   for (auto it = m.begin(), end = m.end(); it != end;) {
+  //     if (<pred>) {
+  //       m.erase(it++);
+  //     } else {
+  //       ++it;
+  //     }
+  //   }
+  void erase(const_iterator cit) { erase(cit.inner_); }
+
+  // This overload is necessary because otherwise erase<K>(const K&) would be
+  // a better match if non-const iterator is passed as an argument.
+  void erase(iterator it) {
+    assert(it != end());
+    PolicyTraits::destroy(&alloc_ref(), it.slot_);
+    erase_meta_only(it);
+  }
+
+  iterator erase(const_iterator first, const_iterator last) {
+    while (first != last) {
+      erase(first++);
+    }
+    return last.inner_;
+  }
+
+  // Moves elements from `src` into `this`.
+  // If the element already exists in `this`, it is left unmodified in `src`.
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
+    assert(this != &src);
+    for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+                              PolicyTraits::element(it.slot_))
+              .second) {
+        src.erase_meta_only(it);
+      }
+    }
+  }
+
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+    merge(src);
+  }
+
+  node_type extract(const_iterator position) {
+    node_type node(alloc_ref(), position.inner_.slot_);
+    erase_meta_only(position);
+    return node;
+  }
+
+  template <
+      class K = key_type,
+      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+  node_type extract(const key_arg<K>& key) {
+    auto it = find(key);
+    return it == end() ? node_type() : extract(const_iterator{it});
+  }
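+
+  // For instance (illustrative only), extract() plus insert() moves an
+  // element between tables without copying it:
+  //
+  //   flat_hash_set<std::string> a = {"x"}, b;
+  //   b.insert(a.extract("x"));  // "x" changes owner; no std::string copy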
+
+  void swap(raw_hash_set& that) noexcept(
+      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+      (!AllocTraits::propagate_on_container_swap::value ||
+       IsNoThrowSwappable<allocator_type>())) {
+    using std::swap;
+    swap(ctrl_, that.ctrl_);
+    swap(slots_, that.slots_);
+    swap(size_, that.size_);
+    swap(capacity_, that.capacity_);
+    swap(growth_left(), that.growth_left());
+    swap(hash_ref(), that.hash_ref());
+    swap(eq_ref(), that.eq_ref());
+    if (AllocTraits::propagate_on_container_swap::value) {
+      swap(alloc_ref(), that.alloc_ref());
+    } else {
+      // If the allocators do not compare equal it is officially undefined
+      // behavior. We choose to do nothing.
+    }
+  }
+
+  void rehash(size_t n) {
+    if (n == 0 && capacity_ == 0) return;
+    if (n == 0 && size_ == 0) return destroy_slots();
+    auto m = NormalizeCapacity(std::max(n, NumSlotsFast(size())));
+    // n == 0 unconditionally rehashes as per the standard.
+    if (n == 0 || m > capacity_) {
+      resize(m);
+    }
+  }
+
+  void reserve(size_t n) { rehash(NumSlotsFast(n)); }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.count("abc");
+  //
+  //   ch_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.count("abc");
+  template <class K = key_type>
+  size_t count(const key_arg<K>& key) const {
+    return find(key) == end() ? 0 : 1;
+  }
+
+  // Issues CPU prefetch instructions for the memory needed to find or insert
+  // a key. Like all lookup functions, this supports heterogeneous keys.
+  //
+  // NOTE: This is a very low level operation and should not be used without
+  // specific benchmarks indicating its importance.
+  template <class K = key_type>
+  void prefetch(const key_arg<K>& key) const {
+    (void)key;
+#if defined(__GNUC__)
+    auto seq = probe(hash_ref()(key));
+    __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+    __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif  // __GNUC__
+  }
+
+  // The API of find() has two extensions.
+  //
+  // 1. The hash can be passed by the user. It must be equal to the hash of the
+  // key.
+  //
+  // 2. The type of the key argument doesn't have to be key_type. This is
+  // so-called heterogeneous key support.
+  template <class K = key_type>
+  iterator find(const key_arg<K>& key, size_t hash) {
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return iterator_at(seq.offset(i));
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+      seq.next();
+    }
+  }
+  template <class K = key_type>
+  iterator find(const key_arg<K>& key) {
+    return find(key, hash_ref()(key));
+  }
+
+  template <class K = key_type>
+  const_iterator find(const key_arg<K>& key, size_t hash) const {
+    return const_cast<raw_hash_set*>(this)->find(key, hash);
+  }
+  template <class K = key_type>
+  const_iterator find(const key_arg<K>& key) const {
+    return find(key, hash_ref()(key));
+  }
+
+  template <class K = key_type>
+  bool contains(const key_arg<K>& key) const {
+    return find(key) != end();
+  }
+
+  template <class K = key_type>
+  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+    auto it = find(key);
+    if (it != end()) return {it, std::next(it)};
+    return {it, it};
+  }
+  template <class K = key_type>
+  std::pair<const_iterator, const_iterator> equal_range(
+      const key_arg<K>& key) const {
+    auto it = find(key);
+    if (it != end()) return {it, std::next(it)};
+    return {it, it};
+  }
+
+  size_t bucket_count() const { return capacity_; }
+  float load_factor() const {
+    return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+  }
+  float max_load_factor() const { return 1.0f; }
+  void max_load_factor(float) {
+    // Does nothing.
+  }
+
+  hasher hash_function() const { return hash_ref(); }
+  key_equal key_eq() const { return eq_ref(); }
+  allocator_type get_allocator() const { return alloc_ref(); }
+
+  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+    if (a.size() != b.size()) return false;
+    const raw_hash_set* outer = &a;
+    const raw_hash_set* inner = &b;
+    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+    for (const value_type& elem : *outer)
+      if (!inner->has_element(elem)) return false;
+    return true;
+  }
+
+  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+    return !(a == b);
+  }
+
+  friend void swap(raw_hash_set& a,
+                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+    a.swap(b);
+  }
+
+ private:
+  template <class Container, typename Enabler>
+  friend struct absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess;
+
+  struct FindElement {
+    template <class K, class... Args>
+    const_iterator operator()(const K& key, Args&&...) const {
+      return s.find(key);
+    }
+    const raw_hash_set& s;
+  };
+
+  struct HashElement {
+    template <class K, class... Args>
+    size_t operator()(const K& key, Args&&...) const {
+      return h(key);
+    }
+    const hasher& h;
+  };
+
+  template <class K1>
+  struct EqualElement {
+    template <class K2, class... Args>
+    bool operator()(const K2& lhs, Args&&...) const {
+      return eq(lhs, rhs);
+    }
+    const K1& rhs;
+    const key_equal& eq;
+  };
+
+  struct EmplaceDecomposable {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        s.emplace_at(res.first, std::forward<Args>(args)...);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+  };
+
+  template <bool do_destroy>
+  struct InsertSlot {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+      } else if (do_destroy) {
+        PolicyTraits::destroy(&s.alloc_ref(), &slot);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+    // Constructed slot. Either moved into place or destroyed.
+    slot_type&& slot;
+  };
+
+  // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
+  static inline size_t NumSlotsFast(size_t n) {
+    return static_cast<size_t>(
+        (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
+        kMaxLoadFactorNumerator);
+  }
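+
+  // Worked example (illustrative only): for n == 12,
+  //   (12 * 16 + 13) / 14 == 205 / 14 == 14
+  // in integer arithmetic, which matches ceil(12 / 0.875) == 14. Adding
+  // (numerator - 1) before dividing is the standard trick for rounding an
+  // integer division up instead of down.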
+
+  // "erases" the object from the container, except that it doesn't actually
+  // destroy the object. It only updates all the metadata of the class.
+  // This can be used in conjunction with Policy::transfer to move the object
+  // to another place.
+  void erase_meta_only(const_iterator it) {
+    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+    --size_;
+    const size_t index = it.inner_.ctrl_ - ctrl_;
+    const size_t index_before = (index - Group::kWidth) & capacity_;
+    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+    // We count how many consecutive non-empties we have to the right and to
+    // the left of `it`. If the sum is >= kWidth then there is at least one
+    // probe window that might have seen a full group.
+    bool was_never_full =
+        empty_before && empty_after &&
+        static_cast<size_t>(empty_after.TrailingZeros() +
+                            empty_before.LeadingZeros()) < Group::kWidth;
+
+    set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+    growth_left() += was_never_full;
+  }
+
+  void initialize_slots() {
+    assert(capacity_);
+    auto layout = MakeLayout(capacity_);
+    char* mem = static_cast<char*>(
+        Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
+    ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
+    slots_ = layout.template Pointer<1>(mem);
+    reset_ctrl();
+    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+  }
+
+  void destroy_slots() {
+    if (!capacity_) return;
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (IsFull(ctrl_[i])) {
+        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+      }
+    }
+    auto layout = MakeLayout(capacity_);
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+    ctrl_ = EmptyGroup();
+    slots_ = nullptr;
+    size_ = 0;
+    capacity_ = 0;
+    growth_left() = 0;
+  }
+
+  void resize(size_t new_capacity) {
+    assert(IsValidCapacity(new_capacity));
+    auto* old_ctrl = ctrl_;
+    auto* old_slots = slots_;
+    const size_t old_capacity = capacity_;
+    capacity_ = new_capacity;
+    initialize_slots();
+
+    for (size_t i = 0; i != old_capacity; ++i) {
+      if (IsFull(old_ctrl[i])) {
+        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                          PolicyTraits::element(old_slots + i));
+        size_t new_i = find_first_non_full(hash);
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+      }
+    }
+    if (old_capacity) {
+      SanitizerUnpoisonMemoryRegion(old_slots,
+                                    sizeof(slot_type) * old_capacity);
+      auto layout = MakeLayout(old_capacity);
+      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+                                      layout.AllocSize());
+    }
+  }
+
+  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+    assert(IsValidCapacity(capacity_));
+    // Algorithm:
+    // - mark all DELETED slots as EMPTY
+    // - mark all FULL slots as DELETED
+    // - for each slot marked as DELETED
+    //     hash = Hash(element)
+    //     target = find_first_non_full(hash)
+    //     if target is in the same group
+    //       mark slot as FULL
+    //     else if target is EMPTY
+    //       transfer element to target
+    //       mark slot as EMPTY
+    //       mark target as FULL
+    //     else if target is DELETED
+    //       swap current element with target element
+    //       mark target as FULL
+    //       repeat procedure for current slot with moved from element (target)
+    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+        raw;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (!IsDeleted(ctrl_[i])) continue;
+      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                        PolicyTraits::element(slots_ + i));
+      size_t new_i = find_first_non_full(hash);
+
+      // Verify if the old and new i fall within the same group wrt the hash.
+      // If they do, we don't need to move the object as it already falls in
+      // the best probe position we can offer.
+      const auto probe_index = [&](size_t pos) {
+        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+      };
+
+      // Element doesn't move.
+      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+        set_ctrl(i, H2(hash));
+        continue;
+      }
+      if (IsEmpty(ctrl_[new_i])) {
+        // Transfer element to the empty spot.
+        // set_ctrl poisons/unpoisons the slots so we have to call it at the
+        // right time.
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+        set_ctrl(i, kEmpty);
+      } else {
+        assert(IsDeleted(ctrl_[new_i]));
+        set_ctrl(new_i, H2(hash));
+        // Until we are done rehashing, DELETED marks previously FULL slots.
+        // Swap i and new_i elements.
+        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+        --i;  // repeat
+      }
+    }
+    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+  }
+
+  void rehash_and_grow_if_necessary() {
+    if (capacity_ == 0) {
+      resize(Group::kWidth - 1);
+    } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+      // Squash DELETED without growing if there is enough capacity.
+      drop_deletes_without_resize();
+    } else {
+      // Otherwise grow the container.
+      resize(capacity_ * 2 + 1);
+    }
+  }
+
+  bool has_element(const value_type& elem) const {
+    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+                              elem))
+          return true;
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+    return false;
+  }
+
+  // Probes the raw_hash_set with the probe sequence for hash and returns the
+  // index of the first empty or deleted slot.
+  // NOTE: this function must work with tables having both kEmpty and kDeleted
+  // in one group. Such tables appear during drop_deletes_without_resize.
+  //
+  // This function is very useful when insertions happen and:
+  // - the input is already a set
+  // - there are enough slots
+  // - the element with the hash is not in the table
+  size_t find_first_non_full(size_t hash) {
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      auto mask = g.MatchEmptyOrDeleted();
+      if (mask) {
+#if !defined(NDEBUG)
+        // We want to force small tables to have random entries too, so
+        // in debug build we will randomly insert in either the front or back
+        // of the group.
+        // TODO(kfm,sbenza): revisit after we do unconditional mixing
+        if (ShouldInsertBackwards(hash, ctrl_))
+          return seq.offset(mask.HighestBitSet());
+        else
+          return seq.offset(mask.LowestBitSet());
+#else
+        return seq.offset(mask.LowestBitSet());
+#endif
+      }
+      assert(seq.index() < capacity_ && "full table!");
+      seq.next();
+    }
+  }
+
+  // TODO(alkis): Optimize this assuming *this and that don't overlap.
+  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+    raw_hash_set tmp(std::move(that));
+    swap(tmp);
+    return *this;
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+    raw_hash_set tmp(std::move(that), alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+ protected:
+  template <class K>
+  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    auto hash = hash_ref()(key);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return {seq.offset(i), false};
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+      seq.next();
+    }
+    return {prepare_insert(hash), true};
+  }
+
+  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+    size_t target = find_first_non_full(hash);
+    if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(hash);
+    }
+    ++size_;
+    growth_left() -= IsEmpty(ctrl_[target]);
+    set_ctrl(target, H2(hash));
+    return target;
+  }
+
+  // Constructs the value in the space pointed by the iterator. This only works
+  // after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+  //
+  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+  // k is the key decomposed from `forward<Args>(args)...`, and the bool
+  // returned by find_or_prepare_insert(k) was true.
+  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+  template <class... Args>
+  void emplace_at(size_t i, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+                            std::forward<Args>(args)...);
+
+    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+               iterator_at(i) &&
+           "constructed value does not match the lookup key");
+  }
+
+  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+  friend struct RawHashSetTestOnlyAccess;
+
+  probe_seq<Group::kWidth> probe(size_t hash) const {
+    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
+  }
+
+  // Reset all ctrl bytes back to kEmpty, except the sentinel.
+  void reset_ctrl() {
+    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+    ctrl_[capacity_] = kSentinel;
+    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+  }
+
+  // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
+  // the end too.
+  void set_ctrl(size_t i, ctrl_t h) {
+    assert(i < capacity_);
+
+    if (IsFull(h)) {
+      SanitizerUnpoisonObject(slots_ + i);
+    } else {
+      SanitizerPoisonObject(slots_ + i);
+    }
+
+    ctrl_[i] = h;
+    ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+  }
+
+  size_t& growth_left() { return settings_.template get<0>(); }
+
+  hasher& hash_ref() { return settings_.template get<1>(); }
+  const hasher& hash_ref() const { return settings_.template get<1>(); }
+  key_equal& eq_ref() { return settings_.template get<2>(); }
+  const key_equal& eq_ref() const { return settings_.template get<2>(); }
+  allocator_type& alloc_ref() { return settings_.template get<3>(); }
+  const allocator_type& alloc_ref() const {
+    return settings_.template get<3>();
+  }
+
+  // On average each group has 2 empty slots (for the vectorized case).
+  static constexpr int64_t kMaxLoadFactorNumerator = 14;
+  static constexpr int64_t kMaxLoadFactorDenominator = 16;
+  static constexpr float kMaxLoadFactor =
+      1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
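+
+  // In other words (illustrative only): the table grows once it is 14/16 ==
+  // 87.5% full, so with a 16-wide group an average group retains about two
+  // non-full control bytes, which keeps MatchEmpty() likely to terminate a
+  // probe after inspecting a single group.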
+
+  // TODO(alkis): Investigate removing some of these fields:
+  // - ctrl/slots can be derived from each other
+  // - size can be moved into the slot array
+  ctrl_t* ctrl_ = EmptyGroup();  // [(capacity + 1) * ctrl_t]
+  slot_type* slots_ = nullptr;   // [capacity * slot_type]
+  size_t size_ = 0;              // number of full slots
+  size_t capacity_ = 0;          // total number of slots
+  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+                                            key_equal, allocator_type>
+      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+  using Traits = typename Set::PolicyTraits;
+  using Slot = typename Traits::slot_type;
+
+  static size_t GetNumProbes(const Set& set,
+                             const typename Set::key_type& key) {
+    size_t num_probes = 0;
+    size_t hash = set.hash_ref()(key);
+    auto seq = set.probe(hash);
+    while (true) {
+      container_internal::Group g{set.ctrl_ + seq.offset()};
+      for (int i : g.Match(container_internal::H2(hash))) {
+        if (Traits::apply(
+                typename Set::template EqualElement<typename Set::key_type>{
+                    key, set.eq_ref()},
+                Traits::element(set.slots_ + seq.offset(i))))
+          return num_probes;
+        ++num_probes;
+      }
+      if (g.MatchEmpty()) return num_probes;
+      seq.next();
+      ++num_probes;
+    }
+  }
+
+  static size_t AllocatedByteSize(const Set& c) {
+    size_t capacity = c.capacity_;
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * c.size();
+    } else {
+      for (size_t i = 0; i != capacity; ++i) {
+        if (container_internal::IsFull(c.ctrl_[i])) {
+          m += Traits::space_used(c.slots_ + i);
+        }
+      }
+    }
+    return m;
+  }
+
+  static size_t LowerBoundAllocatedByteSize(size_t size) {
+    size_t capacity = container_internal::NormalizeCapacity(
+        std::ceil(size / Set::kMaxLoadFactor));
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * size;
+    }
+    return m;
+  }
+};
+
+}  // namespace hashtable_debug_internal
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
new file mode 100644
index 00000000..f5779d62
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -0,0 +1,430 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <limits>
+#include <scoped_allocator>
+
+#include "gtest/gtest.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/container/internal/tracked.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+namespace {
+
+enum AllocSpec {
+  kPropagateOnCopy = 1,
+  kPropagateOnMove = 2,
+  kPropagateOnSwap = 4,
+};
+
+struct AllocState {
+  size_t num_allocs = 0;
+  std::set<void*> owned;
+};
+
+template <class T, int Spec = 0 /* AllocSpec */>
+class CheckedAlloc {
+ public:
+  template <class, int>
+  friend class CheckedAlloc;
+
+  using value_type = T;
+
+  CheckedAlloc() {}
+  explicit CheckedAlloc(size_t id) : id_(id) {}
+  CheckedAlloc(const CheckedAlloc&) = default;
+  CheckedAlloc& operator=(const CheckedAlloc&) = default;
+
+  template <class U>
+  CheckedAlloc(const CheckedAlloc<U, Spec>& that)
+      : id_(that.id_), state_(that.state_) {}
+
+  template <class U>
+  struct rebind {
+    using other = CheckedAlloc<U, Spec>;
+  };
+
+  using propagate_on_container_copy_assignment =
+      std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>;
+
+  using propagate_on_container_move_assignment =
+      std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>;
+
+  using propagate_on_container_swap =
+      std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>;
+
+  CheckedAlloc select_on_container_copy_construction() const {
+    if (Spec & kPropagateOnCopy) return *this;
+    return {};
+  }
+
+  T* allocate(size_t n) {
+    T* ptr = std::allocator<T>().allocate(n);
+    track_alloc(ptr);
+    return ptr;
+  }
+  void deallocate(T* ptr, size_t n) {
+    memset(ptr, 0, n * sizeof(T));  // The freed memory must be unpoisoned.
+    track_dealloc(ptr);
+    return std::allocator<T>().deallocate(ptr, n);
+  }
+
+  friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) {
+    return !(a == b);
+  }
+
+  size_t num_allocs() const { return state_->num_allocs; }
+
+  void swap(CheckedAlloc& that) {
+    using std::swap;
+    swap(id_, that.id_);
+    swap(state_, that.state_);
+  }
+
+  friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); }
+
+  friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) {
+    return o << "alloc(" << a.id_ << ")";
+  }
+
+ private:
+  void track_alloc(void* ptr) {
+    AllocState* state = state_.get();
+    ++state->num_allocs;
+    if (!state->owned.insert(ptr).second)
+      ADD_FAILURE() << *this << " got previously allocated memory: " << ptr;
+  }
+  void track_dealloc(void* ptr) {
+    if (state_->owned.erase(ptr) != 1)
+      ADD_FAILURE() << *this
+                    << " deleting memory owned by another allocator: " << ptr;
+  }
+
+  size_t id_ = std::numeric_limits<size_t>::max();
+
+  std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>();
+};
+
+struct Identity {
+  int32_t operator()(int32_t v) const { return v; }
+};
+
+struct Policy {
+  using slot_type = Tracked<int32_t>;
+  using init_type = Tracked<int32_t>;
+  using key_type = int32_t;
+
+  template <class allocator_type, class... Args>
+  static void construct(allocator_type* alloc, slot_type* slot,
+                        Args&&... args) {
+    std::allocator_traits<allocator_type>::construct(
+        *alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  template <class allocator_type>
+  static void transfer(allocator_type* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(*old_slot));
+    destroy(alloc, old_slot);
+  }
+
+  template <class F>
+  static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) {
+    return std::forward<F>(f)(v, v);
+  }
+
+  template <class F>
+  static auto apply(F&& f, const slot_type& v)
+      -> decltype(std::forward<F>(f)(v.val(), v)) {
+    return std::forward<F>(f)(v.val(), v);
+  }
+
+  template <class F>
+  static auto apply(F&& f, slot_type&& v)
+      -> decltype(std::forward<F>(f)(v.val(), std::move(v))) {
+    return std::forward<F>(f)(v.val(), std::move(v));
+  }
+
+  static slot_type& element(slot_type* slot) { return *slot; }
+};
+
+template <int Spec>
+struct PropagateTest : public ::testing::Test {
+  using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>;
+
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>;
+
+  PropagateTest() {
+    EXPECT_EQ(a1, t1.get_allocator());
+    EXPECT_NE(a2, t1.get_allocator());
+  }
+
+  Alloc a1 = Alloc(1);
+  Table t1 = Table(0, a1);
+  Alloc a2 = Alloc(2);
+};
+
+using PropagateOnAll =
+    PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>;
+
+TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); }
+
+TEST_F(PropagateOnAll, InsertAllocates) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, InsertDecomposes) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+
+  EXPECT_FALSE(t1.insert(0).second);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, RehashMoves) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(0, it->num_moves());
+  t1.rehash(2 * t1.capacity());
+  EXPECT_EQ(2, a1.num_allocs());
+  it = t1.find(0);
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, u.get_allocator().num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1));
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1));
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = std::move(t1);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, Swap) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u.swap(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(a2, t1.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // inline namespace lts_2018_12_18
+}  // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
new file mode 100644
index 00000000..302f9758
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -0,0 +1,1830 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+inline namespace lts_2018_12_18 {
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+  template <typename C>
+  static auto GetSlots(const C& c) -> decltype(c.slots_) {
+    return c.slots_;
+  }
+};
+
+namespace {
+
+using ::testing::DoubleNear;
+using ::testing::ElementsAre;
+using ::testing::Optional;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(Util, NormalizeCapacity) {
+  constexpr size_t kMinCapacity = Group::kWidth - 1;
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
+  EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
+  EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
+  EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+}
+
+TEST(Util, probe_seq) {
+  probe_seq<16> seq(0, 127);
+  auto gen = [&]() {
+    size_t res = seq.offset();
+    seq.next();
+    return res;
+  };
+  std::vector<size_t> offsets(8);
+  std::generate_n(offsets.begin(), 8, gen);
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+  seq = probe_seq<16>(128, 127);
+  std::generate_n(offsets.begin(), 8, gen);
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+}
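+
+// Illustrative sketch, not part of the original change: the offsets pinned
+// down above come from quadratic probing over groups. With group width W and
+// power-of-two mask M, the stride grows by W on every step, so the i-th
+// offset is W*i*(i+1)/2 wrapped with & M, which visits each group once.
+TEST(Util, ProbeSeqMatchesClosedForm) {
+  probe_seq<16> seq(0, 127);
+  for (size_t i = 0; i != 8; ++i, seq.next()) {
+    EXPECT_EQ((16 * i * (i + 1) / 2) & 127, seq.offset());
+  }
+}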
+
+TEST(BitMask, Smoke) {
+  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
+  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
+
+  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
+}
+
+TEST(BitMask, WithShift) {
+  // See the non-SSE version of Group for details on what this math is for.
+  uint64_t ctrl = 0x1716151413121110;
+  uint64_t hash = 0x12;
+  constexpr uint64_t msbs = 0x8080808080808080ULL;
+  constexpr uint64_t lsbs = 0x0101010101010101ULL;
+  auto x = ctrl ^ (lsbs * hash);
+  uint64_t mask = (x - lsbs) & ~x & msbs;
+  EXPECT_EQ(0x0000000080800000, mask);
+
+  BitMask<uint64_t, 8, 3> b(mask);
+  EXPECT_EQ(*b, 2);
+}
+
+TEST(BitMask, LeadingTrailing) {
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b0001101001000000).LeadingZeros()), 3);
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b0001101001000000).TrailingZeros()), 6);
+
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b0000000000000001).LeadingZeros()), 15);
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b0000000000000001).TrailingZeros()), 0);
+
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b1000000000000000).LeadingZeros()), 0);
+  EXPECT_EQ((BitMask<uint16_t, 16>(0b1000000000000000).TrailingZeros()), 15);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
+}
+
+TEST(Group, EmptyGroup) {
+  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
+}
+
+TEST(Group, Match) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
+    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
+    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
+    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
+    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
+TEST(Group, MatchEmpty) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
+TEST(Group, MatchEmptyOrDeleted) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
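+
+// Illustrative sketch, not part of the original change: WithShift above
+// relies on the SWAR zero-byte trick. XORing the control word with lsbs * h
+// zeroes exactly the bytes equal to h, and (x - lsbs) & ~x & msbs then sets
+// the high bit of each zero byte (with an occasional benign false positive
+// on the byte just above a match, which the later key comparison filters).
+TEST(BitMask, SwarMatchSketch) {
+  constexpr uint64_t msbs = 0x8080808080808080ULL;
+  constexpr uint64_t lsbs = 0x0101010101010101ULL;
+  auto match = [&](uint64_t ctrl, uint64_t h) {
+    uint64_t x = ctrl ^ (lsbs * h);
+    return (x - lsbs) & ~x & msbs;
+  };
+  // Only byte 4 (counting from the least significant byte) holds 0x42, so
+  // only byte 4's high bit survives.
+  EXPECT_EQ(uint64_t{0x0000008000000000}, match(0x0000004200000000, 0x42));
+}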
+
+TEST(Batch, DropDeletes) {
+  constexpr size_t kCapacity = 63;
+  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
+  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
+  ctrl[kCapacity] = kSentinel;
+  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+  for (size_t i = 0; i != kCapacity; ++i) {
+    ctrl[i] = pattern[i % pattern.size()];
+    if (i < kGroupWidth - 1)
+      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
+  }
+  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
+  ASSERT_EQ(ctrl[kCapacity], kSentinel);
+  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
+    if (i == kCapacity) expected = kSentinel;
+    if (expected == kDeleted) expected = kEmpty;
+    if (IsFull(expected)) expected = kDeleted;
+    EXPECT_EQ(ctrl[i], expected)
+        << i << " " << int{pattern[i % pattern.size()]};
+  }
+}
+
+TEST(Group, CountLeadingEmptyOrDeleted) {
+  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
+  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+
+  for (ctrl_t empty : empty_examples) {
+    std::vector<ctrl_t> e(Group::kWidth, empty);
+    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
+    for (ctrl_t full : full_examples) {
+      for (size_t i = 0; i != Group::kWidth; ++i) {
+        std::vector<ctrl_t> f(Group::kWidth, empty);
+        f[i] = full;
+        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
+      }
+      std::vector<ctrl_t> f(Group::kWidth, empty);
+      f[Group::kWidth * 2 / 3] = full;
+      f[Group::kWidth / 2] = full;
+      EXPECT_EQ(
+          Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+    }
+  }
+}
+
+struct IntPolicy {
+  using slot_type = int64_t;
+  using key_type = int64_t;
+  using init_type = int64_t;
+
+  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+  static void destroy(void*, int64_t*) {}
+  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static int64_t& element(slot_type* slot) { return *slot; }
+
+  template <class F>
+  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
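+
+// Illustrative sketch, not part of the original change: `apply` is how the
+// table decomposes an initializer into a (key, value) pair without first
+// materializing a slot. For IntPolicy the key and the value are the same
+// int64_t, so a functor passed through apply sees the argument twice:
+TEST(IntPolicySketch, ApplyPassesKeyAndValue) {
+  int64_t seen = 0;
+  IntPolicy::apply([&](int64_t k, int64_t v) { seen = k + v; }, 21);
+  EXPECT_EQ(42, seen);
+}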
+
+class StringPolicy {
+  template <class F, class K, class V,
+            class = typename std::enable_if<
+                std::is_convertible<const K&, absl::string_view>::value>::type>
+  decltype(std::declval<F>()(
+      std::declval<const absl::string_view&>(), std::piecewise_construct,
+      std::declval<std::tuple<K>>(),
+      std::declval<V>())) static apply_impl(F&& f,
+                                            std::pair<std::tuple<K>, V> p) {
+    const absl::string_view& key = std::get<0>(p.first);
+    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+                              std::move(p.second));
+  }
+
+ public:
+  struct slot_type {
+    struct ctor {};
+
+    template <class... Ts>
+    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+    std::pair<std::string, std::string> pair;
+  };
+
+  using key_type = std::string;
+  using init_type = std::pair<std::string, std::string>;
+
+  template <class allocator_type, class... Args>
+  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+    std::allocator_traits<allocator_type>::construct(
+        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  template <class allocator_type>
+  static void transfer(allocator_type* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(old_slot->pair));
+    destroy(alloc, old_slot);
+  }
+
+  static std::pair<std::string, std::string>& element(slot_type* slot) {
+    return slot->pair;
+  }
+
+  template <class F, class... Args>
+  static auto apply(F&& f, Args&&... args)
+      -> decltype(apply_impl(std::forward<F>(f),
+                             PairArgs(std::forward<Args>(args)...))) {
+    return apply_impl(std::forward<F>(f),
+                      PairArgs(std::forward<Args>(args)...));
+  }
+};
+
+struct StringHash : absl::Hash<absl::string_view> {
+  using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+  using is_transparent = void;
+};
+
+struct StringTable
+    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+  using Base = typename StringTable::raw_hash_set;
+  StringTable() {}
+  using Base::Base;
+};
+
+struct IntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, std::allocator<int64_t>> {
+  using Base = typename IntTable::raw_hash_set;
+  IntTable() {}
+  using Base::Base;
+};
+
+struct BadFastHash {
+  template <class T>
+  size_t operator()(const T&) const {
+    return 0;
+  }
+};
+
+struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
+                               std::allocator<int>> {
+  using Base = typename BadTable::raw_hash_set;
+  BadTable() {}
+  using Base::Base;
+};
+
+TEST(Table, EmptyFunctorOptimization) {
+  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
+  static_assert(std::is_empty<std::allocator<int>>::value, "");
+
+  struct MockTable {
+    void* ctrl;
+    void* slots;
+    size_t size;
+    size_t capacity;
+    size_t growth_left;
+  };
+  struct StatelessHash {
+    size_t operator()(absl::string_view) const { return 0; }
+  };
+  struct StatefulHash : StatelessHash {
+    size_t dummy;
+  };
+
+  EXPECT_EQ(
+      sizeof(MockTable),
+      sizeof(
+          raw_hash_set<StringPolicy, StatelessHash,
+                       std::equal_to<absl::string_view>, std::allocator<int>>));
+
+  EXPECT_EQ(
+      sizeof(MockTable) + sizeof(StatefulHash),
+      sizeof(
+          raw_hash_set<StringPolicy, StatefulHash,
+                       std::equal_to<absl::string_view>, std::allocator<int>>));
+}
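+
+// Illustrative sketch, not part of the original change: the sizeof equality
+// above works because the table keeps its hasher, equality functor, and
+// allocator in a compressed tuple, where empty types are stored as base
+// classes. On common ABIs the empty base optimization then makes them free:
+TEST(Table, EmptyBaseOptimizationSketch) {
+  struct Empty {};
+  struct WithEmptyBase : Empty {
+    void* p;
+  };
+  EXPECT_EQ(sizeof(void*), sizeof(WithEmptyBase));
+}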
+
+TEST(Table, Empty) {
+  IntTable t;
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.empty());
+}
+
+#ifdef __GNUC__
+template <typename T>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
+  asm volatile("" : : "r,m"(v) : "memory");
+}
+#endif
+
+TEST(Table, Prefetch) {
+  IntTable t;
+  t.emplace(1);
+  // Works for both present and absent keys.
+  t.prefetch(1);
+  t.prefetch(2);
+
+  // Do not run in debug mode, when prefetch is not implemented, or when
+  // sanitizers are enabled.
+#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
+    !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) &&            \
+    !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+  const auto now = [] { return absl::base_internal::CycleClock::Now(); };
+
+  // Make the table large enough that it does not fit in the L2 cache
+  // (16.7 Mb).
+  static constexpr int size = 1 << 22;
+  for (int i = 0; i < size; ++i) t.insert(i);
+
+  int64_t no_prefetch = 0, prefetch = 0;
+  for (int iter = 0; iter < 10; ++iter) {
+    int64_t time = now();
+    for (int i = 0; i < size; ++i) {
+      DoNotOptimize(t.find(i));
+    }
+    no_prefetch += now() - time;
+
+    time = now();
+    for (int i = 0; i < size; ++i) {
+      t.prefetch(i + 20);
+      DoNotOptimize(t.find(i));
+    }
+    prefetch += now() - time;
+  }
+
+  // no_prefetch is at least 30% slower.
+  EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
+#endif
+}
+
+TEST(Table, LookupEmpty) {
+  IntTable t;
+  auto it = t.find(0);
+  EXPECT_TRUE(it == t.end());
+}
+
+TEST(Table, Insert1) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+}
+
+TEST(Table, Insert2) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_TRUE(t.find(1) == t.end());
+  res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, InsertCollision) {
+  BadTable t;
+  EXPECT_TRUE(t.find(1) == t.end());
+  auto res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(1, t.size());
+
+  EXPECT_TRUE(t.find(2) == t.end());
+  res = t.emplace(2);
+  EXPECT_THAT(*res.first, 2);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(2, t.size());
+
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+}
+
+// Test that we do not add an existing element when the search has to pass
+// through many groups with deleted elements.
+TEST(Table, InsertCollisionAndFindAfterDelete) {
+  BadTable t;  // all elements go to the same group.
+  // Have at least 2 groups with Group::kWidth collisions
+  // plus some extra collisions in the last group.
+  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
+  for (size_t i = 0; i < kNumInserts; ++i) {
+    auto res = t.emplace(i);
+    EXPECT_TRUE(res.second);
+    EXPECT_THAT(*res.first, i);
+    EXPECT_EQ(i + 1, t.size());
+  }
+
+  // Remove elements one by one and check
+  // that we can still find all other elements.
+  for (size_t i = 0; i < kNumInserts; ++i) {
+    EXPECT_EQ(1, t.erase(i)) << i;
+    for (size_t j = i + 1; j < kNumInserts; ++j) {
+      EXPECT_THAT(*t.find(j), j);
+      auto res = t.emplace(j);
+      EXPECT_FALSE(res.second) << i << " " << j;
+      EXPECT_THAT(*res.first, j);
+      EXPECT_EQ(kNumInserts - i - 1, t.size());
+    }
+  }
+  EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LazyEmplace) {
+  StringTable t;
+  bool called = false;
+  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "ABC");
+  });
+  EXPECT_TRUE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+  called = false;
+  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "DEF");
+  });
+  EXPECT_FALSE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+}
+
+TEST(Table, ContainsEmpty) {
+  IntTable t;
+
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains1) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  EXPECT_EQ(1, t.erase(0));
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains2) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  t.clear();
+  EXPECT_FALSE(t.contains(0));
+}
+
+int decompose_constructed;
+struct DecomposeType {
+  DecomposeType(int i) : i(i) {  // NOLINT
+    ++decompose_constructed;
+  }
+
+  explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+
+  int i;
+};
+
+struct DecomposeHash {
+  using is_transparent = void;
+  size_t operator()(DecomposeType a) const { return a.i; }
+  size_t operator()(int a) const { return a; }
+  size_t operator()(const char* a) const { return *a; }
+};
+
+struct DecomposeEq {
+  using is_transparent = void;
+  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
+  bool operator()(DecomposeType a, int b) const { return a.i == b; }
+  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+};
+
+struct DecomposePolicy {
+  using slot_type = DecomposeType;
+  using key_type = DecomposeType;
+  using init_type = DecomposeType;
+
+  template <typename T>
+  static void construct(void*, DecomposeType* slot, T&& v) {
+    *slot = DecomposeType(std::forward<T>(v));
+  }
+  static void destroy(void*, DecomposeType*) {}
+  static DecomposeType& element(slot_type* slot) { return *slot; }
+
+  template <class F, class T>
+  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
+
+template <typename Hash, typename Eq>
+void TestDecompose(bool construct_three) {
+  DecomposeType elem{0};
+  const int one = 1;
+  const char* three_p = "3";
+  const auto& three = three_p;
+
+  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+
+  decompose_constructed = 0;
+  int expected_constructed = 0;
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(elem);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(1);
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  set1.emplace("3");
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+
+  {  // insert(T&&)
+    set1.insert(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(const T&)
+    set1.insert(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, T&&)
+    set1.insert(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, const T&)
+    set1.insert(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace(...)
+    set1.emplace(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace("3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace_hint(...)
+    set1.emplace_hint(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), "3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+}
+
+TEST(Table, Decompose) {
+  TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+  struct TransparentHashIntOverload {
+    size_t operator()(DecomposeType a) const { return a.i; }
+    size_t operator()(int a) const { return a; }
+  };
+  struct TransparentEqIntOverload {
+    bool operator()(DecomposeType a, DecomposeType b) const {
+      return a.i == b.i;
+    }
+    bool operator()(DecomposeType a, int b) const { return a.i == b; }
+  };
+  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+}
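+
+// Illustrative sketch, not part of the original change: "decomposing" means
+// hashing and comparing the argument itself instead of constructing a
+// DecomposeType first, so a lookup hit costs zero constructions. This only
+// works when both functors have transparent overloads for the argument type:
+TEST(Table, DecomposeCountSketch) {
+  raw_hash_set<DecomposePolicy, DecomposeHash, DecomposeEq,
+               std::allocator<int>> set;
+  set.insert(DecomposeType{7});
+  decompose_constructed = 0;
+  set.insert(7);  // found via the int overloads; nothing is constructed
+  EXPECT_EQ(0, decompose_constructed);
+}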
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+  IntTable t;
+  t.reserve(n);
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  const size_t c = t.bucket_count();
+  while (c == t.bucket_count()) t.emplace(n++);
+  return t.size() - 1;
+}
+
+struct Modulo1000Hash {
+  size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+                          std::allocator<int>> {};
+
+// Test that a rehash with no resize happens when there are many deleted
+// slots.
+TEST(Table, RehashWithNoResize) {
+  Modulo1000HashTable t;
+  // Add keys that all share one hash so that there are at least
+  // kMinFullGroups groups with Group::kWidth collisions, then fill up to
+  // MaxDensitySize.
+  const size_t kMinFullGroups = 7;
+  std::vector<int> keys;
+  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+    int k = i * 1000;
+    t.emplace(k);
+    keys.push_back(k);
+  }
+  const size_t capacity = t.capacity();
+
+  // Remove elements from all groups except the first and the last one.
+  // All elements removed from full groups will be marked as kDeleted.
+  const size_t erase_begin = Group::kWidth / 2;
+  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+  for (size_t i = erase_begin; i < erase_end; ++i) {
+    EXPECT_EQ(1, t.erase(keys[i])) << i;
+  }
+  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+  auto last_key = keys.back();
+  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+  ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+  int x = 1;
+  // Insert and erase one element until the in-place rehash happens.
+  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+    t.emplace(x);
+    ASSERT_EQ(capacity, t.capacity());
+    // All elements should be there.
+    ASSERT_TRUE(t.find(x) != t.end()) << x;
+    for (const auto& k : keys) {
+      ASSERT_TRUE(t.find(k) != t.end()) << k;
+    }
+    t.erase(x);
+    ++x;
+  }
+}
+
+TEST(Table, InsertEraseStressTest) {
+  IntTable t;
+  const size_t kMinElementCount = 250;
+  std::deque<int> keys;
+  size_t i = 0;
+  for (; i < MaxDensitySize(kMinElementCount); ++i) {
+    t.emplace(i);
+    keys.push_back(i);
+  }
+  const size_t kNumIterations = 1000000;
+  for (; i < kNumIterations; ++i) {
+    ASSERT_EQ(1, t.erase(keys.front()));
+    keys.pop_front();
+    t.emplace(i);
+    keys.push_back(i);
+  }
+}
+
+TEST(Table, InsertOverloads) {
+  StringTable t;
+  // These should all trigger the insert(init_type) overload.
+  t.insert({{}, {}});
+  t.insert({"ABC", {}});
+  t.insert({"DEF", "!!!"});
+
+  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+                                      Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+  IntTable t;
+  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
+
+// Timeout if copy is quadratic as it was in Rust.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+  static const size_t kLargeSize = 1 << 15;
+
+  IntTable t;
+  for (size_t i = 0; i != kLargeSize; ++i) {
+    t.insert(i);
+  }
+
+  // If this is quadratic, the test will time out.
+  IntTable t2;
+  for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+  IntTable t;
+  constexpr size_t capacity = container_internal::Group::kWidth - 1;
+  constexpr size_t max_size = capacity / 2;
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+  t.clear();
+  ASSERT_EQ(capacity, t.capacity());
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+  // We are checking that original and second are close enough to each other
+  // that they are probably still in the same group.  This is not strictly
+  // guaranteed.
+  EXPECT_LT(std::abs(original - second),
+            capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.erase(res.first);
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+// Collect N bad keys by the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N random elements.
+// 3. Take the first Group::kWidth - 1 into the bad_keys array.
+// 4. Clear the table without resize.
+// 5. Go to point 2 until N keys are collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+  static constexpr int kGroupSize = Group::kWidth - 1;
+
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
+    for (size_t i = b; i != e; ++i) {
+      t->emplace(i);
+    }
+    std::vector<int64_t> res;
+    res.reserve(kGroupSize);
+    auto it = t->begin();
+    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+      res.push_back(*it);
+    }
+    return res;
+  };
+
+  std::vector<int64_t> bad_keys;
+  bad_keys.reserve(N);
+  IntTable t;
+  t.reserve(N * 2);
+
+  for (size_t b = 0; bad_keys.size() < N; b += N) {
+    auto keys = topk_range(b, b + N, &t);
+    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+    t.erase(t.begin(), t.end());
+    EXPECT_TRUE(t.empty());
+  }
+  return bad_keys;
+}
+
+struct ProbeStats {
+  // Number of elements with specific probe length over all tested tables.
+  std::vector<size_t> all_probes_histogram;
+  // Ratios total_probe_length/size for every tested table.
+  std::vector<double> single_table_ratios;
+
+  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
+    ProbeStats res = a;
+    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
+                                             b.all_probes_histogram.size()));
+    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
+                   res.all_probes_histogram.begin(),
+                   res.all_probes_histogram.begin(), std::plus<size_t>());
+    res.single_table_ratios.insert(res.single_table_ratios.end(),
+                                   b.single_table_ratios.begin(),
+                                   b.single_table_ratios.end());
+    return res;
+  }
+
+  // Average ratio total_probe_length/size over tables.
+  double AvgRatio() const {
+    return std::accumulate(single_table_ratios.begin(),
+                           single_table_ratios.end(), 0.0) /
+           single_table_ratios.size();
+  }
+
+  // Maximum ratio total_probe_length/size over tables.
+  double MaxRatio() const {
+    return *std::max_element(single_table_ratios.begin(),
+                             single_table_ratios.end());
+  }
+
+  // Percentile ratio total_probe_length/size over tables.
+  double PercentileRatio(double Percentile = 0.95) const {
+    auto r = single_table_ratios;
+    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+    if (mid != r.end()) {
+      std::nth_element(r.begin(), mid, r.end());
+      return *mid;
+    } else {
+      return MaxRatio();
+    }
+  }
+
+  // Maximum probe length over all elements and all tables.
+  size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+  // Fraction of elements with specified probe length.
+  std::vector<double> ProbeNormalizedHistogram() const {
+    double total_elements = std::accumulate(all_probes_histogram.begin(),
+                                            all_probes_histogram.end(), 0ull);
+    std::vector<double> res;
+    for (size_t p : all_probes_histogram) {
+      res.push_back(p / total_elements);
+    }
+    return res;
+  }
+
+  size_t PercentileProbe(double Percentile = 0.99) const {
+    size_t idx = 0;
+    for (double p : ProbeNormalizedHistogram()) {
+      if (Percentile > p) {
+        Percentile -= p;
+        ++idx;
+      } else {
+        return idx;
+      }
+    }
+    return idx;
+  }
+
+  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+        << ", PercentileRatio:" << s.PercentileRatio()
+        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+    for (double p : s.ProbeNormalizedHistogram()) {
+      out << p << ",";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+struct ExpectedStats {
+  double avg_ratio;
+  double max_ratio;
+  std::vector<std::pair<double, double>> pecentile_ratios;
+  std::vector<std::pair<double, size_t>> pecentile_probes;
+
+  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+        << ", PercentileRatios: [";
+    for (auto el : s.pecentile_ratios) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "], PercentileProbes: [";
+    for (auto el : s.pecentile_probes) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+                 const ProbeStats& stats) {
+  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+  for (auto pr : exp.pecentile_ratios) {
+    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+
+  for (auto pr : exp.pecentile_probes) {
+    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+}
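+
+// Illustrative sketch, not part of the original change: PercentileRatio
+// above is a selection-based percentile. std::nth_element partially sorts in
+// O(n) so that the element at `mid` is exactly the one a full sort would put
+// there:
+TEST(ProbeStatsSketch, NthElementPercentile) {
+  std::vector<double> r = {9, 1, 5, 3, 7};
+  auto mid = r.begin() + static_cast<size_t>(r.size() * 0.5);
+  std::nth_element(r.begin(), mid, r.end());
+  EXPECT_EQ(5, *mid);  // the 50th-percentile element of {1, 3, 5, 7, 9}
+}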
+
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats on num_iters iterations of the following algorithm:
+// 1. Create new table and reserve it to keys.size() * 2
+// 2. Insert all keys xored with seed
+// 3. Collect ProbeStats from final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  const size_t reserve_size = keys.size() * 2;
+
+  ProbeStats stats;
+
+  int64_t seed = 0x71b1a19b907d6e33;
+  while (num_iters--) {
+    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+    IntTable t1;
+    t1.reserve(reserve_size);
+    for (const auto& key : keys) {
+      t1.emplace(key ^ seed);
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        keys.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats XorSeedExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#if NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.05,
+                1.0,
+                {{0.95, 0.5}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      } else {
+        return {0.05,
+                2.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                1.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                1.0,
+                {{0.95, 0.05}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] =
+        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+  }
+  auto expected = XorSeedExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+// Collect total ProbeStats on num_iters iterations of the following algorithm:
+// 1. Create new table
+// 2. Select 10% of keys and insert 10 elements key * 17 + j * 13
+// 3. Collect ProbeStats from final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  ProbeStats stats;
+
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+  while (num_iters--) {
+    IntTable t1;
+    size_t num_keys = keys.size() / 10;
+    size_t start = dist(rng);
+    for (size_t i = 0; i != num_keys; ++i) {
+      for (size_t j = 0; j != 10; ++j) {
+        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+      }
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        t1.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#if NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.15,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.4,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                0.2,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
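+
+// Illustrative usage note, not part of the original change: as the three
+// tests above pin down, rehash(0) is the closest thing to shrink_to_fit().
+// On an empty table it releases the backing array entirely; on a non-empty
+// one it forces a fresh rehash into the smallest adequate capacity:
+TEST(Table, RehashZeroShrinksSketch) {
+  IntTable t;
+  for (int i = 0; i < 100; ++i) t.emplace(i);
+  t.clear();
+  t.rehash(0);  // deallocates, as RehashZeroDeallocatesEmptyTable shows
+  EXPECT_EQ(0, t.bucket_count());
+}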
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.max_load_factor(.321f);
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.max_load_factor(.321f);
+    const float lf = t.max_load_factor();
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_EQ(lf, u.max_load_factor());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(lf, u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;
+  EXPECT_EQ(1, t.size());
+  EXPECT_EQ(lf, t.max_load_factor());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.max_load_factor(.321f);
+  const float lf = t.max_load_factor();
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_EQ(lf, u.max_load_factor());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // construct over a deleted slot.
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, std::allocator<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, std::allocator<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template