Diffstat (limited to 'absl/container/internal')
-rw-r--r-- absl/container/internal/common.h | 198
-rw-r--r-- absl/container/internal/compressed_tuple.h | 162
-rw-r--r-- absl/container/internal/compressed_tuple_test.cc | 269
-rw-r--r-- absl/container/internal/container_memory.h | 69
-rw-r--r-- absl/container/internal/container_memory_test.cc | 6
-rw-r--r-- absl/container/internal/counting_allocator.h | 81
-rw-r--r-- absl/container/internal/hash_function_defaults.h | 11
-rw-r--r-- absl/container/internal/hash_function_defaults_test.cc | 28
-rw-r--r-- absl/container/internal/hash_generator_testing.cc | 6
-rw-r--r-- absl/container/internal/hash_generator_testing.h | 6
-rw-r--r-- absl/container/internal/hash_policy_testing.h | 6
-rw-r--r-- absl/container/internal/hash_policy_testing_test.cc | 6
-rw-r--r-- absl/container/internal/hash_policy_traits.h | 6
-rw-r--r-- absl/container/internal/hash_policy_traits_test.cc | 6
-rw-r--r-- absl/container/internal/hashtable_debug.h | 8
-rw-r--r-- absl/container/internal/hashtable_debug_hooks.h | 6
-rw-r--r-- absl/container/internal/hashtablez_sampler.cc | 310
-rw-r--r-- absl/container/internal/hashtablez_sampler.h | 290
-rw-r--r-- absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 29
-rw-r--r-- absl/container/internal/hashtablez_sampler_test.cc | 357
-rw-r--r-- absl/container/internal/have_sse.h | 49
-rw-r--r-- absl/container/internal/inlined_vector.h | 895
-rw-r--r-- absl/container/internal/layout.h | 9
-rw-r--r-- absl/container/internal/layout_test.cc | 34
-rw-r--r-- absl/container/internal/node_hash_policy.h | 6
-rw-r--r-- absl/container/internal/node_hash_policy_test.cc | 6
-rw-r--r-- absl/container/internal/raw_hash_map.h | 21
-rw-r--r-- absl/container/internal/raw_hash_set.cc | 6
-rw-r--r-- absl/container/internal/raw_hash_set.h | 392
-rw-r--r-- absl/container/internal/raw_hash_set_allocator_test.cc | 6
-rw-r--r-- absl/container/internal/raw_hash_set_test.cc | 304
-rw-r--r-- absl/container/internal/test_instance_tracker.cc | 6
-rw-r--r-- absl/container/internal/test_instance_tracker.h | 18
-rw-r--r-- absl/container/internal/test_instance_tracker_test.cc | 4
-rw-r--r-- absl/container/internal/tracked.h | 6
-rw-r--r-- absl/container/internal/unordered_map_constructor_test.h | 148
-rw-r--r-- absl/container/internal/unordered_map_lookup_test.h | 8
-rw-r--r-- absl/container/internal/unordered_map_members_test.h | 87
-rw-r--r-- absl/container/internal/unordered_map_modifiers_test.h | 8
-rw-r--r-- absl/container/internal/unordered_map_test.cc | 14
-rw-r--r-- absl/container/internal/unordered_set_constructor_test.h | 155
-rw-r--r-- absl/container/internal/unordered_set_lookup_test.h | 8
-rw-r--r-- absl/container/internal/unordered_set_members_test.h | 86
-rw-r--r-- absl/container/internal/unordered_set_modifiers_test.h | 8
-rw-r--r-- absl/container/internal/unordered_set_test.cc | 24
45 files changed, 3549 insertions, 619 deletions
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
new file mode 100644
index 00000000..a02cd5c3
--- /dev/null
+++ b/absl/container/internal/common.h
@@ -0,0 +1,198 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
+ constexpr node_handle_base() {}
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) noexcept {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ friend struct CommonAccess;
+
+ struct transfer_tag_t {};
+ node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ struct move_tag_t {};
+ node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::construct(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+ absl::optional<allocator_type> alloc_;
+ mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
+ slot_space_;
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+ typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using value_type = typename PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+ absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
+ auto key() const -> decltype(PolicyTraits::key(this->slot())) {
+ return PolicyTraits::key(this->slot());
+ }
+
+ mapped_type& mapped() const {
+ return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+ template <typename Node>
+ static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+ return node.slot();
+ }
+
+ template <typename Node>
+ static void Reset(Node* node) {
+ node->reset();
+ }
+
+ template <typename T, typename... Args>
+ static T Transfer(Args&&... args) {
+ return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static T Move(Args&&... args) {
+ return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ }
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
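For orientation, here is a minimal sketch (not part of this change) of the C++17 node-handle flow and the heterogeneous lookup that IsTransparent/KeyArg enable; absl::flat_hash_map is assumed purely for illustration:

// Sketch only. extract() builds a node_handle (via CommonAccess::Transfer in
// raw_hash_set) that can be re-inserted into another table without copying the
// element. The const char* key goes through the transparent hash/eq path that
// KeyArg<true> forwards unchanged.
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"

void MoveOne(absl::flat_hash_map<std::string, int>& from,
             absl::flat_hash_map<std::string, int>& to) {
  auto node = from.extract("key");  // empty node handle if "key" is absent
  if (!node.empty()) {
    node.mapped() += 1;            // key()/mapped() come from the map specialization
    to.insert(std::move(node));    // returns an InsertReturnType
  }
}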
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 29fe7c12..fbace496 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,27 +27,28 @@
// const T2& t2 = value.get<2>();
// ...
//
-// http://en.cppreference.com/w/cpp/language/ebo
+// https://en.cppreference.com/w/cpp/language/ebo
#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/utility/utility.h"
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__NVCC__)
// We need to mark these classes with this declspec to ensure that the empty
// base class optimization in CompressedTuple happens.
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
-#else // _MSC_VER
+#else
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
-#endif // _MSC_VER
+#endif
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <typename... Ts>
@@ -76,57 +77,110 @@ constexpr bool IsFinal() {
#endif
}
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So any
+// time we use inheritance rather than encapsulation, we mark
+// CompressedTupleImpl with the uses_inheritance tag to make this easy to
+// detect.
+struct uses_inheritance {};
+
template <typename T>
constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
-template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
struct Storage {
- using T = ElemT<D, I>;
T value;
constexpr Storage() = default;
- explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
- constexpr const T& get() const { return value; }
- T& get() { return value; }
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : value(absl::forward<V>(v)) {}
+ constexpr const T& get() const& { return value; }
+ T& get() & { return value; }
+ constexpr const T&& get() const&& { return absl::move(*this).value; }
+ T&& get() && { return std::move(*this).value; }
};
-template <typename D, size_t I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
- : ElemT<D, I> {
- using T = internal_compressed_tuple::ElemT<D, I>;
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
- explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
- constexpr const T& get() const { return *this; }
- T& get() { return *this; }
+
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : T(absl::forward<V>(v)) {}
+
+ constexpr const T& get() const& { return *this; }
+ T& get() & { return *this; }
+ constexpr const T&& get() const&& { return absl::move(*this); }
+ T&& get() && { return std::move(*this); }
};
-template <typename D, typename I>
+template <typename D, typename I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
-template <typename... Ts, size_t... I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
- CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
- : Storage<CompressedTuple<Ts...>,
- std::integral_constant<size_t, I>::value>... {
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
constexpr CompressedTupleImpl() = default;
- explicit constexpr CompressedTupleImpl(Ts&&... args)
- : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
};
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+ // We use the dummy identity function as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
+using TupleMoveConstructible = typename std::conditional<
+ std::is_reference<T>::value, std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
} // namespace internal_compressed_tuple
// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
//
// To access the members, use member .get<N>() function.
//
@@ -138,28 +192,62 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
// const T2& t2 = value.get<2>();
// ...
//
-// http://en.cppreference.com/w/cpp/language/ebo
+// https://en.cppreference.com/w/cpp/language/ebo
template <typename... Ts>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+ template <int I>
+ using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
public:
+  // There seems to be a bug in MSVC in which using '=default' here will
+ // cause the compiler to ignore the body of other constructors. The work-
+ // around is to explicitly implement the default constructor.
+#if defined(_MSC_VER)
+ constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
constexpr CompressedTuple() = default;
- explicit constexpr CompressedTuple(Ts... base)
- : CompressedTuple::CompressedTupleImpl(absl::forward<Ts>(base)...) {}
+#endif
+ explicit constexpr CompressedTuple(const Ts&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
+
+ template <typename... Vs,
+ absl::enable_if_t<
+ absl::conjunction<
+ // Ensure we are not hiding default copy/move constructors.
+ absl::negation<std::is_same<void(CompressedTuple),
+ void(absl::decay_t<Vs>...)>>,
+ internal_compressed_tuple::TupleMoveConstructible<
+ Ts, Vs&&>...>::value,
+ bool> = true>
+ explicit constexpr CompressedTuple(Vs&&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<Vs>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() & {
+ return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const& {
+ return StorageT<I>::get();
+ }
template <int I>
- ElemT<I>& get() {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ ElemT<I>&& get() && {
+ return std::move(*this).StorageT<I>::get();
}
template <int I>
- constexpr const ElemT<I>& get() const {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ constexpr const ElemT<I>&& get() const&& {
+ return absl::move(*this).StorageT<I>::get();
}
};
@@ -169,7 +257,7 @@ template <>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
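As a quick illustration (not part of this diff) of what the new ref-qualified get() overloads provide, a move-only element can now be moved out of an rvalue CompressedTuple:

// Sketch only, mirroring the MoveOnlyElements test below: std::move(t).get<0>()
// selects the && overload added above, so the unique_ptr is moved, not copied.
#include <memory>
#include <string>
#include <utility>
#include "absl/container/internal/compressed_tuple.h"

using absl::container_internal::CompressedTuple;

std::unique_ptr<std::string> TakeFirst(
    CompressedTuple<std::unique_ptr<std::string>, int> t) {
  return std::move(t).get<0>();
}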
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 2b5ed4a4..ec893b90 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,18 +14,26 @@
#include "absl/container/internal/compressed_tuple.h"
+#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/memory/memory.h"
+#include "absl/types/any.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
-namespace absl {
-inline namespace lts_2018_12_18 {
-namespace container_internal {
-namespace {
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
+enum class CallType { kConstRef, kConstMove };
template <int>
-struct Empty {};
+struct Empty {
+ constexpr CallType value() const& { return CallType::kConstRef; }
+ constexpr CallType value() const&& { return CallType::kConstMove; }
+};
template <typename T>
struct NotEmpty {
@@ -38,6 +46,15 @@ struct TwoValues {
U value2;
};
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::InstanceTracker;
+
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
@@ -53,6 +70,141 @@ TEST(CompressedTupleTest, Sizeof) {
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
}
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
+ InstanceTracker tracker;
+ CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
+ EXPECT_EQ(tracker.instances(), 1);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
+ InstanceTracker tracker;
+
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
+ EXPECT_EQ(tracker.instances(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ Empty<0> empty;
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, empty);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+struct IncompleteType;
+CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
+MakeWithIncomplete(CopyableMovableInstance i1,
+ IncompleteType& t, // NOLINT
+ Empty<0> empty) {
+ return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
+ std::move(i1), t, empty};
+}
+
+struct IncompleteType {};
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ Empty<0> empty;
+ struct DerivedType : IncompleteType {int value = 0;};
+ DerivedType fd;
+ fd.value = 7;
+
+ CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
+ MakeWithIncomplete(std::move(i1), fd, empty);
+
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
+
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 2);
+}
+
+TEST(CompressedTupleTest,
+ OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, {}); // NOLINT
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.instances(), 3);
+ // We are forced into the `const Ts&...` constructor (invoking copies)
+ // because we need it to deduce the type of `{}`.
+ // std::tuple also has this behavior.
+ // Note, this test is proof that this is expected behavior, but it is not
+ // _desired_ behavior.
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+
+ CompressedTuple<CopyableMovableInstance> x1(i1);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2(2);
+ const CopyableMovableInstance& i2_ref = i2;
+ CompressedTuple<CopyableMovableInstance> x2(i2_ref);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x(std::move(i1));
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2 = std::move(x).get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance t = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance& t1 = x.get<0>();
+ const CopyableMovableInstance& t2 = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(t1.value(), 0);
+ EXPECT_EQ(t2.value(), 0);
+}
+
TEST(CompressedTupleTest, Access) {
struct S {
std::string x;
@@ -113,9 +265,14 @@ TEST(CompressedTupleTest, Nested) {
EXPECT_EQ(4 * sizeof(char),
sizeof(CompressedTuple<CompressedTuple<char, char>,
CompressedTuple<char, char>>));
- EXPECT_TRUE(
- (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
- CompressedTuple<Empty<1>>>>::value));
+ EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+ // Make sure everything still works when things are nested.
+ struct CT_Empty : CompressedTuple<Empty<0>> {};
+ CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+ auto contained = nested_empty.get<0>();
+ auto nested = nested_empty.get<1>().get<0>();
+ EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
}
TEST(CompressedTupleTest, Reference) {
@@ -141,15 +298,103 @@ TEST(CompressedTupleTest, NoElements) {
EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
}
+TEST(CompressedTupleTest, MoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> str_tup(
+ absl::make_unique<std::string>("str"));
+
+ CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
+ std::unique_ptr<int>>
+ x(std::move(str_tup), absl::make_unique<int>(5));
+
+ EXPECT_EQ(*x.get<0>().get<0>(), "str");
+ EXPECT_EQ(*x.get<1>(), 5);
+
+ std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
+ std::unique_ptr<int> x1 = std::move(x).get<1>();
+
+ EXPECT_EQ(*x0, "str");
+ EXPECT_EQ(*x1, 5);
+}
+
+TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> base(
+ absl::make_unique<std::string>("str"));
+ EXPECT_EQ(*base.get<0>(), "str");
+
+ CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
+ EXPECT_EQ(*copy.get<0>(), "str");
+}
+
+TEST(CompressedTupleTest, AnyElements) {
+ any a(std::string("str"));
+ CompressedTuple<any, any&> x(any(5), a);
+ EXPECT_EQ(absl::any_cast<int>(x.get<0>()), 5);
+ EXPECT_EQ(absl::any_cast<std::string>(x.get<1>()), "str");
+
+ a = 0.5f;
+ EXPECT_EQ(absl::any_cast<float>(x.get<1>()), 0.5);
+
+  // Ensure copy construction works in the face of a type with a universal
+  // implicit constructor.
+ CompressedTuple<absl::any> c{}, d(c); // NOLINT
+}
+
TEST(CompressedTupleTest, Constexpr) {
- constexpr CompressedTuple<int, double, CompressedTuple<int>> x(
- 7, 1.25, CompressedTuple<int>(5));
+ struct NonTrivialStruct {
+ constexpr NonTrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v = 5;
+ };
+ struct TrivialStruct {
+ TrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v;
+ };
+ constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
+ 7, 1.25, CompressedTuple<int>(5), {});
constexpr int x0 = x.get<0>();
constexpr double x1 = x.get<1>();
constexpr int x2 = x.get<2>().get<0>();
+ constexpr CallType x3 = x.get<3>().value();
+
EXPECT_EQ(x0, 7);
EXPECT_EQ(x1, 1.25);
EXPECT_EQ(x2, 5);
+ EXPECT_EQ(x3, CallType::kConstRef);
+
+#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
+ constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
+ constexpr CallType trivial0 = trivial.get<0>().value();
+ constexpr int trivial1 = trivial.get<1>().value();
+ constexpr int trivial2 = trivial.get<2>();
+
+ EXPECT_EQ(trivial0, CallType::kConstRef);
+ EXPECT_EQ(trivial1, 0);
+ EXPECT_EQ(trivial2, 0);
+#endif
+
+ constexpr CompressedTuple<Empty<0>, NonTrivialStruct, absl::optional<int>>
+ non_trivial = {};
+ constexpr CallType non_trivial0 = non_trivial.get<0>().value();
+ constexpr int non_trivial1 = non_trivial.get<1>().value();
+ constexpr absl::optional<int> non_trivial2 = non_trivial.get<2>();
+
+ EXPECT_EQ(non_trivial0, CallType::kConstRef);
+ EXPECT_EQ(non_trivial1, 5);
+ EXPECT_EQ(non_trivial2, absl::nullopt);
+
+ static constexpr char data[] = "DEF";
+ constexpr CompressedTuple<const char*> z(data);
+ constexpr const char* z1 = z.get<0>();
+ EXPECT_EQ(std::string(z1), std::string(data));
+
+#if defined(__clang__)
+ // An apparent bug in earlier versions of gcc claims these are ambiguous.
+ constexpr int x2m = absl::move(x.get<2>()).get<0>();
+ constexpr CallType x3m = absl::move(x).get<3>().value();
+ EXPECT_EQ(x2m, 5);
+ EXPECT_EQ(x3m, CallType::kConstMove);
+#endif
}
#if defined(__clang__) || defined(__GNUC__)
@@ -164,5 +409,5 @@ TEST(CompressedTupleTest, EmptyFinalClass) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index ddccbe05..eb6d7eb7 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -34,7 +34,7 @@
#include "absl/utility/utility.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Allocates at least n bytes aligned to the specified alignment.
@@ -287,13 +287,48 @@ struct IsLayoutCompatible {
} // namespace memory_internal
-// If kMutableKeys is false, only the value member is accessed.
+// The internal storage type for key-value containers like flat_hash_map.
//
-// If kMutableKeys is true, key is accessed through all slots while value and
-// mutable_value are accessed only via INITIALIZED slots. Slots are created and
-// destroyed via mutable_value so that the key can be moved later.
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even though
+// that has the same efficiency issues with move in emplace() and insert() -
+// but people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
template <class K, class V>
-union slot_type {
+union map_slot_type {
+ map_slot_type() {}
+ ~map_slot_type() = delete;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ value_type value;
+ mutable_value_type mutable_value;
+ K key;
+};
+
+template <class K, class V>
+struct map_slot_policy {
+ using slot_type = map_slot_type<K, V>;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
private:
static void emplace(slot_type* slot) {
// The construction of union doesn't do anything at runtime but it allows us
@@ -303,19 +338,17 @@ union slot_type {
// If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
// or the other via slot_type. We are also free to access the key via
// slot_type::key in this case.
- using kMutableKeys =
- std::integral_constant<bool,
- memory_internal::IsLayoutCompatible<K, V>::value>;
+ using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
public:
- slot_type() {}
- ~slot_type() = delete;
- using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
+ static value_type& element(slot_type* slot) { return slot->value; }
+ static const value_type& element(const slot_type* slot) {
+ return slot->value;
+ }
- value_type value;
- mutable_value_type mutable_value;
- K key;
+ static const K& key(const slot_type* slot) {
+ return kMutableKeys::value ? slot->key : slot->value.first;
+ }
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
@@ -401,7 +434,7 @@ union slot_type {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
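A tiny standalone illustration (not from this diff) of the problem the map_slot_type union above solves: a const key blocks moves, while the layout-compatible mutable pair does not.

// Sketch only. With value_type = std::pair<const K, V>, moving the pair still
// copies the key because a const member cannot be moved from. map_slot_type
// lets the table construct, destroy, and transfer slots through mutable_value
// (std::pair<K, V>) so the key really moves, while users see pair<const K, V>.
#include <string>
#include <utility>

void SlotMoveDemo() {
  std::pair<const std::string, int> a{"key", 1};
  auto b = std::move(a);  // a.first is copied: it is const
  std::pair<std::string, int> c{"key", 1};
  auto d = std::move(c);  // c.first is actually moved
  (void)b;
  (void)d;
}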
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index da87ca20..ea9568dc 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,7 +23,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -186,5 +186,5 @@ TEST(DecomposePair, NotDecomposable) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
new file mode 100644
index 00000000..94a457ca
--- /dev/null
+++ b/absl/container/internal/counting_allocator.h
@@ -0,0 +1,81 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+ using Alloc = std::allocator<T>;
+ using pointer = typename Alloc::pointer;
+ using size_type = typename Alloc::size_type;
+
+ CountingAllocator() : bytes_used_(nullptr) {}
+ explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+ pointer allocate(size_type n,
+ std::allocator<void>::const_pointer hint = nullptr) {
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ += n * sizeof(T);
+ return Alloc::allocate(n, hint);
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Alloc::deallocate(p, n);
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ -= n * sizeof(T);
+ }
+
+ template<typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_;
+};
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
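A short usage sketch (not part of this change) of how a test might thread the external byte counter through CountingAllocator:

// Sketch only: the counter lives in the test, and every copy/rebind of the
// allocator shares the same int64_t*, so nested containers aggregate into it.
#include <cstdint>
#include <vector>
#include "absl/container/internal/counting_allocator.h"

void CountingAllocatorDemo() {
  int64_t bytes_used = 0;
  absl::container_internal::CountingAllocator<int> alloc(&bytes_used);
  std::vector<int, absl::container_internal::CountingAllocator<int>> v(alloc);
  v.push_back(1);  // bytes_used now reflects the vector's allocation
}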
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 72c75fa0..2155076d 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -39,8 +39,8 @@
// equal functions are still bound to T. This is important because some type U
// can be hashed by/tested for equality differently depending on T. A notable
// example is `const char*`. `const char*` is treated as a c-style string when
-// the hash function is hash<string> but as a pointer when the hash function is
-// hash<void*>.
+// the hash function is hash<std::string> but as a pointer when the hash
+// function is hash<void*>.
//
#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
@@ -56,7 +56,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// The hash of an object of type T is computed by using absl::Hash.
@@ -84,6 +84,7 @@ struct StringHashEq {
}
};
};
+
template <>
struct HashEq<std::string> : StringHashEq {};
template <>
@@ -139,7 +140,7 @@ template <class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
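The point in the header comment about `const char*` being hashed as text or as a pointer depending on T can be made concrete with a small sketch (illustrative only, not part of this diff):

// Sketch only. hash_default_hash<std::string> is transparent and hashes a
// const char* as string contents (via absl::string_view), whereas
// hash_default_hash<void*> would hash the pointer value itself.
#include <string>
#include "absl/container/internal/hash_function_defaults.h"

bool SameTextHash(const char* a, const char* b) {
  absl::container_internal::hash_default_hash<std::string> h;
  return h(a) == h(b);  // true whenever a and b spell the same string
}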
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 4610843a..ce6133f8 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -78,14 +78,14 @@ struct EqString : ::testing::Test {
hash_default_eq<T> key_eq;
};
-TYPED_TEST_CASE(EqString, StringTypes);
+TYPED_TEST_SUITE(EqString, StringTypes);
template <class T>
struct HashString : ::testing::Test {
hash_default_hash<T> hasher;
};
-TYPED_TEST_CASE(HashString, StringTypes);
+TYPED_TEST_SUITE(HashString, StringTypes);
TYPED_TEST(EqString, Works) {
auto eq = this->key_eq;
@@ -122,14 +122,14 @@ struct EqPointer : ::testing::Test {
hash_default_eq<T> key_eq;
};
-TYPED_TEST_CASE(EqPointer, PointerTypes);
+TYPED_TEST_SUITE(EqPointer, PointerTypes);
template <class T>
struct HashPointer : ::testing::Test {
hash_default_hash<T> hasher;
};
-TYPED_TEST_CASE(HashPointer, PointerTypes);
+TYPED_TEST_SUITE(HashPointer, PointerTypes);
TYPED_TEST(EqPointer, Works) {
int dummy;
@@ -203,15 +203,11 @@ TYPED_TEST(HashPointer, Works) {
EXPECT_NE(hash(&dummy), hash(cuptr));
}
-// Cartesian product of (string, std::string, absl::string_view)
-// with (string, std::string, absl::string_view, const char*).
+// Cartesian product of (std::string, absl::string_view)
+// with (std::string, absl::string_view, const char*).
using StringTypesCartesianProduct = Types<
// clang-format off
- std::pair<std::string, std::string>,
- std::pair<std::string, absl::string_view>,
- std::pair<std::string, const char*>,
-
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
std::pair<absl::string_view, const char*>>;
@@ -249,11 +245,11 @@ TYPED_TEST_P(StringLikeTest, HashEq) {
EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
}
-TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct);
+TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
enum Hash : size_t {
@@ -284,7 +280,7 @@ struct hash<Hashable<H>> {
} // namespace std
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -299,5 +295,5 @@ TEST(Delegate, HashDispatch) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
index aef41d72..36b2571b 100644
--- a/absl/container/internal/hash_generator_testing.cc
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@
#include <deque>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_internal {
namespace {
@@ -70,5 +70,5 @@ absl::string_view Generator<absl::string_view>::operator()() const {
} // namespace hash_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h
index 65e88964..27962c35 100644
--- a/absl/container/internal/hash_generator_testing.h
+++ b/absl/container/internal/hash_generator_testing.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -31,7 +31,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_internal {
namespace generator_internal {
@@ -146,7 +146,7 @@ using GeneratedType = decltype(
} // namespace hash_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h
index 9c310ad4..8f0d2a52 100644
--- a/absl/container/internal/hash_policy_testing.h
+++ b/absl/container/internal/hash_policy_testing.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -30,7 +30,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_testing_internal {
@@ -163,7 +163,7 @@ auto keys(const Set& s)
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc
index 00c436b3..8fd1df00 100644
--- a/absl/container/internal/hash_policy_testing_test.cc
+++ b/absl/container/internal/hash_policy_testing_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@
#include "gtest/gtest.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -41,5 +41,5 @@ TEST(_, Hash) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 41e26212..3d87e821 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,7 +23,7 @@
#include "absl/meta/type_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
@@ -185,7 +185,7 @@ struct hash_policy_traits {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc
index 07cecdfa..edfaf63e 100644
--- a/absl/container/internal/hash_policy_traits_test.cc
+++ b/absl/container/internal/hash_policy_traits_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@
#include "gtest/gtest.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -140,5 +140,5 @@ TEST_F(Test, with_transfer) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h
index b6a43512..1d1a9c28 100644
--- a/absl/container/internal/hashtable_debug.h
+++ b/absl/container/internal/hashtable_debug.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -38,7 +38,7 @@
#include "absl/container/internal/hashtable_debug_hooks.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Returns the number of probes required to lookup `key`. Returns 0 for a
@@ -61,7 +61,7 @@ std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
size_t num_probes = GetHashtableDebugNumProbes(
container,
absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
- v.resize(std::max(v.size(), num_probes + 1));
+ v.resize((std::max)(v.size(), num_probes + 1));
v[num_probes]++;
}
return v;
@@ -104,7 +104,7 @@ size_t LowerBoundAllocatedByteSize(size_t num_elements) {
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h
index 50ba6ba5..7b95fcef 100644
--- a/absl/container/internal/hashtable_debug_hooks.h
+++ b/absl/container/internal/hashtable_debug_hooks.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,7 +24,7 @@
#include <vector>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hashtable_debug_internal {
@@ -77,7 +77,7 @@ struct HashtableDebugAccess {
} // namespace hashtable_debug_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 00000000..2338045d
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,310 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
+ false
+};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+
+// Returns the next pseudo-random value.
+// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
+// This is the lrand48 generator.
+uint64_t NextRandom(uint64_t rnd) {
+ const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+ const uint64_t prng_add = 0xB;
+ const uint64_t prng_mod_power = 48;
+ const uint64_t prng_mod_mask = ~(~uint64_t{0} << prng_mod_power);
+ return (prng_mult * rnd + prng_add) & prng_mod_mask;
+}
+
+// Generates a geometric variable with the specified mean.
+// This is done by generating a random number between 0 and 1 and applying
+// the inverse cumulative distribution function for an exponential.
+// Specifically: Let m be the inverse of the sample period, then
+// the probability distribution function is m*exp(-mx) so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
+//
+int64_t GetGeometricVariable(int64_t mean) {
+#if ABSL_HAVE_THREAD_LOCAL
+ thread_local
+#else // ABSL_HAVE_THREAD_LOCAL
+  // SampleSlow and hence GetGeometricVariable are guarded by a single mutex
+  // when there are no thread locals. Thus, a single global rng is acceptable
+  // for that case.
+ static
+#endif // ABSL_HAVE_THREAD_LOCAL
+ uint64_t rng = []() {
+ // We don't get well distributed numbers from this so we call
+ // NextRandom() a bunch to mush the bits around. We use a global_rand
+ // to handle the case where the same thread (by memory address) gets
+ // created and destroyed repeatedly.
+ ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0);
+ uint64_t r = reinterpret_cast<uint64_t>(&rng) +
+ global_rand.fetch_add(1, std::memory_order_relaxed);
+ for (int i = 0; i < 20; ++i) {
+ r = NextRandom(r);
+ }
+ return r;
+ }();
+
+ rng = NextRandom(rng);
+
+ // Take the top 26 bits as the random number
+ // (This plus the 1<<58 sampling bound give a max possible step of
+ // 5194297183973780480 bytes.)
+ const uint64_t prng_mod_power = 48; // Number of bits in prng
+ // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
+ // under piii debug for some binaries.
+ double q = static_cast<uint32_t>(rng >> (prng_mod_power - 26)) + 1.0;
+ // Put the computed p-value through the CDF of a geometric.
+ double interval = (log2(q) - 26) * (-std::log(2.0) * mean);
+
+ // Very large values of interval overflow int64_t. If we happen to
+ // hit such improbable condition, we simply cheat and clamp interval
+ // to largest supported value.
+ if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
+ return std::numeric_limits<int64_t>::max() / 2;
+ }
+
+ // Small values of interval are equivalent to just sampling next time.
+ if (interval < 1) {
+ return 1;
+ }
+ return static_cast<int64_t>(interval);
+}
+
+} // namespace
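For intuition, the inverse-CDF derivation in the comment above can be restated as a standalone sketch using <random>; this is illustrative only and not part of the diff:

// Sketch only: q uniform in (0, 1], so -ln(q) is Exp(1) and -ln(q) * mean has
// the requested mean, matching the derivation before GetGeometricVariable.
#include <cmath>
#include <cstdint>
#include <random>

int64_t SketchGeometricInterval(int64_t mean, std::mt19937_64& rng) {
  double q = 1.0 - std::generate_canonical<double, 26>(rng);  // (0, 1]
  double interval = -std::log(q) * static_cast<double>(mean);
  return interval < 1 ? 1 : static_cast<int64_t>(interval);
}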
+
+HashtablezSampler& HashtablezSampler::Global() {
+ static auto* sampler = new HashtablezSampler();
+ return *sampler;
+}
+
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+ DisposeCallback f) {
+ return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+ capacity.store(0, std::memory_order_relaxed);
+ size.store(0, std::memory_order_relaxed);
+ num_erases.store(0, std::memory_order_relaxed);
+ max_probe_length.store(0, std::memory_order_relaxed);
+ total_probe_length.store(0, std::memory_order_relaxed);
+ hashes_bitwise_or.store(0, std::memory_order_relaxed);
+ hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+
+ create_time = absl::Now();
+ // The inliner makes hardcoded skip_count difficult (especially when combined
+ // with LTO). We use the ability to exclude stacks by regex when encoding
+ // instead.
+ depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+ /* skip_count= */ 0);
+ dead = nullptr;
+}
+
+HashtablezSampler::HashtablezSampler()
+ : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+ absl::MutexLock l(&graveyard_.init_mu);
+ graveyard_.dead = &graveyard_;
+}
+
+HashtablezSampler::~HashtablezSampler() {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ HashtablezInfo* next = s->next;
+ delete s;
+ s = next;
+ }
+}
+
+void HashtablezSampler::PushNew(HashtablezInfo* sample) {
+ sample->next = all_.load(std::memory_order_relaxed);
+ while (!all_.compare_exchange_weak(sample->next, sample,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ }
+}
+
+void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+ if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+ dispose(*sample);
+ }
+
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+ absl::MutexLock sample_lock(&sample->init_mu);
+ sample->dead = graveyard_.dead;
+ graveyard_.dead = sample;
+}
+
+HashtablezInfo* HashtablezSampler::PopDead() {
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+ // The list is circular, so eventually it collapses down to
+ // graveyard_.dead == &graveyard_
+ // when it is empty.
+ HashtablezInfo* sample = graveyard_.dead;
+ if (sample == &graveyard_) return nullptr;
+
+ absl::MutexLock sample_lock(&sample->init_mu);
+ graveyard_.dead = sample->dead;
+ sample->PrepareForSampling();
+ return sample;
+}
+
+HashtablezInfo* HashtablezSampler::Register() {
+ int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+ if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+ dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ HashtablezInfo* sample = PopDead();
+ if (sample == nullptr) {
+ // Resurrection failed. Hire a new warlock.
+ sample = new HashtablezInfo();
+ PushNew(sample);
+ }
+
+ return sample;
+}
+
+void HashtablezSampler::Unregister(HashtablezInfo* sample) {
+ PushDead(sample);
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+int64_t HashtablezSampler::Iterate(
+ const std::function<void(const HashtablezInfo& stack)>& f) {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ absl::MutexLock l(&s->init_mu);
+ if (s->dead == nullptr) {
+ f(*s);
+ }
+ s = s->next;
+ }
+
+ return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample) {
+ if (kAbslContainerInternalSampleEverything) {
+ *next_sample = 1;
+ return HashtablezSampler::Global().Register();
+ }
+
+ bool first = *next_sample < 0;
+ *next_sample = GetGeometricVariable(
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ // Because g_hashtablez_enabled can be dynamically flipped, we need to set a
+ // threshold low enough that we will start sampling in a reasonable time, so
+ // we just use the default sampling rate.
+ if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+ // We will only be negative on our first count, so we should just retry in
+ // that case.
+ if (first) {
+ if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample);
+ }
+
+ return HashtablezSampler::Global().Register();
+}
+
+#if ABSL_PER_THREAD_TLS == 1
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+#endif // ABSL_PER_THREAD_TLS == 1
+
+void UnsampleSlow(HashtablezInfo* info) {
+ HashtablezSampler::Global().Unregister(info);
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired) {
+ // SwissTables probe in groups (16 slots with SSE2, 8 otherwise), so scale
+ // this to count group probes rather than the offset from the desired slot.
+ size_t probe_length = distance_from_desired;
+#if SWISSTABLE_HAVE_SSE2
+ probe_length /= 16;
+#else
+ probe_length /= 8;
+#endif
+
+ info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->max_probe_length.store(
+ std::max(info->max_probe_length.load(std::memory_order_relaxed),
+ probe_length),
+ std::memory_order_relaxed);
+ info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+ info->size.fetch_add(1, std::memory_order_relaxed);
+}
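As a worked example of the scaling above: with SSE2, a `distance_from_desired` of 32 slots records 2 group probes (32 / 16), so `max_probe_length` and `total_probe_length` count 16-slot groups rather than individual slots; without SSE2 the group width used is 8.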
+
+void SetHashtablezEnabled(bool enabled) {
+ g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+ if (rate > 0) {
+ g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+ static_cast<long long>(rate)); // NOLINT(runtime/int)
+ }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+ if (max > 0) {
+ g_hashtablez_max_samples.store(max, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+ static_cast<long long>(max)); // NOLINT(runtime/int)
+ }
+}
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 00000000..f17c425c
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,290 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low-level library that samples
+// hashtables and collects runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo {
+ // Constructs the object but does not fill in any fields.
+ HashtablezInfo();
+ ~HashtablezInfo();
+ HashtablezInfo(const HashtablezInfo&) = delete;
+ HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+ // Puts the object into a clean state, fills in the logically `const` members,
+ // blocking for any readers that are currently sampling the object.
+ void PrepareForSampling() EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+ // These fields are mutated by the various Record* APIs and need to be
+ // thread-safe.
+ std::atomic<size_t> capacity;
+ std::atomic<size_t> size;
+ std::atomic<size_t> num_erases;
+ std::atomic<size_t> max_probe_length;
+ std::atomic<size_t> total_probe_length;
+ std::atomic<size_t> hashes_bitwise_or;
+ std::atomic<size_t> hashes_bitwise_and;
+
+ // `HashtablezSampler` maintains intrusive linked lists for all samples. See
+ // comments on `HashtablezSampler::all_` for details on these. `init_mu`
+ // guards the ability to restore the sample to a pristine state. This
+ // prevents races with sampling and resurrecting an object.
+ absl::Mutex init_mu;
+ HashtablezInfo* next;
+ HashtablezInfo* dead GUARDED_BY(init_mu);
+
+ // All of the fields below are set by `PrepareForSampling` and must not be
+ // mutated in `Record*` functions. They are logically `const` in that sense.
+ // These are guarded by init_mu, but that is not externalized to clients, who
+ // can only read them during `HashtablezSampler::Iterate` which will hold the
+ // lock.
+ static constexpr int kMaxStackDepth = 64;
+ absl::Time create_time;
+ int32_t depth;
+ void* stack[kMaxStackDepth];
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if SWISSTABLE_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+}
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ RecordRehashSlow(info, 0);
+ }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ info->num_erases.fetch_add(1, std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample);
+void UnsampleSlow(HashtablezInfo* info);
+
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() : info_(nullptr) {}
+ explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+ ~HashtablezInfoHandle() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ UnsampleSlow(info_);
+ }
+
+ HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+ HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+ HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+ : info_(absl::exchange(o.info_, nullptr)) {}
+ HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+ if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+ UnsampleSlow(info_);
+ }
+ info_ = absl::exchange(o.info_, nullptr);
+ return *this;
+ }
+
+ inline void RecordStorageChanged(size_t size, size_t capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordStorageChangedSlow(info_, size, capacity);
+ }
+
+ inline void RecordRehash(size_t total_probe_length) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordRehashSlow(info_, total_probe_length);
+ }
+
+ inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordInsertSlow(info_, hash, distance_from_desired);
+ }
+
+ inline void RecordErase() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordEraseSlow(info_);
+ }
+
+ friend inline void swap(HashtablezInfoHandle& lhs,
+ HashtablezInfoHandle& rhs) {
+ std::swap(lhs.info_, rhs.info_);
+ }
+
+ private:
+ friend class HashtablezInfoHandlePeer;
+ HashtablezInfo* info_;
+};
+
+#if ABSL_PER_THREAD_TLS == 1
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif // ABSL_PER_THREAD_TLS
+
+// Returns an RAII sampling handle that manages registration and
+// unregistration with the global sampler.
+inline HashtablezInfoHandle Sample() {
+#if ABSL_PER_THREAD_TLS == 0
+ static auto* mu = new absl::Mutex;
+ static int64_t global_next_sample = 0;
+ absl::MutexLock l(mu);
+#endif // ABSL_PER_THREAD_TLS == 0
+
+ if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ return HashtablezInfoHandle(nullptr);
+ }
+ return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+}
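A hedged usage sketch (the `SampledSet` name and its methods are illustrative, not part of this patch): a container captures the handle once at construction and forwards events to it, so unsampled instances pay only the decrement-and-branch inside `Sample()`.

    class SampledSet {
     public:
      SampledSet() : infoz_(absl::container_internal::Sample()) {}

      void OnInsert(size_t hash, size_t distance_from_desired) {
        infoz_.RecordInsert(hash, distance_from_desired);
      }
      void OnRehash(size_t total_probe_length) {
        infoz_.RecordRehash(total_probe_length);
      }

     private:
      // Unregisters with the global sampler when the container dies.
      absl::container_internal::HashtablezInfoHandle infoz_;
    };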
+
+// Holds samples and their associated stack traces with a soft limit of
+// `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+ // Returns a global Sampler.
+ static HashtablezSampler& Global();
+
+ HashtablezSampler();
+ ~HashtablezSampler();
+
+ // Registers for sampling. Returns an opaque registration info.
+ HashtablezInfo* Register();
+
+ // Unregisters the sample.
+ void Unregister(HashtablezInfo* sample);
+
+ // The dispose callback will be called on all samples at the moment they are
+ // unregistered. It only affects samples that are unregistered after the
+ // callback has been set.
+ // Returns the previous callback.
+ using DisposeCallback = void (*)(const HashtablezInfo&);
+ DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+ // Iterates over all the registered `HashtablezInfo`s, returning the number
+ // of samples that have been dropped.
+ int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+ void PushNew(HashtablezInfo* sample);
+ void PushDead(HashtablezInfo* sample);
+ HashtablezInfo* PopDead();
+
+ std::atomic<size_t> dropped_samples_;
+ std::atomic<size_t> size_estimate_;
+
+ // Intrusive lock free linked lists for tracking samples.
+ //
+ // `all_` records all samples (they are never removed from this list) and is
+ // terminated with a `nullptr`.
+ //
+ // `graveyard_.dead` is a circular linked list. When it is empty,
+ // `graveyard_.dead == &graveyard_`. The list is circular so that
+ // every item on it (even the last) has a non-null dead pointer. This allows
+ // `Iterate` to determine if a given sample is live or dead using only
+ // information on the sample itself.
+ //
+ // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+ // looks like this (G is the Graveyard):
+ //
+ // +---+ +---+ +---+ +---+ +---+
+ // all -->| A |--->| B |--->| C |--->| D |--->| E |
+ // | | | | | | | | | |
+ // +---+ | | +->| |-+ | | +->| |-+ | |
+ // | G | +---+ | +---+ | +---+ | +---+ | +---+
+ // | | | | | |
+ // | | --------+ +--------+ |
+ // +---+ |
+ // ^ |
+ // +--------------------------------------+
+ //
+ std::atomic<HashtablezInfo*> all_;
+ HashtablezInfo graveyard_;
+
+ std::atomic<DisposeCallback> dispose_;
+};
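As a hedged illustration (not part of the patch), a monitoring routine would read samples only from within `Iterate`, which holds each sample's `init_mu` while the callback runs:

    #include <cstdio>

    void LogHashtablezSamples() {
      int64_t dropped =
          absl::container_internal::HashtablezSampler::Global().Iterate(
              [](const absl::container_internal::HashtablezInfo& info) {
                std::printf("size=%zu capacity=%zu max_probe=%zu\n",
                            info.size.load(std::memory_order_relaxed),
                            info.capacity.load(std::memory_order_relaxed),
                            info.max_probe_length.load(std::memory_order_relaxed));
              });
      std::printf("dropped samples: %lld\n", static_cast<long long>(dropped));
    }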
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" const bool kAbslContainerInternalSampleEverything;
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 00000000..d3f41c7c
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,29 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK const bool
+ kAbslContainerInternalSampleEverything = false;
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
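The weak `false` definition above is what makes the link-time override described in the header possible. A sketch of such an override, compiled into the final binary by anyone who wants every table sampled (illustrative only, not part of this patch):

    // A strong definition beats the ABSL_ATTRIBUTE_WEAK one at link time.
    extern "C" const bool kAbslContainerInternalSampleEverything = true;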
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
new file mode 100644
index 00000000..bdae75f3
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -0,0 +1,357 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <limits>
+#include <random>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#if SWISSTABLE_HAVE_SSE2
+constexpr int kProbeLength = 16;
+#else
+constexpr int kProbeLength = 8;
+#endif
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle& h) {
+ return h.info_ != nullptr;
+ }
+
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
+};
+
+namespace {
+using ::absl::synchronization_internal::ThreadPool;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+std::vector<size_t> GetSizes(HashtablezSampler* s) {
+ std::vector<size_t> res;
+ s->Iterate([&](const HashtablezInfo& info) {
+ res.push_back(info.size.load(std::memory_order_acquire));
+ });
+ return res;
+}
+
+HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+ auto* info = s->Register();
+ assert(info != nullptr);
+ info->size.store(size);
+ return info;
+}
+
+TEST(HashtablezInfoTest, PrepareForSampling) {
+ absl::Time test_start = absl::Now();
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+
+ info.capacity.store(1, std::memory_order_relaxed);
+ info.size.store(1, std::memory_order_relaxed);
+ info.num_erases.store(1, std::memory_order_relaxed);
+ info.max_probe_length.store(1, std::memory_order_relaxed);
+ info.total_probe_length.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+ info.create_time = test_start - absl::Hours(20);
+
+ info.PrepareForSampling();
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+}
+
+TEST(HashtablezInfoTest, RecordStorageChanged) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordStorageChangedSlow(&info, 17, 47);
+ EXPECT_EQ(info.size.load(), 17);
+ EXPECT_EQ(info.capacity.load(), 47);
+ RecordStorageChangedSlow(&info, 20, 20);
+ EXPECT_EQ(info.size.load(), 20);
+ EXPECT_EQ(info.capacity.load(), 20);
+}
+
+TEST(HashtablezInfoTest, RecordInsert) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+ RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+ RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 12);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+}
+
+TEST(HashtablezInfoTest, RecordErase) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 1);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 1);
+}
+
+TEST(HashtablezInfoTest, RecordRehash) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordInsertSlow(&info, 0x1, 0);
+ RecordInsertSlow(&info, 0x2, kProbeLength);
+ RecordInsertSlow(&info, 0x4, kProbeLength);
+ RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 4);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+
+ RecordEraseSlow(&info);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+ EXPECT_EQ(info.num_erases.load(), 2);
+
+ RecordRehashSlow(&info, 3 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 3);
+ EXPECT_EQ(info.num_erases.load(), 0);
+}
+
+TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, Sample) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+ int64_t num_sampled = 0;
+ int64_t total = 0;
+ double sample_rate = 0.0;
+ for (int i = 0; i < 1000000; ++i) {
+ HashtablezInfoHandle h = Sample();
+ ++total;
+ if (HashtablezInfoHandlePeer::IsSampled(h)) {
+ ++num_sampled;
+ }
+ sample_rate = static_cast<double>(num_sampled) / total;
+ if (0.005 < sample_rate && sample_rate < 0.015) break;
+ }
+ EXPECT_NEAR(sample_rate, 0.01, 0.005);
+}
+
+TEST(HashtablezSamplerTest, Handle) {
+ auto& sampler = HashtablezSampler::Global();
+ HashtablezInfoHandle h(sampler.Register());
+ auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
+ info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
+
+ bool found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+ EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
+ found = true;
+ }
+ });
+ EXPECT_TRUE(found);
+
+ h = HashtablezInfoHandle();
+ found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+ // this will only happen if some other thread has resurrected the info
+ // the old handle was using.
+ if (h.hashes_bitwise_and.load() == 0x12345678) {
+ found = true;
+ }
+ }
+ });
+ EXPECT_FALSE(found);
+}
+
+TEST(HashtablezSamplerTest, Registration) {
+ HashtablezSampler sampler;
+ auto* info1 = Register(&sampler, 1);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+
+ auto* info2 = Register(&sampler, 2);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+ info1->size.store(3);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+
+ sampler.Unregister(info1);
+ sampler.Unregister(info2);
+}
+
+TEST(HashtablezSamplerTest, Unregistration) {
+ HashtablezSampler sampler;
+ std::vector<HashtablezInfo*> infos;
+ for (size_t i = 0; i < 3; ++i) {
+ infos.push_back(Register(&sampler, i));
+ }
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+
+ sampler.Unregister(infos[1]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+
+ infos.push_back(Register(&sampler, 3));
+ infos.push_back(Register(&sampler, 4));
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+ sampler.Unregister(infos[3]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+
+ sampler.Unregister(infos[0]);
+ sampler.Unregister(infos[2]);
+ sampler.Unregister(infos[4]);
+ EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(HashtablezSamplerTest, MultiThreaded) {
+ HashtablezSampler sampler;
+ Notification stop;
+ ThreadPool pool(10);
+
+ for (int i = 0; i < 10; ++i) {
+ pool.Schedule([&sampler, &stop]() {
+ std::random_device rd;
+ std::mt19937 gen(rd());
+
+ std::vector<HashtablezInfo*> infoz;
+ while (!stop.HasBeenNotified()) {
+ if (infoz.empty()) {
+ infoz.push_back(sampler.Register());
+ }
+ switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+ case 0: {
+ infoz.push_back(sampler.Register());
+ break;
+ }
+ case 1: {
+ size_t p =
+ std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+ HashtablezInfo* info = infoz[p];
+ infoz[p] = infoz.back();
+ infoz.pop_back();
+ sampler.Unregister(info);
+ break;
+ }
+ case 2: {
+ absl::Duration oldest = absl::ZeroDuration();
+ sampler.Iterate([&](const HashtablezInfo& info) {
+ oldest = std::max(oldest, absl::Now() - info.create_time);
+ });
+ ASSERT_GE(oldest, absl::ZeroDuration());
+ break;
+ }
+ }
+ }
+ });
+ }
+ // The threads will hammer away. Give it a little bit of time for tsan to
+ // spot errors.
+ absl::SleepFor(absl::Seconds(3));
+ stop.Notify();
+}
+
+TEST(HashtablezSamplerTest, Callback) {
+ HashtablezSampler sampler;
+
+ auto* info1 = Register(&sampler, 1);
+ auto* info2 = Register(&sampler, 2);
+
+ static const HashtablezInfo* expected;
+
+ auto callback = [](const HashtablezInfo& info) {
+ // We can't use `info` outside of this callback because the object will be
+ // disposed as soon as we return from here.
+ EXPECT_EQ(&info, expected);
+ };
+
+ // Set the callback.
+ EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+ expected = info1;
+ sampler.Unregister(info1);
+
+ // Unset the callback.
+ EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+ expected = nullptr; // no more calls.
+ sampler.Unregister(info2);
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
new file mode 100644
index 00000000..43414418
--- /dev/null
+++ b/absl/container/internal/have_sse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISSTABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
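A hedged example of how these probes are typically consumed (the `GroupWidth` helper is illustrative, not part of this patch):

    #include "absl/container/internal/have_sse.h"

    // Width of one control-byte group scanned per probe step.
    constexpr int GroupWidth() {
    #if SWISSTABLE_HAVE_SSE2
      return 16;  // one 128-bit SSE2 load covers 16 control bytes
    #else
      return 8;   // portable fallback operates on 8 bytes at a time
    #endif
    }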
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
new file mode 100644
index 00000000..123e04c9
--- /dev/null
+++ b/absl/container/internal/inlined_vector.h
@@ -0,0 +1,895 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace inlined_vector_internal {
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>;
+
+template <typename AllocatorType>
+using IsMemcpyOk = absl::conjunction<
+ std::is_same<std::allocator<typename AllocatorType::value_type>,
+ AllocatorType>,
+ absl::is_trivially_copy_constructible<typename AllocatorType::value_type>,
+ absl::is_trivially_copy_assignable<typename AllocatorType::value_type>,
+ absl::is_trivially_destructible<typename AllocatorType::value_type>>;
+
+template <typename AllocatorType, typename ValueType, typename SizeType>
+void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first,
+ SizeType destroy_size) {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ if (destroy_first != nullptr) {
+ for (auto i = destroy_size; i != 0;) {
+ --i;
+ AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+ }
+
+#ifndef NDEBUG
+ // Overwrite unused memory with `0xab` so we can catch uninitialized usage.
+ //
+ // Cast to `void*` to tell the compiler that we don't care that we might be
+ // scribbling on a vtable pointer.
+ auto* memory_ptr = static_cast<void*>(destroy_first);
+ auto memory_size = sizeof(ValueType) * destroy_size;
+ std::memset(memory_ptr, 0xab, memory_size);
+#endif // NDEBUG
+ }
+}
+
+template <typename AllocatorType, typename ValueType, typename ValueAdapter,
+ typename SizeType>
+void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
+ ValueAdapter* values_ptr, SizeType construct_size) {
+ for (SizeType i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY {
+ values_ptr->ConstructNext(alloc_ptr, construct_first + i);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
+template <typename ValueType, typename ValueAdapter, typename SizeType>
+void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
+ SizeType assign_size) {
+ for (SizeType i = 0; i < assign_size; ++i) {
+ values_ptr->AssignNext(assign_first + i);
+ }
+}
+
+template <typename AllocatorType>
+struct StorageView {
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+
+ pointer data;
+ size_type size;
+ size_type capacity;
+};
+
+template <typename AllocatorType, typename Iterator>
+class IteratorValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+ ++it_;
+ }
+
+ void AssignNext(pointer assign_at) {
+ *assign_at = *it_;
+ ++it_;
+ }
+
+ private:
+ Iterator it_;
+};
+
+template <typename AllocatorType>
+class CopyValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using const_pointer = typename AllocatorType::const_pointer;
+ using const_reference = typename AllocatorType::const_reference;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit CopyValueAdapter(const_reference v) : ptr_(std::addressof(v)) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+ }
+
+ void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
+
+ private:
+ const_pointer ptr_;
+};
+
+template <typename AllocatorType>
+class DefaultValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using value_type = typename AllocatorType::value_type;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit DefaultValueAdapter() {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at);
+ }
+
+ void AssignNext(pointer assign_at) { *assign_at = value_type(); }
+};
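To make the adapter contract concrete, here is a small sketch (illustrative only; assumes this header is included and the `absl::inlined_vector_internal` names are visible) of driving `ConstructElements` with a `CopyValueAdapter`:

    #include <memory>

    void CopyAdapterDemo() {
      std::allocator<int> alloc;
      int* data = alloc.allocate(3);

      int value = 42;
      absl::inlined_vector_internal::CopyValueAdapter<std::allocator<int>> fill(value);

      // ConstructNext() runs once per slot; every slot gets a copy of `value`.
      absl::inlined_vector_internal::ConstructElements(&alloc, data, &fill, 3);

      absl::inlined_vector_internal::DestroyElements(&alloc, data, 3);
      alloc.deallocate(data, 3);
    }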
+
+template <typename AllocatorType>
+class AllocationTransaction {
+ using value_type = typename AllocatorType::value_type;
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit AllocationTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
+ AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ }
+ }
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ pointer& GetData() { return alloc_data_.template get<1>(); }
+ size_type& GetCapacity() { return capacity_; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
+ pointer Allocate(size_type capacity) {
+ GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+ GetCapacity() = capacity;
+ return GetData();
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+ size_type capacity_ = 0;
+};
+
+template <typename AllocatorType>
+class ConstructionTransaction {
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+
+ public:
+ explicit ConstructionTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~ConstructionTransaction() {
+ if (DidConstruct()) {
+ inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
+ GetData(), GetSize());
+ }
+ }
+
+ ConstructionTransaction(const ConstructionTransaction&) = delete;
+ void operator=(const ConstructionTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ pointer& GetData() { return alloc_data_.template get<1>(); }
+ size_type& GetSize() { return size_; }
+
+ bool DidConstruct() { return GetData() != nullptr; }
+ template <typename ValueAdapter>
+ void Construct(pointer data, ValueAdapter* values_ptr, size_type size) {
+ inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
+ data, values_ptr, size);
+ GetData() = data;
+ GetSize() = size;
+ }
+ void Commit() {
+ GetData() = nullptr;
+ GetSize() = 0;
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+ size_type size_ = 0;
+};
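A hedged sketch of the ownership hand-off both transactions rely on: until the caller clears the transaction, its destructor releases whatever it still holds (the demo function below is illustrative only):

    #include <memory>

    void AllocationTransactionDemo() {
      std::allocator<int> alloc;
      absl::inlined_vector_internal::AllocationTransaction<std::allocator<int>> tx(&alloc);

      int* p = tx.Allocate(16);  // owned by `tx`; freed by its destructor on failure
      // ... element construction into `p` would happen here and may throw ...

      // On success the container adopts the memory and clears the transaction;
      // `Storage::AcquireAllocatedData()` below does exactly this.
      int* adopted = p;
      tx.GetData() = nullptr;
      alloc.deallocate(adopted, 16);  // now the adopter's responsibility
    }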
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
+ using allocator_type = A;
+ using value_type = typename allocator_type::value_type;
+ using pointer = typename allocator_type::pointer;
+ using const_pointer = typename allocator_type::const_pointer;
+ using reference = typename allocator_type::reference;
+ using const_reference = typename allocator_type::const_reference;
+ using rvalue_reference = typename allocator_type::value_type&&;
+ using size_type = typename allocator_type::size_type;
+ using difference_type = typename allocator_type::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using MoveIterator = std::move_iterator<iterator>;
+ using AllocatorTraits = absl::allocator_traits<allocator_type>;
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
+
+ using StorageView = inlined_vector_internal::StorageView<allocator_type>;
+
+ template <typename Iterator>
+ using IteratorValueAdapter =
+ inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
+ using CopyValueAdapter =
+ inlined_vector_internal::CopyValueAdapter<allocator_type>;
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<allocator_type>;
+
+ using AllocationTransaction =
+ inlined_vector_internal::AllocationTransaction<allocator_type>;
+ using ConstructionTransaction =
+ inlined_vector_internal::ConstructionTransaction<allocator_type>;
+
+ static size_type NextCapacity(size_type current_capacity) {
+ return current_capacity * 2;
+ }
+
+ static size_type ComputeCapacity(size_type current_capacity,
+ size_type requested_capacity) {
+ return (std::max)(NextCapacity(current_capacity), requested_capacity);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ Storage() : metadata_() {}
+
+ explicit Storage(const allocator_type& alloc)
+ : metadata_(alloc, /* empty and inlined */ 0) {}
+
+ ~Storage() {
+ pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+ DeallocateIfAllocated();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Accessors
+ // ---------------------------------------------------------------------------
+
+ size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+
+ const size_type& GetSizeAndIsAllocated() const {
+ return metadata_.template get<1>();
+ }
+
+ size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+
+ bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
+
+ pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+
+ const_pointer GetAllocatedData() const {
+ return data_.allocated.allocated_data;
+ }
+
+ pointer GetInlinedData() {
+ return reinterpret_cast<pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ const_pointer GetInlinedData() const {
+ return reinterpret_cast<const_pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ size_type GetAllocatedCapacity() const {
+ return data_.allocated.allocated_capacity;
+ }
+
+ size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+
+ StorageView MakeStorageView() {
+ return GetIsAllocated()
+ ? StorageView{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+ }
+
+ allocator_type* GetAllocPtr() {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ const allocator_type* GetAllocPtr() const {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Mutators
+ // ---------------------------------------------------------------------------
+
+ template <typename ValueAdapter>
+ void Initialize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Assign(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Resize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ iterator Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count);
+
+ template <typename... Args>
+ reference EmplaceBack(Args&&... args);
+
+ iterator Erase(const_iterator from, const_iterator to);
+
+ void Reserve(size_type requested_capacity);
+
+ void ShrinkToFit();
+
+ void Swap(Storage* other_storage_ptr);
+
+ void SetIsAllocated() {
+ GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+ }
+
+ void UnsetIsAllocated() {
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+ }
+
+ void SetSize(size_type size) {
+ GetSizeAndIsAllocated() =
+ (size << 1) | static_cast<size_type>(GetIsAllocated());
+ }
+
+ void SetAllocatedSize(size_type size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+ }
+
+ void SetInlinedSize(size_type size) {
+ GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+ }
+
+ void AddSize(size_type count) {
+ GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+ }
+
+ void SubtractSize(size_type count) {
+ assert(count <= GetSize());
+
+ GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+ }
+
+ void SetAllocatedData(pointer data, size_type capacity) {
+ data_.allocated.allocated_data = data;
+ data_.allocated.allocated_capacity = capacity;
+ }
+
+ void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
+ SetAllocatedData(allocation_tx_ptr->GetData(),
+ allocation_tx_ptr->GetCapacity());
+ allocation_tx_ptr->GetData() = nullptr;
+ allocation_tx_ptr->GetCapacity() = 0;
+ }
+
+ void MemcpyFrom(const Storage& other_storage) {
+ assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
+ }
+
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
+ AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+ GetAllocatedCapacity());
+ }
+ }
+
+ private:
+ using Metadata =
+ container_internal::CompressedTuple<allocator_type, size_type>;
+
+ struct Allocated {
+ pointer allocated_data;
+ size_type allocated_capacity;
+ };
+
+ struct Inlined {
+ using InlinedDataElement =
+ absl::aligned_storage_t<sizeof(value_type), alignof(value_type)>;
+ InlinedDataElement inlined_data[N];
+ };
+
+ union Data {
+ Allocated allocated;
+ Inlined inlined;
+ };
+
+ Metadata metadata_;
+ Data data_;
+};
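For readers tracing the accessors above: `metadata_` packs the size and the allocation flag into one word as `(size << 1) | is_allocated`. For instance, an inlined vector of size 5 stores 10 while an allocated one of the same size stores 11; `GetSize()` shifts the flag back out and `GetIsAllocated()` masks it.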
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+ -> void {
+ // Only callable from constructors!
+ assert(!GetIsAllocated());
+ assert(GetSize() == 0);
+
+ pointer construct_data;
+
+ if (new_size > GetInlinedCapacity()) {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take ownership of the allocation while the size is still `0`. If
+ // `ConstructElements(...)` throws, deallocation is handled by `~Storage()`.
+ size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
+ pointer new_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+
+ SetAllocatedData(new_data, new_capacity);
+ SetIsAllocated();
+
+ construct_data = new_data;
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &values, new_size);
+
+ // Since the initial size was guaranteed to be `0` and the allocated bit is
+ // already correct for either case, *adding* `new_size` gives us the correct
+ // result faster than setting it directly.
+ AddSize(new_size);
+}
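As a worked example of the `ComputeCapacity` policy used above: an inlined capacity of 4 asked to hold 5 elements computes max(4 * 2, 5) == 8, and a later `Reserve(20)` on that allocation computes max(8 * 2, 20) == 20.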
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ absl::Span<value_type> assign_loop;
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_loop = {new_data, new_size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+ assign_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> move_construct_loop;
+ absl::Span<value_type> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+ construct_loop = {new_data + storage_view.size,
+ new_size - storage_view.size};
+ move_construct_loop = {new_data, storage_view.size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ construction_tx.Construct(construct_loop.data(), &values,
+ construct_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), move_construct_loop.data(), &move_values,
+ move_construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ construction_tx.Commit();
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count) -> iterator {
+ StorageView storage_view = MakeStorageView();
+
+ size_type insert_index =
+ std::distance(const_iterator(storage_view.data), pos);
+ size_type insert_end_index = insert_index + insert_count;
+ size_type new_size = storage_view.size + insert_count;
+
+ if (new_size > storage_view.capacity) {
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construction_tx.Construct(new_data + insert_index, &values, insert_count);
+
+ move_construction_tx.Construct(new_data, &move_values, insert_index);
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), new_data + insert_end_index, &move_values,
+ storage_view.size - insert_index);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ construction_tx.Commit();
+ move_construction_tx.Commit();
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+
+ SetAllocatedSize(new_size);
+ return iterator(new_data + insert_index);
+ } else {
+ size_type move_construction_destination_index =
+ (std::max)(insert_end_index, storage_view.size);
+
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_construction_values(
+ MoveIterator(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<value_type> move_construction = {
+ storage_view.data + move_construction_destination_index,
+ new_size - move_construction_destination_index};
+
+ pointer move_assignment_values = storage_view.data + insert_index;
+ absl::Span<value_type> move_assignment = {
+ storage_view.data + insert_end_index,
+ move_construction_destination_index - insert_end_index};
+
+ absl::Span<value_type> insert_assignment = {move_assignment_values,
+ move_construction.size()};
+
+ absl::Span<value_type> insert_construction = {
+ insert_assignment.data() + insert_assignment.size(),
+ insert_count - insert_assignment.size()};
+
+ move_construction_tx.Construct(move_construction.data(),
+ &move_construction_values,
+ move_construction.size());
+
+ for (pointer destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
+ ;) {
+ --destination;
+ --source;
+ if (destination < last_destination) break;
+ *destination = std::move(*source);
+ }
+
+ inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
+ insert_assignment.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), insert_construction.data(), &values,
+ insert_construction.size());
+
+ move_construction_tx.Commit();
+
+ AddSize(insert_count);
+ return iterator(storage_view.data + insert_index);
+ }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+
+ if (storage_view.size == storage_view.capacity) {
+ size_type new_capacity = NextCapacity(storage_view.capacity);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_data = new_data;
+ } else {
+ construct_data = storage_view.data;
+ }
+
+ pointer end = construct_data + storage_view.size;
+
+ AllocatorTraits::construct(*GetAllocPtr(), end, std::forward<Args>(args)...);
+
+ if (allocation_tx.DidAllocate()) {
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), allocation_tx.GetData(), &move_values,
+ storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ AllocatorTraits::destroy(*GetAllocPtr(), end);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ AddSize(1);
+ return *end;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+ -> iterator {
+ assert(from != to);
+
+ StorageView storage_view = MakeStorageView();
+
+ size_type erase_size = std::distance(from, to);
+ size_type erase_index =
+ std::distance(const_iterator(storage_view.data), from);
+ size_type erase_end_index = erase_index + erase_size;
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data + erase_end_index));
+
+ inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+ &move_values,
+ storage_view.size - erase_end_index);
+
+ inlined_vector_internal::DestroyElements(
+ GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
+
+ SubtractSize(erase_size);
+ return iterator(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity =
+ ComputeCapacity(storage_view.capacity, requested_capacity);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
+ &move_values, storage_view.size);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
+ StorageView storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
+
+ if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+
+ if (storage_view.size > GetInlinedCapacity()) {
+ size_type new_capacity = storage_view.size;
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_data = new_data;
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &move_values, storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ SetAllocatedData(storage_view.data, storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+ storage_view.capacity);
+
+ if (allocation_tx.DidAllocate()) {
+ AcquireAllocatedData(&allocation_tx);
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+ using std::swap;
+ assert(this != other_storage_ptr);
+
+ if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+ swap(data_.allocated, other_storage_ptr->data_.allocated);
+ } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+ Storage* small_ptr = this;
+ Storage* large_ptr = other_storage_ptr;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+ for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+ swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+ }
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+ inlined_vector_internal::ConstructElements(
+ large_ptr->GetAllocPtr(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
+
+ inlined_vector_internal::DestroyElements(
+ large_ptr->GetAllocPtr(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
+ } else {
+ Storage* allocated_ptr = this;
+ Storage* inlined_ptr = other_storage_ptr;
+ if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+ StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+ allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(inlined_ptr->GetInlinedData()));
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+ &move_values, inlined_ptr->GetSize());
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
+
+ inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ }
+
+ swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+ swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
+} // namespace inlined_vector_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
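The Storage member functions above (Reserve, ShrinkToFit, Swap) are what back absl::InlinedVector's reserve(), shrink_to_fit() and swap(). A throwaway usage sketch, not part of the patch and assuming only the usual public InlinedVector API, showing the inline-to-heap round trip these routines implement:

    // Sketch only: exercises Storage::Reserve and Storage::ShrinkToFit through
    // the public absl::InlinedVector interface.
    #include <cassert>
    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<int, 4> v = {1, 2, 3};  // lives in the inline buffer
      v.reserve(100);            // Storage::Reserve moves elements to the heap
      assert(v.capacity() >= 100);
      v.shrink_to_fit();         // Storage::ShrinkToFit: size (3) fits inline,
                                 // so storage moves back into the buffer
      assert(v.capacity() == 4);
      assert(v[0] == 1 && v[2] == 3);
      return 0;
    }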
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index f11a6ad2..3924b8aa 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -188,7 +188,7 @@
#endif
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// A type wrapper that instructs `Layout` to use the specific alignment for the
@@ -644,7 +644,8 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
std::string DebugString() const {
const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
- const std::string types[] = {adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ const std::string types[] = {
+ adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
@@ -734,7 +735,7 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index b9f98471..44d84607 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,7 +28,7 @@
#include "absl/types/span.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -46,7 +46,7 @@ Expected Type(Actual val) {
return val;
}
-// Helper class to test different size and alignments.
+// Helper classes to test different sizes and alignments.
struct alignas(8) Int128 {
uint64_t a, b;
friend bool operator==(Int128 lhs, Int128 rhs) {
@@ -58,6 +58,14 @@ struct alignas(8) Int128 {
}
};
+// int64_t is *not* 8-byte aligned on all platforms!
+struct alignas(8) Int64 {
+ int64_t a;
+ friend bool operator==(Int64 lhs, Int64 rhs) {
+ return lhs.a == rhs.a;
+ }
+};
+
// Properties of types that this test relies on.
static_assert(sizeof(int8_t) == 1, "");
static_assert(alignof(int8_t) == 1, "");
@@ -65,6 +73,8 @@ static_assert(sizeof(int16_t) == 2, "");
static_assert(alignof(int16_t) == 2, "");
static_assert(sizeof(int32_t) == 4, "");
static_assert(alignof(int32_t) == 4, "");
+static_assert(sizeof(Int64) == 8, "");
+static_assert(alignof(Int64) == 8, "");
static_assert(sizeof(Int128) == 16, "");
static_assert(alignof(Int128) == 8, "");
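The new Int64 wrapper exists because alignof(int64_t) is only 4 on some 32-bit ABIs (for example the i386 System V psABI), so alignment expectations written directly against int64_t are not portable. A small illustrative sketch, mine rather than part of the patch:

    #include <cstdint>

    // Forcing the alignment, as the test's Int64 does, makes the assertion
    // hold on every platform; asserting on raw int64_t would not be portable.
    struct alignas(8) AlignedInt64 { int64_t a; };
    static_assert(alignof(AlignedInt64) == 8, "forced alignment");
    // Not portable -- fails where alignof(int64_t) == 4:
    // static_assert(alignof(int64_t) == 8, "");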
@@ -1282,14 +1292,14 @@ TEST(Layout, OverAligned) {
TEST(Layout, Alignment) {
static_assert(Layout<int8_t>::Alignment() == 1, "");
static_assert(Layout<int32_t>::Alignment() == 4, "");
- static_assert(Layout<int64_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64>::Alignment() == 8, "");
static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
- static_assert(Layout<int8_t, int32_t, int64_t>::Alignment() == 8, "");
- static_assert(Layout<int8_t, int64_t, int32_t>::Alignment() == 8, "");
- static_assert(Layout<int32_t, int8_t, int64_t>::Alignment() == 8, "");
- static_assert(Layout<int32_t, int64_t, int8_t>::Alignment() == 8, "");
- static_assert(Layout<int64_t, int8_t, int32_t>::Alignment() == 8, "");
- static_assert(Layout<int64_t, int32_t, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, int32_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, Int64, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, int8_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
}
TEST(Layout, ConstexprPartial) {
@@ -1324,7 +1334,7 @@ void ExpectPoisoned(const unsigned char (&buf)[N],
}
TEST(Layout, PoisonPadding) {
- using L = Layout<int8_t, int64_t, int32_t, Int128>;
+ using L = Layout<int8_t, Int64, int32_t, Int128>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
@@ -1553,5 +1563,5 @@ TEST(CompactString, Works) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h
index e8d89f63..d7581360 100644
--- a/absl/container/internal/node_hash_policy.h
+++ b/absl/container/internal/node_hash_policy.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@
#include <utility>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class Reference, class Policy>
@@ -84,7 +84,7 @@ struct node_hash_policy {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc
index a73c7bba..d53b7364 100644
--- a/absl/container/internal/node_hash_policy_test.cc
+++ b/absl/container/internal/node_hash_policy_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,7 +21,7 @@
#include "absl/container/internal/hash_policy_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -65,5 +65,5 @@ TEST_F(NodeTest, transfer) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 53d4619a..00caa373 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,11 +19,12 @@
#include <type_traits>
#include <utility>
+#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class Policy, class Hash, class Eq, class Alloc>
@@ -40,8 +41,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())));
- using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
- IsTransparent<Hash>::value>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
public:
using key_type = typename Policy::key_type;
@@ -137,14 +138,20 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
template <class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key) {
auto it = this->find(key);
- if (it == this->end()) std::abort();
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
return Policy::value(&*it);
}
template <class K = key_type, class P = Policy>
MappedConstReference<P> at(const key_arg<K>& key) const {
auto it = this->find(key);
- if (it == this->end()) std::abort();
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
return Policy::value(&*it);
}
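With this change a missing key in at() is reported by throwing std::out_of_range (via base_internal::ThrowStdOutOfRange) instead of calling std::abort(). A minimal sketch of what callers can now rely on, assuming the public absl::flat_hash_map wrapper around raw_hash_map and a build with exceptions enabled (with -fno-exceptions the helper still terminates):

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include "absl/container/flat_hash_map.h"

    int main() {
      absl::flat_hash_map<std::string, int> m = {{"a", 1}};
      try {
        std::cout << m.at("missing") << "\n";  // no such key
      } catch (const std::out_of_range& e) {
        std::cout << "out_of_range: " << e.what() << "\n";
      }
      return 0;
    }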
@@ -181,7 +188,7 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 4e690dac..02e74e21 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include "absl/base/config.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
constexpr size_t Group::kWidth;
@@ -44,5 +44,5 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 0c42e4ae..7b379d4f 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -91,36 +91,6 @@
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
-#ifndef SWISSTABLE_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define SWISSTABLE_HAVE_SSE2 1
-#else
-#define SWISSTABLE_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef SWISSTABLE_HAVE_SSSE3
-#ifdef __SSSE3__
-#define SWISSTABLE_HAVE_SSSE3 1
-#else
-#define SWISSTABLE_HAVE_SSSE3 0
-#endif
-#endif
-
-#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if SWISSTABLE_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if SWISSTABLE_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
#include <algorithm>
#include <cmath>
#include <cstdint>
@@ -135,18 +105,20 @@
#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/have_sse.h"
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/types/optional.h"
#include "absl/utility/utility.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <size_t Width>
@@ -194,12 +166,6 @@ struct IsDecomposable<
std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
-template <class, class = void>
-struct IsTransparent : std::false_type {};
-template <class T>
-struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
- : std::true_type {};
-
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable() {
@@ -385,7 +351,7 @@ struct GroupSse2Impl {
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
- return Match(kEmpty);
+ return Match(static_cast<h2_t>(kEmpty));
#endif
}
@@ -481,9 +447,7 @@ using Group = GroupPortableImpl;
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
-inline bool IsValidCapacity(size_t n) {
- return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
-}
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// PRECONDITION:
// IsValidCapacity(capacity)
@@ -505,152 +469,32 @@ inline void ConvertDeletedToEmptyAndFullToDeleted(
ctrl[capacity] = kSentinel;
}
-// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
-// greater or equal to Group::kWidth - 1.
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- return n <= kMinCapacity
- ? kMinCapacity
- : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
+ return n ? ~size_t{} >> LeadingZeros(n) : 1;
}
-// The node_handle concept from C++17.
-// We specialize node_handle for sets and maps. node_handle_base holds the
-// common API of both.
-template <typename Policy, typename Alloc>
-class node_handle_base {
- protected:
- using PolicyTraits = hash_policy_traits<Policy>;
- using slot_type = typename PolicyTraits::slot_type;
-
- public:
- using allocator_type = Alloc;
-
- constexpr node_handle_base() {}
- node_handle_base(node_handle_base&& other) noexcept {
- *this = std::move(other);
- }
- ~node_handle_base() { destroy(); }
- node_handle_base& operator=(node_handle_base&& other) {
- destroy();
- if (!other.empty()) {
- alloc_ = other.alloc_;
- PolicyTraits::transfer(alloc(), slot(), other.slot());
- other.reset();
- }
- return *this;
- }
-
- bool empty() const noexcept { return !alloc_; }
- explicit operator bool() const noexcept { return !empty(); }
- allocator_type get_allocator() const { return *alloc_; }
-
- protected:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
- PolicyTraits::transfer(alloc(), slot(), s);
- }
-
- void destroy() {
- if (!empty()) {
- PolicyTraits::destroy(alloc(), slot());
- reset();
- }
- }
-
- void reset() {
- assert(alloc_.has_value());
- alloc_ = absl::nullopt;
- }
-
- slot_type* slot() const {
- assert(!empty());
- return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
- }
- allocator_type* alloc() { return std::addressof(*alloc_); }
-
- private:
- absl::optional<allocator_type> alloc_;
- mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
- slot_space_;
-};
-
-// For sets.
-template <typename Policy, typename Alloc, typename = void>
-class node_handle : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using value_type = typename Base::PolicyTraits::value_type;
-
- constexpr node_handle() {}
-
- value_type& value() const {
- return Base::PolicyTraits::element(this->slot());
- }
-
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
-};
-
-// For maps.
-template <typename Policy, typename Alloc>
-class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
- : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using key_type = typename Policy::key_type;
- using mapped_type = typename Policy::mapped_type;
-
- constexpr node_handle() {}
-
- auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
- return Base::PolicyTraits::key(this->slot());
- }
-
- mapped_type& mapped() const {
- return Base::PolicyTraits::value(
- &Base::PolicyTraits::element(this->slot()));
+// We use 7/8 as the maximum load factor.
+// For 16-wide groups, that gives an average of two empty slots per group.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
}
-
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
-};
-
-// Implement the insert_return_type<> concept of C++17.
-template <class Iterator, class NodeType>
-struct insert_return_type {
- Iterator position;
- bool inserted;
- NodeType node;
-};
-
-// Helper trait to allow or disallow arbitrary keys when the hash and
-// eq functions are transparent.
-// It is very important that the inner template is an alias and that the type it
-// produces is not a dependent type. Otherwise, type deduction would fail.
-template <bool is_transparent>
-struct KeyArg {
- // Transparent. Forward `K`.
- template <typename K, typename key_type>
- using type = K;
-};
-
-template <>
-struct KeyArg<false> {
- // Not transparent. Always use `key_type`.
- template <typename K, typename key_type>
- using type = key_type;
-};
+ return capacity - capacity / 8;
+}
+// From a desired "growth" to a lower bound of the necessary capacity.
+// The result might not be a valid capacity and may require NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
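CapacityToGrowth and GrowthToLowerboundCapacity encode the roughly-7/8 maximum load factor. A worked sketch, assuming Group::kWidth == 16 so the `== 7` special cases are not taken, showing the arithmetic that reserve() and rehash() rely on:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    size_t CapacityToGrowthSketch(size_t capacity) {          // ~capacity * 7/8
      return capacity - capacity / 8;
    }
    size_t GrowthToLowerboundCapacitySketch(size_t growth) {  // ~growth * 8/7
      return growth +
             static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
    }

    int main() {
      assert(CapacityToGrowthSketch(15) == 14);    // 15 slots hold 14 elements
      assert(GrowthToLowerboundCapacitySketch(14) == 15);
      // Room for 16 elements needs at least 18 slots; NormalizeCapacity() then
      // rounds that up to 31.
      assert(GrowthToLowerboundCapacitySketch(16) == 18);
      // With 16-wide groups a 7-slot (small) table may be completely full.
      assert(CapacityToGrowthSketch(7) == 7);
      return 0;
    }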
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
@@ -666,14 +510,14 @@ struct KeyArg<false> {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
using PolicyTraits = hash_policy_traits<Policy>;
- using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
- IsTransparent<Hash>::value>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
public:
using init_type = typename PolicyTraits::init_type;
@@ -814,7 +658,11 @@ class raw_hash_set {
}
ctrl_t* ctrl_ = nullptr;
- slot_type* slot_;
+  // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singleton and end iterators.
+ union {
+ slot_type* slot_;
+ };
};
class const_iterator {
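The anonymous union above is the standard trick for keeping a member deliberately uninitialized without tripping "uninitialized member" diagnostics: union members get no implicit initialization. A two-line illustration, not taken from the patch:

    // Sketch: ctrl_ is always initialized; slot_ intentionally is not until
    // the iterator points at a full slot.
    struct IteratorShapeSketch {
      int* ctrl_ = nullptr;
      union { int* slot_; };
    };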
@@ -854,7 +702,8 @@ class raw_hash_set {
iterator inner_;
};
- using node_type = container_internal::node_handle<Policy, Alloc>;
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
raw_hash_set() noexcept(
std::is_nothrow_default_constructible<hasher>::value&&
@@ -867,7 +716,7 @@ class raw_hash_set {
: ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ reset_growth_left();
initialize_slots();
}
}
@@ -909,8 +758,8 @@ class raw_hash_set {
// that accept std::initializer_list<T> and std::initializer_list<init_type>.
// This is advantageous for performance.
//
- // // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies
- // // the strings into the set.
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+ // // copies the strings into the set.
// std::unordered_set<std::string> s = {"abc", "def"};
//
// // Turns {"abc", "def"} into std::initializer_list<const char*>, then
@@ -973,9 +822,10 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- const size_t i = find_first_non_full(hash);
- set_ctrl(i, H2(hash));
- emplace_at(i, v);
+ auto target = find_first_non_full(hash);
+ set_ctrl(target.offset, H2(hash));
+ emplace_at(target.offset, v);
+ infoz_.RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -989,6 +839,7 @@ class raw_hash_set {
slots_(absl::exchange(that.slots_, nullptr)),
size_(absl::exchange(that.size_, 0)),
capacity_(absl::exchange(that.capacity_, 0)),
+ infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
@@ -1009,6 +860,7 @@ class raw_hash_set {
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
+ std::swap(infoz_, that.infoz_);
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1058,7 +910,7 @@ class raw_hash_set {
size_t capacity() const { return capacity_; }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
- void clear() {
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
// Iterating over this container is O(bucket_count()). When bucket_count()
// is much greater than size(), iteration becomes prohibitively expensive.
// For clear() it is more important to reuse the allocated array when the
@@ -1076,9 +928,10 @@ class raw_hash_set {
}
size_ = 0;
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ reset_growth_left();
}
assert(empty());
+ infoz_.RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1117,7 +970,7 @@ class raw_hash_set {
// This overload kicks in when the argument is an rvalue of init_type. Its
// purpose is to handle brace-init-list arguments.
//
- // flat_hash_set<std::string, int> s;
+ // flat_hash_map<std::string, int> s;
// s.insert({"abc", 42});
std::pair<iterator, bool> insert(init_type&& value) {
return emplace(std::move(value));
@@ -1158,13 +1011,14 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type<iterator, node_type> insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) {
if (!node) return {end(), false, node_type()};
- const auto& elem = PolicyTraits::element(node.slot());
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
- InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
if (res.second) {
- node.reset();
+ CommonAccess::Reset(&node);
return {res.first, true, node_type()};
} else {
return {res.first, false, std::move(node)};
@@ -1328,7 +1182,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- node_type node(alloc_ref(), position.inner_.slot_);
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
return node;
}
@@ -1353,6 +1208,7 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
+ swap(infoz_, that.infoz_);
if (AllocTraits::propagate_on_container_swap::value) {
swap(alloc_ref(), that.alloc_ref());
} else {
@@ -1363,17 +1219,21 @@ class raw_hash_set {
void rehash(size_t n) {
if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) return destroy_slots();
- auto m = NormalizeCapacity(std::max(n, NumSlotsFast(size())));
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
+ infoz_.RecordStorageChanged(0, 0);
+ return;
+ }
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
}
}
- void reserve(size_t n) {
- rehash(NumSlotsFast(n));
- }
+ void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
// Extension API: support for heterogeneous keys.
//
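The `n | GrowthToLowerboundCapacity(size())` expression in rehash() leans on the fact that the normalized capacity depends only on the position of the highest set bit, so bit-or and max normalize identically. A small sketch checking that claim over a range of inputs (NormalizeSketch repeats the 2^k - 1 rounding rule from above):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    size_t NormalizeSketch(size_t n) {
      if (n == 0) return 1;
      size_t cap = 1;
      while (cap < n) cap = cap * 2 + 1;
      return cap;
    }

    int main() {
      for (size_t a = 0; a < 200; ++a) {
        for (size_t b = 0; b < 200; ++b) {
          // (a | b) has the same highest set bit as max(a, b).
          assert(NormalizeSketch(a | b) == NormalizeSketch(std::max(a, b)));
        }
      }
      return 0;
    }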
@@ -1551,13 +1411,6 @@ class raw_hash_set {
slot_type&& slot;
};
- // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
- static inline size_t NumSlotsFast(size_t n) {
- return static_cast<size_t>(
- (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
- kMaxLoadFactorNumerator);
- }
-
// "erases" the object from the container, except that it doesn't actually
// destroy the object. It only updates all the metadata of the class.
// This can be used in conjunction with Policy::transfer to move the object to
@@ -1580,17 +1433,34 @@ class raw_hash_set {
set_ctrl(index, was_never_full ? kEmpty : kDeleted);
growth_left() += was_never_full;
+ infoz_.RecordErase();
}
void initialize_slots() {
assert(capacity_);
+ // Folks with custom allocators often make unwarranted assumptions about the
+    // behavior of their classes vis-a-vis trivial destructibility and what
+    // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
+ infoz_ = Sample();
+ }
+
auto layout = MakeLayout(capacity_);
char* mem = static_cast<char*>(
Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
slots_ = layout.template Pointer<1>(mem);
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz_.RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
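initialize_slots() is also where a table may register itself with the hashtablez sampler, and per the comment above it only does so for std::allocator-backed tables. A minimal sketch of turning sampling on and inspecting it, using the same internal knobs as the tests further down; this is an internal, unstable API, and on platforms where the sampler is compiled out it simply records nothing:

    #include <cstddef>
    #include <iostream>
    #include <vector>
    #include "absl/container/flat_hash_set.h"
    #include "absl/container/internal/hashtablez_sampler.h"

    int main() {
      using absl::container_internal::HashtablezInfo;
      using absl::container_internal::HashtablezSampler;

      // Same knobs the tests below use; sampling is probabilistic (~1/100).
      absl::container_internal::SetHashtablezEnabled(true);
      absl::container_internal::SetHashtablezSampleParameter(100);

      std::vector<absl::flat_hash_set<int>> tables(1000);
      for (auto& t : tables) t.insert(1);  // first allocation may be sampled

      size_t live = 0;
      HashtablezSampler::Global().Iterate(
          [&](const HashtablezInfo&) { ++live; });
      std::cout << "sampled tables: " << live << "\n";  // roughly 10 expected
      return 0;
    }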
@@ -1619,11 +1489,14 @@ class raw_hash_set {
capacity_ = new_capacity;
initialize_slots();
+ size_t total_probe_length = 0;
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
set_ctrl(new_i, H2(hash));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
@@ -1635,10 +1508,12 @@ class raw_hash_set {
Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
layout.AllocSize());
}
+ infoz_.RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
+ assert(!is_small());
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1658,12 +1533,15 @@ class raw_hash_set {
ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
+ size_t total_probe_length = 0;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
// Verify if the old and new i fall within the same group wrt the hash.
// If they do, we don't need to move the object as it falls already in the
@@ -1695,13 +1573,14 @@ class raw_hash_set {
--i; // repeat
}
}
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz_.RecordRehash(total_probe_length);
}
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
- resize(Group::kWidth - 1);
- } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+ resize(1);
+ } else if (size() <= CapacityToGrowth(capacity()) / 2) {
// Squash DELETED without growing if there is enough capacity.
drop_deletes_without_resize();
} else {
@@ -1736,24 +1615,26 @@ class raw_hash_set {
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
- size_t find_first_non_full(size_t hash) {
+ struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+ };
+ FindInfo find_first_non_full(size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
- // We want to force small tables to have random entries too, so
- // in debug build we will randomly insert in either the front or back of
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (ShouldInsertBackwards(hash, ctrl_))
- return seq.offset(mask.HighestBitSet());
- else
- return seq.offset(mask.LowestBitSet());
-#else
- return seq.offset(mask.LowestBitSet());
+ if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
}
assert(seq.index() < capacity_ && "full table!");
seq.next();
@@ -1792,15 +1673,17 @@ class raw_hash_set {
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- size_t target = find_first_non_full(hash);
- if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+ auto target = find_first_non_full(hash);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
target = find_first_non_full(hash);
}
++size_;
- growth_left() -= IsEmpty(ctrl_[target]);
- set_ctrl(target, H2(hash));
- return target;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
+ set_ctrl(target.offset, H2(hash));
+ infoz_.RecordInsert(hash, target.probe_length);
+ return target.offset;
}
// Constructs the value in the space pointed by the iterator. This only works
@@ -1838,6 +1721,10 @@ class raw_hash_set {
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
}
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
// Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
// the end too.
void set_ctrl(size_t i, ctrl_t h) {
@@ -1850,11 +1737,28 @@ class raw_hash_set {
}
ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
+ ctrl_[((i - Group::kWidth) & capacity_) + 1 +
+ ((Group::kWidth - 1) & capacity_)] = h;
}
size_t& growth_left() { return settings_.template get<0>(); }
+ // The representation of the object has two modes:
+ // - small: For capacities < kWidth-1
+ // - large: For the rest.
+ //
+ // Differences:
+ // - In small mode we are able to use the whole capacity. The extra control
+ // bytes give us at least one "empty" control byte to stop the iteration.
+ // This is important to make 1 a valid capacity.
+ //
+ // - In small mode only the first `capacity()` control bytes after the
+ // sentinel are valid. The rest contain dummy kEmpty values that do not
+  // represent a real slot. This is important to take into account in
+ // find_first_non_full(), where we never try ShouldInsertBackwards() for
+ // small tables.
+ bool is_small() const { return capacity_ < Group::kWidth - 1; }
+
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
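The new set_ctrl() mirroring formula and the small/large distinction documented above can be checked by hand. A throwaway sketch, assuming Group::kWidth == 16 and a control array of capacity + 1 + kWidth bytes (slot bytes, then the sentinel, then clones), printing where each slot's control byte gets cloned; small tables mirror every byte right after the sentinel, large tables behave as before, and for i >= kWidth the formula maps a byte onto itself:

    #include <cstddef>
    #include <initializer_list>
    #include <iostream>

    constexpr size_t kWidth = 16;  // assumed group width

    size_t MirrorIndex(size_t i, size_t capacity) {
      return ((i - kWidth) & capacity) + 1 + ((kWidth - 1) & capacity);
    }

    int main() {
      for (size_t capacity : {size_t{1}, size_t{3}, size_t{15}, size_t{31}}) {
        std::cout << "capacity " << capacity << ":";
        for (size_t i = 0; i < capacity && i < kWidth; ++i) {
          std::cout << " " << i << "->" << MirrorIndex(i, capacity);
        }
        std::cout << "\n";  // e.g. capacity 1: 0->2; capacity 15: i -> 16 + i
      }
      return 0;
    }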
@@ -1864,12 +1768,6 @@ class raw_hash_set {
return settings_.template get<3>();
}
- // On average each group has 2 empty slot (for the vectorized case).
- static constexpr int64_t kMaxLoadFactorNumerator = 14;
- static constexpr int64_t kMaxLoadFactorDenominator = 16;
- static constexpr float kMaxLoadFactor =
- 1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
-
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
@@ -1877,6 +1775,7 @@ class raw_hash_set {
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
+ HashtablezInfoHandle infoz_;
absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
key_equal, allocator_type>
settings_{0, hasher{}, key_equal{}, allocator_type{}};
@@ -1929,10 +1828,9 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
}
static size_t LowerBoundAllocatedByteSize(size_t size) {
- size_t capacity = container_internal::NormalizeCapacity(
- std::ceil(size / Set::kMaxLoadFactor));
+ size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
+ auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
size_t m = layout.AllocSize();
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -1944,7 +1842,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
} // namespace hashtable_debug_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index f5779d62..5188b3ae 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include "absl/container/internal/tracked.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -426,5 +426,5 @@ TEST_F(PropagateOnAll, Swap) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 302f9758..2783f5c4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -35,7 +35,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
struct RawHashSetTestOnlyAccess {
@@ -49,18 +49,47 @@ namespace {
using ::testing::DoubleNear;
using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(Util, NormalizeCapacity) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+ EXPECT_EQ(1, NormalizeCapacity(0));
+ EXPECT_EQ(1, NormalizeCapacity(1));
+ EXPECT_EQ(3, NormalizeCapacity(2));
+ EXPECT_EQ(3, NormalizeCapacity(3));
+ EXPECT_EQ(7, NormalizeCapacity(4));
+ EXPECT_EQ(7, NormalizeCapacity(7));
+ EXPECT_EQ(15, NormalizeCapacity(8));
+ EXPECT_EQ(15, NormalizeCapacity(15));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
+}
+
+TEST(Util, GrowthAndCapacity) {
+  // Verify that NormalizeCapacity(GrowthToLowerboundCapacity(growth)) gives
+  // the minimum capacity that has enough growth.
+ for (size_t growth = 0; growth < 10000; ++growth) {
+ SCOPED_TRACE(growth);
+ size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+ // The capacity is large enough for `growth`
+ EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ if (growth != 0 && capacity > 1) {
+ // There is no smaller capacity that works.
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+ }
+ }
+
+ for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+ capacity = 2 * capacity + 1) {
+ SCOPED_TRACE(capacity);
+ size_t growth = CapacityToGrowth(capacity);
+ EXPECT_THAT(growth, Lt(capacity));
+ EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+ EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+ }
}
TEST(Util, probe_seq) {
@@ -107,14 +136,14 @@ TEST(BitMask, WithShift) {
}
TEST(BitMask, LeadingTrailing) {
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
@@ -315,7 +344,25 @@ struct IntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, std::allocator<int64_t>> {
using Base = typename IntTable::raw_hash_set;
- IntTable() {}
+ using Base::Base;
+};
+
+template <typename T>
+struct CustomAlloc : std::allocator<T> {
+ CustomAlloc() {}
+
+ template <typename U>
+ CustomAlloc(const CustomAlloc<U>& other) {}
+
+ template<class U> struct rebind {
+ using other = CustomAlloc<U>;
+ };
+};
+
+struct CustomAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, CustomAlloc<int64_t>> {
+ using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
@@ -343,6 +390,7 @@ TEST(Table, EmptyFunctorOptimization) {
size_t size;
size_t capacity;
size_t growth_left;
+ void* infoz;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -385,10 +433,11 @@ TEST(Table, Prefetch) {
t.prefetch(2);
// Do not run in debug mode, when prefetch is not implemented, or when
- // sanitizers are enabled.
-#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
- !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \
- !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ // sanitizers are enabled, or on WebAssembly.
+#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \
+ !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
+ !defined(__EMSCRIPTEN__)
const auto now = [] { return absl::base_internal::CycleClock::Now(); };
// Make size enough to not fit in L2 cache (16.7 Mb)
@@ -785,7 +834,7 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
TEST(Table, ClearBug) {
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
- constexpr size_t max_size = capacity / 2;
+ constexpr size_t max_size = capacity / 2 + 1;
for (size_t i = 0; i < max_size; ++i) {
t.insert(i);
}
@@ -816,6 +865,25 @@ TEST(Table, Erase) {
EXPECT_TRUE(t.find(0) == t.end());
}
+TEST(Table, EraseMaintainsValidIterator) {
+ IntTable t;
+ const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+ EXPECT_TRUE(t.emplace(i).second);
+ }
+ EXPECT_EQ(t.size(), kNumElements);
+
+ int num_erase_calls = 0;
+ auto it = t.begin();
+ while (it != t.end()) {
+ t.erase(it++);
+ num_erase_calls++;
+ }
+
+ EXPECT_TRUE(t.empty());
+ EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
// Collect N bad keys by following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1014,7 +1082,7 @@ ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys
ExpectedStats XorSeedExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1051,6 +1119,7 @@ ExpectedStats XorSeedExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1107,7 +1176,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
ExpectedStats LinearTransformExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1144,6 +1213,7 @@ ExpectedStats LinearTransformExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1296,37 +1366,31 @@ TEST(Table, ConstructFromInitList) {
TEST(Table, CopyConstruct) {
IntTable t;
- t.max_load_factor(.321f);
t.emplace(0);
EXPECT_EQ(1, t.size());
{
IntTable u(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u{t};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
}
TEST(Table, CopyConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(t, Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
@@ -1344,94 +1408,75 @@ TEST(Table, AllocWithExplicitCtor) {
TEST(Table, MoveConstruct) {
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t));
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u{std::move(t)};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
}
TEST(Table, MoveConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopyAssign) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopySelfAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
t = *&t;
EXPECT_EQ(1, t.size());
- EXPECT_EQ(lf, t.max_load_factor());
EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
TEST(Table, MoveAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, Equality) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v), std::end(v));
StringTable u = t;
EXPECT_EQ(u, t);
@@ -1439,20 +1484,24 @@ TEST(Table, Equality) {
TEST(Table, Equality2) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
TEST(Table, Equality3) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+ {"bb", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
@@ -1677,7 +1726,7 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node.empty());
StringTable t2;
- auto res = t2.insert(std::move(node));
+ StringTable::insert_return_type res = t2.insert(std::move(node));
EXPECT_TRUE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_FALSE(res.node);
@@ -1707,80 +1756,74 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
-StringTable MakeSimpleTable(size_t size) {
- StringTable t;
- for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), "");
+IntTable MakeSimpleTable(size_t size) {
+ IntTable t;
+ while (t.size() < size) t.insert(t.size());
return t;
}
-std::string OrderOfIteration(const StringTable& t) {
- std::string order;
- for (auto& p : t) order += p.first;
- return order;
+std::vector<int> OrderOfIteration(const IntTable& t) {
+ return {t.begin(), t.end()};
}
+// These IterationOrderChanges tests depend on non-deterministic behavior.
+// We inject non-determinism from the table's pointer value, but only its
+// memory page matters. We have to retry enough times to make sure
+// we are touching different memory pages to cause the ordering to change.
+// We also need to keep the old tables around to avoid getting the same memory
+// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
- // Needs to be more than kWidth elements to be able to affect order.
- const StringTable reference = MakeSimpleTable(20);
-
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- auto trial = MakeSimpleTable(20);
- if (OrderOfIteration(trial) != OrderOfIteration(reference)) {
- // We are done.
- return;
+ for (size_t size : {2, 6, 12, 20}) {
+ const auto reference_table = MakeSimpleTable(size);
+ const auto reference = OrderOfIteration(reference_table);
+
+ std::vector<IntTable> tables;
+ bool found_difference = false;
+ for (int i = 0; !found_difference && i < 5000; ++i) {
+ tables.push_back(MakeSimpleTable(size));
+ found_difference = OrderOfIteration(tables.back()) != reference;
+ }
+ if (!found_difference) {
+ FAIL()
+ << "Iteration order remained the same across many attempts with size "
+ << size;
}
- garbage.push_back(std::move(trial));
}
- FAIL();
}
TEST(Table, IterationOrderChangesOnRehash) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- // Needs to be more than kWidth elements to be able to affect order.
- StringTable t = MakeSimpleTable(20);
- const std::string reference = OrderOfIteration(t);
+ std::vector<IntTable> garbage;
+ for (int i = 0; i < 5000; ++i) {
+ auto t = MakeSimpleTable(20);
+ const auto reference = OrderOfIteration(t);
// Force rehash to the same size.
t.rehash(0);
- std::string trial = OrderOfIteration(t);
+ auto trial = OrderOfIteration(t);
if (trial != reference) {
// We are done.
return;
}
garbage.push_back(std::move(t));
}
- FAIL();
+ FAIL() << "Iteration order remained the same across many attempts.";
}
-TEST(Table, IterationOrderChangesForSmallTables) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- StringTable reference_table = MakeSimpleTable(5);
- const std::string reference = OrderOfIteration(reference_table);
- std::vector<StringTable> garbage;
- for (int i = 0; i < 50; ++i) {
- StringTable t = MakeSimpleTable(5);
- std::string trial = OrderOfIteration(t);
- if (trial != reference) {
- // We are done.
- return;
- }
- garbage.push_back(std::move(t));
- }
- FAIL() << "Iteration order remained the same across many attempts.";
+// Verify that pointers are invalidated as soon as a second element is inserted.
+// This prevents dependency on pointer stability on small tables.
+TEST(Table, UnstablePointers) {
+ IntTable table;
+
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*table.find(i));
+ };
+
+ table.insert(0);
+ const uintptr_t old_ptr = addr(0);
+
+ // This causes a rehash.
+ table.insert(1);
+
+ EXPECT_NE(old_ptr, addr(0));
}
// Confirm that we assert if we try to erase() end().
@@ -1799,9 +1842,52 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
+TEST(RawHashSamplerTest, Sample) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<IntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.01, 0.005);
+}
+
+TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<CustomAllocIntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.00, 0.001);
+}
+
#ifdef ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
+ t.reserve(5);
// Insert something to force an allocation.
int64_t& v1 = *t.insert(0).first;
@@ -1826,5 +1912,5 @@ TEST(Sanitizer, PoisoningOnErase) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
index 91441729..f4b283fd 100644
--- a/absl/container/internal/test_instance_tracker.cc
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@
#include "absl/container/internal/test_instance_tracker.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace test_internal {
int BaseCountedInstance::num_instances_ = 0;
int BaseCountedInstance::num_live_instances_ = 0;
@@ -25,5 +25,5 @@ int BaseCountedInstance::num_swaps_ = 0;
int BaseCountedInstance::num_comparisons_ = 0;
} // namespace test_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
index 060077d0..ab7f9f22 100644
--- a/absl/container/internal/test_instance_tracker.h
+++ b/absl/container/internal/test_instance_tracker.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,11 +18,13 @@
#include <cstdlib>
#include <ostream>
+#include "absl/types/compare.h"
+
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace test_internal {
-// A type that counts number of occurences of the type, the live occurrences of
+// A type that counts number of occurrences of the type, the live occurrences of
// the type, as well as the number of copies, moves, swaps, and comparisons that
// have occurred on the type. This is used as a base class for the copyable,
// copyable+movable, and movable types below that are used in actual tests. Use
@@ -97,6 +99,14 @@ class BaseCountedInstance {
return value_ >= x.value_;
}
+ absl::weak_ordering compare(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_
+ ? absl::weak_ordering::less
+ : value_ == x.value_ ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+
int value() const {
if (!is_live_) std::abort();
return value_;
@@ -258,7 +268,7 @@ class MovableOnlyInstance : public BaseCountedInstance {
};
} // namespace test_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
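
The compare() member added above gives the instance trackers a three-way comparison built on absl::weak_ordering from "absl/types/compare.h", so tests can count comparisons issued through a compare(a, b) call as well as through the relational operators. A minimal sketch of the same pattern on a hypothetical Widget type (Widget and value_ are illustration names, not Abseil ones):

// Sketch of a three-way comparison returning absl::weak_ordering.
#include "absl/types/compare.h"

class Widget {
 public:
  explicit Widget(int value) : value_(value) {}

  absl::weak_ordering compare(const Widget& other) const {
    return value_ < other.value_
               ? absl::weak_ordering::less
               : value_ == other.value_ ? absl::weak_ordering::equivalent
                                        : absl::weak_ordering::greater;
  }

 private:
  int value_;
};

// Usage mirrors the new expectation in test_instance_tracker_test.cc:
//   Widget a(1), b(2);
//   assert(a.compare(b) < 0);  // weak_ordering values compare against 0.
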
diff --git a/absl/container/internal/test_instance_tracker_test.cc b/absl/container/internal/test_instance_tracker_test.cc
index 0ae57636..1c6a4fa7 100644
--- a/absl/container/internal/test_instance_tracker_test.cc
+++ b/absl/container/internal/test_instance_tracker_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -174,6 +174,8 @@ TEST(TestInstanceTracker, Comparisons) {
EXPECT_EQ(5, tracker.comparisons());
EXPECT_FALSE(one >= two);
EXPECT_EQ(6, tracker.comparisons());
+ EXPECT_TRUE(one.compare(two) < 0); // NOLINT
+ EXPECT_EQ(7, tracker.comparisons());
tracker.ResetCopiesMovesSwaps();
EXPECT_EQ(0, tracker.comparisons());
diff --git a/absl/container/internal/tracked.h b/absl/container/internal/tracked.h
index f72c46ea..e9e6b95d 100644
--- a/absl/container/internal/tracked.h
+++ b/absl/container/internal/tracked.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include <utility>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// A class that tracks its copies and moves so that it can be queried in tests.
@@ -74,7 +74,7 @@ class Tracked {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
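
Tracked, referenced in the diff above, counts the copies and moves applied to a value so the raw_hash_set tests can assert how elements travel through the table. The sketch below shows the general counting technique with a hypothetical CopyMoveCounter; it does not reproduce Tracked's actual interface.

// Hypothetical illustration of counting copies and moves in the special
// member functions; this is not the Abseil Tracked<T> interface.
#include <iostream>
#include <utility>

class CopyMoveCounter {
 public:
  CopyMoveCounter() = default;
  CopyMoveCounter(const CopyMoveCounter& other)
      : copies_(other.copies_ + 1), moves_(other.moves_) {}
  CopyMoveCounter(CopyMoveCounter&& other)
      : copies_(other.copies_), moves_(other.moves_ + 1) {}

  int copies() const { return copies_; }
  int moves() const { return moves_; }

 private:
  int copies_ = 0;
  int moves_ = 0;
};

int main() {
  CopyMoveCounter a;
  CopyMoveCounter b = a;             // records one copy
  CopyMoveCounter c = std::move(b);  // records one move on top of the copy
  std::cout << c.copies() << " " << c.moves() << "\n";  // prints "1 1"
}
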
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
index 14ceeecb..b64b5520 100644
--- a/absl/container/internal/unordered_map_constructor_test.h
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,13 +24,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ConstructorTest);
+TYPED_TEST_SUITE_P(ConstructorTest);
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
@@ -84,8 +84,28 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
@@ -93,11 +113,17 @@ TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
@@ -108,18 +134,38 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
@@ -141,8 +187,11 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
@@ -153,11 +202,17 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -171,7 +226,10 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
@@ -191,8 +249,11 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -207,7 +268,10 @@ TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
@@ -230,8 +294,11 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -247,7 +314,10 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on move constructors.
@@ -270,8 +340,11 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
@@ -281,11 +354,17 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -298,7 +377,10 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, Assignment) {
@@ -391,17 +473,17 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
- BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
- MoveAssignment, AssignmentFromInitializerList,
- AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
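
Throughout this header the per-test #ifdef guards are replaced by tag dispatch: traits such as expect_cxx14_apis<T> and expect_alloc_constructors<T> resolve to std::true_type or std::false_type at compile time, and the selected overload either runs the real test body or does nothing, so a std::unordered_map built against an older standard library still compiles and trivially passes. A stripped-down sketch of the pattern, using hypothetical names (HasFeature, FeatureTest) that are not part of Abseil:

// Minimal tag-dispatch sketch; HasFeature/FeatureTest are illustration names.
#include <iostream>
#include <type_traits>

template <typename T>
struct HasFeature : std::false_type {};  // types opt in by specializing.

struct Gadget {};
template <>
struct HasFeature<Gadget> : std::true_type {};

template <typename T>
void FeatureTestImpl(std::false_type) {}  // no-op when the API is absent.

template <typename T>
void FeatureTestImpl(std::true_type) {
  // Body that exercises the optional API; runs only for opted-in types.
  std::cout << "testing optional API\n";
}

template <typename T>
void FeatureTest() {
  FeatureTestImpl<T>(HasFeature<T>());
}

int main() {
  FeatureTest<int>();     // silently does nothing.
  FeatureTest<Gadget>();  // prints "testing optional API".
}

A container type opts in through the trait (here via the Gadget specialization); everything else falls through to the empty overload, which is how the std::unordered_map instantiations are excluded from the C++14-only constructor checks above.
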
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
index d767aa8d..9ad78a79 100644
--- a/absl/container/internal/unordered_map_lookup_test.h
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class LookupTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(LookupTest);
+TYPED_TEST_SUITE_P(LookupTest);
TYPED_TEST_P(LookupTest, At) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -111,7 +111,7 @@ REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
EqualRange);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_map_members_test.h b/absl/container/internal/unordered_map_members_test.h
new file mode 100644
index 00000000..c4600405
--- /dev/null
+++ b/absl/container/internal/unordered_map_members_test.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+ typename TypeParam::mapped_type>,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index 5d7f1fe3..89dd7894 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ModifiersTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ModifiersTest);
+TYPED_TEST_SUITE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -269,7 +269,7 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_test.cc b/absl/container/internal/unordered_map_test.cc
index 548f69f7..51a90af8 100644
--- a/absl/container/internal/unordered_map_test.cc
+++ b/absl/container/internal/unordered_map_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,10 +16,11 @@
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -30,11 +31,12 @@ using MapTypes = ::testing::Types<
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ConstructorTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, LookupTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ModifiersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ModifiersTest, MapTypes);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
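
The INSTANTIATE_TYPED_TEST_CASE_P to INSTANTIATE_TYPED_TEST_SUITE_P changes here, together with the TYPED_TEST_SUITE_P declarations in the shared headers, follow googletest's rename of "test case" to "test suite"; the older _CASE_ spellings remain as deprecated aliases, which is why some REGISTER_TYPED_TEST_CASE_P uses still appear elsewhere in this diff. A self-contained sketch of the three-step typed-test pattern in the newer spelling, with hypothetical suite and type-list names:

// Hypothetical example of a parameterized typed test suite; link against
// gtest_main to run it.
#include "gtest/gtest.h"

template <class T>
class MySuite : public ::testing::Test {};

TYPED_TEST_SUITE_P(MySuite);

TYPED_TEST_P(MySuite, DefaultConstructed) {
  TypeParam value{};  // TypeParam is the current element of the type list.
  (void)value;
}

REGISTER_TYPED_TEST_SUITE_P(MySuite, DefaultConstructed);

using MyTypes = ::testing::Types<int, double>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, MySuite, MyTypes);
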
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
index f370b249..ac73a896 100644
--- a/absl/container/internal/unordered_set_constructor_test.h
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,21 +16,23 @@
#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
#include <algorithm>
+#include <unordered_set>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ConstructorTest);
+TYPED_TEST_SUITE_P(ConstructorTest);
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
@@ -92,8 +94,28 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
EXPECT_GE(cm.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
@@ -101,11 +123,17 @@ TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
@@ -116,18 +144,38 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
@@ -149,8 +197,11 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
@@ -161,11 +212,17 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -179,7 +236,10 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
@@ -197,10 +257,14 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
+ EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
}
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -215,7 +279,10 @@ TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
@@ -238,8 +305,11 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -255,7 +325,10 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on move constructors.
@@ -278,8 +351,11 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
@@ -289,11 +365,17 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -306,10 +388,13 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, Assignment) {
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -395,17 +480,17 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
- BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
- InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
- MoveAssignment, AssignmentFromInitializerList,
- AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
index 9174279a..722fb1c2 100644
--- a/absl/container/internal/unordered_set_lookup_test.h
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordSet>
class LookupTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(LookupTest);
+TYPED_TEST_SUITE_P(LookupTest);
TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -85,7 +85,7 @@ TYPED_TEST_P(LookupTest, EqualRange) {
REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_set_members_test.h b/absl/container/internal/unordered_set_members_test.h
new file mode 100644
index 00000000..756a95cb
--- /dev/null
+++ b/absl/container/internal/unordered_set_members_test.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
index 0a1e9b1b..d3e534d3 100644
--- a/absl/container/internal/unordered_set_modifiers_test.h
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordSet>
class ModifiersTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ModifiersTest);
+TYPED_TEST_SUITE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -184,7 +184,7 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
EraseKey, Swap);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_test.cc b/absl/container/internal/unordered_set_test.cc
index 263059eb..2356e187 100644
--- a/absl/container/internal/unordered_set_test.cc
+++ b/absl/container/internal/unordered_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,24 +16,26 @@
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
-using SetTypes =
- ::testing::Types<std::unordered_set<int, StatefulTestingHash,
- StatefulTestingEqual, Alloc<int>>,
- std::unordered_set<std::string, StatefulTestingHash,
- StatefulTestingEqual, Alloc<std::string>>>;
+using SetTypes = ::testing::Types<
+ std::unordered_set<int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<int>>,
+ std::unordered_set<std::string, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::string>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ConstructorTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, LookupTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ModifiersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ModifiersTest, SetTypes);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl